| prompt | completion | api |
|---|---|---|
| string (length 19 to 1.03M) | string (length 4 to 2.12k) | string (length 8 to 90) |
import pandas as pd
import numpy as np
import math
import os
import geopandas as gpd
import folium
import requests
import json
import datetime
from datetime import date, timedelta
from abc import ABC, abstractmethod
from pathlib import Path
from CovidFoliumMap import CovidFoliumMap, ensure_path_exists, download_JSON_file
""" This classes generate different folium maps based on the data of the RKI using access to the
RKI Covid-19 API.
The class inherits from the CovidFoliumMap class. Here are some usefull links:
- Geodata sources for Germany
From the Bundesamt für Kartographie und Geodäsie:
License plates (wfs_kfz250): https://gdz.bkg.bund.de/index.php/default/open-data/wfs-kfz-kennzeichen-1-250-000-wfs-kfz250.html
Counties & population (wfs_vg250-ew): https://gdz.bkg.bund.de/index.php/default/open-data/wfs-verwaltungsgebiete-1-250-000-mit-einwohnerzahlen-stand-31-12-wfs-vg250-ew.html
From OpenDataLab
Good county, city, village maps with optional other meta information
Portal: http://opendatalab.de/projects/geojson-utilities/
a download from there creates 'landkreise_simplify0.geojson'. The 0 refers to the highest resolution (1:250000)
GitHub: https://github.com/opendatalab-de/simple-geodata-selector
- RKI Covid-19 API
Great REST API to retrieve the Covid-19 data of the RKI
https://api.corona-zahlen.org/docs/endpoints/districts.html#districts-history-recovered
BUT:
The RKI divides Berlin into districts and that doesn't match regular geoJSON files. Therefore you should use the RKI geoJSON for
German counties/cities: https://npgeo-corona-npgeo-de.hub.arcgis.com/datasets/917fc37a709542548cc3be077a786c17_0/explore to
download 'RKI_Corona_Landkreise.geojson'
"""
class CovidFoliumMapDEcounties(CovidFoliumMap):
"""
This class will expose an interface to deal with Choropleth maps to display Covid-19 data attributes for counties and cities in Germany.
"""
def __init__(self, dataDirectory = '../data'):
""" Constructor
Args:
dataDirectory (str, optional): The data directory to be used for cached data. Defaults to '../data'.
"""
# init members
self.__dataDirectory = dataDirectory + '/'
self.__dfGeo = None
self.__dfData = None
self.__defaultMapOptions = CovidFoliumMap.mapOptions(mapDate=date.today(),
mapAlias = 'MapDEcounty',
mapLocation = [51.3, 10.5],
mapZoom = 6,
bins = [5, 25, 50, 100, 200, 400, 800, 1200, 1600, 2600],
mapAttribute = 'Robert Koch-Institut (RKI), dl-de/by-2-0, CMBT 2022',
tooltipAttributes = ['GeoName',
'Cases',
'Deaths',
'WeeklyCases',
'WeeklyDeaths',
'DailyCases',
'DailyDeaths',
'DailyRecovered',
'Incidence7DayPer100Kpopulation'])
# ensure that the data directory exists, i.e. create it if it is not available
self.__dataDirectory = ensure_path_exists(dataDirectory)
# check if it really exists
if self.__dataDirectory != '':
# get the geo JSON data frame
self.__dfGeo = self.__get_geo_data()
# get the covid data for all counties/cities in the geo dataframe
if self.__dfGeo is not None:
self.__dfData = self.__get_covid_data(self.__dfGeo)
# init base class
super().__init__(self.__dataDirectory)
def __get_geo_data(self):
""" Downloads the JSON file from the RKI server if necessary and opens it to return a geoPandas dataframe. The function throws an
exception in case of an error
Returns:
geo dataframe: the geo dataframe of the German counties and cities or None if it can't load the file
"""
# init return
geoDf = None
# the filename of the geoJSON that is used
targetFilename = self.__dataDirectory + '/' + 'RKI_Corona_Landkreise.geojson'
# check if it exists already
if not os.path.exists(targetFilename):
# download the file
print('Downloading data (RKI_Corona_Landkreise.geojson), that might take some time...')
endpoint = 'https://services7.arcgis.com/mOBPykOjAyBO2ZKk/arcgis/rest/services/RKI_Landkreisdaten/FeatureServer/0/query?where=1%3D1&outFields=*&outSR=4326&f=json'
# the manual download link is
# 'https://npgeo-corona-npgeo-de.hub.arcgis.com/datasets/917fc37a709542548cc3be077a786c17_0/explore?location=51.282342%2C10.714458%2C6.71'
try:
# try to download the file
download_JSON_file(endpoint, targetFilename)
print('Download finished.')
except Exception as e:
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
# now the file should exist
if os.path.exists(targetFilename):
# load the file
geoDf = gpd.read_file(targetFilename)
#print(geoDf.head())
# finally return the geo df
return geoDf
def __get_covid_data(self, geoDf):
""" Downloads the covid-19 data from the RKI servers if necessary, caches them and opens a final csv to return a Pandas dataframe.
Returns:
covid dataframe: the covid data for the German counties and cities or None if it can't load the file
"""
# init the result
df = None
# get the date
today = date.today()
# the prefix of the CSV file is Y-m-d
preFix = today.strftime('%Y-%m-%d') + "-RKIcounty"
# the target filename of the csv to be downloaded
targetFilename = self.__dataDirectory + '/' + preFix + '-db.csv'
# check if it exists already
if os.path.exists(targetFilename):
print('using existing file: ' + targetFilename)
# read the file
df = pd.read_csv(targetFilename)
else:
print('Downloading data (yyyy-mm-dd-RKIcounty-db.csv), that might take some time...')
# build a result df
dfs = []
for countyID in geoDf['RS']:
try:
# get the data for the county
dfCounty = self.__get_county_data_from_web(countyID)
# add it to the list
dfs.append(dfCounty)
except Exception:
msg = 'Error getting the data for ' + str(countyID) + '!'
print(msg)
try:
# finally concatenate all dfs together
df = pd.concat(dfs)
# save it to file
df.to_csv(targetFilename)
print('Download finished.')
except Exception as e:
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
# ensure RS length is 5
if df is not None:
df['RS'] = df['RS'].astype(str).str.zfill(5)
# ...and return df
return df
def __get_county_data_from_web(self, county_ID):
""" Downloads the covid-19 data for the given county-ID
Args:
county_ID string: the county-ID for which we want the data
Raises:
ValueError: In case the data is empty
Returns:
dataframe: A dataframe of the county data
"""
# the endpoint of the request
endpoint = 'https://api.corona-zahlen.org/districts/' + county_ID
# contact the server
res = requests.get(endpoint)
# check if there was a response
if res.ok:
# get the json
res = res.json()
else:
# raise an exception
res.raise_for_status()
# check if the data is not empty
if not bool(res['data']):
raise ValueError("Empty response! County ID might be invalid.")
df = | pd.json_normalize(res['data']) | pandas.json_normalize |
from creator.ingest_runs.genomic_data_loader import (
GenomicDataLoader,
GEN_FILE,
GEN_FILES,
SEQ_EXP,
SEQ_EXPS,
SEQ_EXP_GEN_FILE,
SEQ_EXP_GEN_FILES,
BIO_GEN_FILE,
)
from creator.studies.models import Study
from tests.integration.fixtures import test_study_generator # noqa F401
from kf_lib_data_ingest.common.concept_schema import CONCEPT
from kf_lib_data_ingest.etl.load.load_v2 import LoadStage
import ast
from django.conf import settings
import os
import pandas as pd
import pytest
FAKE_STUDY = Study()
FAKE_STUDY.study_id = "SD_YE0WYE0W"
GF_EXPECTED_COLUMNS = {
CONCEPT.BIOSPECIMEN.TARGET_SERVICE_ID,
CONCEPT.GENOMIC_FILE.DATA_TYPE,
CONCEPT.GENOMIC_FILE.FILE_NAME,
CONCEPT.GENOMIC_FILE.HASH_DICT,
CONCEPT.GENOMIC_FILE.SIZE,
CONCEPT.GENOMIC_FILE.URL_LIST,
CONCEPT.GENOMIC_FILE.FILE_FORMAT,
CONCEPT.GENOMIC_FILE.ID,
CONCEPT.GENOMIC_FILE.SOURCE_FILE,
CONCEPT.GENOMIC_FILE.HARMONIZED,
CONCEPT.GENOMIC_FILE.VISIBLE,
}
@pytest.fixture
def study_generator(test_study_generator): # noqa F811
"""
Generates and returns the realistic fake study.
"""
sg = test_study_generator(study_id=FAKE_STUDY.study_id, total_specimens=5)
sg.ingest_study(dry_run=True)
# Perform mapping operations for mocking _utils.get_entities_.
sg.fake_entities = {}
# Genomic-files
gf_data = [
entry
for _, entry in sg.dataservice_payloads[GEN_FILE].items()
if entry["is_harmonized"] == "False"
]
sg.fake_entities[GEN_FILES] = | pd.DataFrame(gf_data) | pandas.DataFrame |
# Copyright 2019 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
import unittest
from ..tabledisplay import TableDisplay
class TestTableDisplayAPI_date_format(unittest.TestCase):
def test_date_format(self):
# given
df = pd.read_csv(os.path.dirname(__file__) + "/resources/" + 'interest-rates.csv')
df['time'] = | pd.to_datetime(df['time'], utc=True) | pandas.to_datetime |
# --------------
import pandas as pd
from collections import Counter
# Load dataset
data = pd.read_csv(path)
print(data.isnull().sum())
print('Statistical Description : \n', data.describe())
# --------------
import seaborn as sns
from matplotlib import pyplot as plt
sns.set_style(style='darkgrid')
# Store the label values
label = data['Activity']
# plot the countplot
sns.countplot(x=label)
plt.title("Distribution of Target Variable")
plt.xticks(rotation=90)
plt.show()
# --------------
# make the copy of dataset
data_copy = data.copy()
# Create an empty column
data_copy['duration'] = ""
label.head()
# Calculate the duration
duration_df = (data_copy.groupby([label[(label=='WALKING_UPSTAIRS') | (label=='WALKING_DOWNSTAIRS')], 'subject'])['duration'].count() * 1.28)
duration_df = | pd.DataFrame(duration_df) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 11 18:19:29 2019
@author: Administrator
"""
import pdblp
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn')
#con = pdblp.BCon(debug=True, port=8194, timeout=5000)
con = pdblp.BCon(debug=False, port=8194, timeout=6000)
con.start()
index_tickers = ['NYA Index', 'SPX Index', 'CCMP Index','NDX Index','CDAX Index' ,'DAX Index',
'ASX Index','UKX Index', 'TPX Index','NKY Index', 'SHCOMP Index' ,
'SZCOMP Index','XUTUM Index','XU100 Index', 'MEXBOL Index',
'IBOV Index', 'IMOEX Index' , 'JALSH Index']
from datetime import date
start = '20040101'
today = date.today().strftime('%Y%m%d')
firstday = '19991230'
prices_open = con.bdh(index_tickers, 'PX OPEN',firstday, today)
prices_open.columns = [i[0] for i in prices_open.columns]
prices_open = prices_open[index_tickers]
prices_open_int = prices_open.interpolate(method='linear')
prices_open_w = prices_open_int.groupby(pd.Grouper(freq='W')).first()
prices_high = con.bdh(index_tickers, 'PX HIGH',firstday, today)
prices_high.columns = [i[0] for i in prices_high.columns]
prices_high = prices_high[index_tickers]
prices_high_int = prices_high.interpolate(method='linear')
prices_high_w = prices_high_int.groupby(pd.Grouper(freq='W')).max()
prices_low = con.bdh(index_tickers, 'PX LOW',firstday, today)
prices_low.columns = [i[0] for i in prices_low.columns]
prices_low = prices_low[index_tickers]
prices_low_int = prices_low.interpolate(method='linear')
prices_low_w = prices_low_int.groupby(pd.Grouper(freq='W')).min()
prices_close = con.bdh(index_tickers, 'PX LAST',firstday, today)
prices_close.columns = [i[0] for i in prices_close.columns]
prices_close = prices_close[index_tickers]
prices_close_int = prices_close.interpolate(method='linear')
prices_close_w = prices_close_int.groupby(pd.Grouper(freq='W')).last()
var_no1 = '21-1'
returns_open = prices_open_w / prices_close_w.shift(1) - 1
returns_open.columns = [var_no1+'_'+i+'_OPEN' for i in returns_open.columns]
returns_high = prices_high_w / prices_close_w.shift(1) - 1
returns_high.columns = [var_no1+'_'+i+'_HIGH' for i in returns_high.columns]
returns_low = prices_low_w / prices_close_w.shift(1) - 1
returns_low.columns = [var_no1+'_'+i+'_LOW' for i in returns_low.columns]
returns_close = prices_close_w / prices_close_w.shift(1) - 1
returns_close.columns = [var_no1+'_'+i+'_LAST' for i in returns_close.columns]
returns_fromClose_ohlc = pd.concat([returns_open, returns_high, returns_low, returns_close],axis=1)
#returns_fromClose_ohlc.columns = [('_').join(i) for i in zip(np.repeat(ohlc_tickers1,len(index_tickers)),returns_fromClose_ohlc.columns)]
#returns_fromClose_ohlc = returns_fromClose_ohlc[ohlc_tickers]
#returns_fromClose_ohlc.columns = ['21_return_ohlc_US_NY','21_return_ohlc_US_SPX','21_return_ohlc_US_CCMP', '21_return_ohlc_DE','21_return_ohlc_UK','21_return_ohlc_JP','21_return_ohlc_CH_SH','21_return_ohlc_CH_SZ', '21_return_ohlc_TR','21_return_ohlc_MX','21_return_ohlc_BR','21_return_ohlc_RU','21_return_ohlc_SA']
returns_fromClose_ohlc = returns_fromClose_ohlc[returns_fromClose_ohlc.index>=start]
returns_fromClose_ohlc.to_excel('C:/Users/sb0538/Desktop/15022020/excels/21-1_laggedexcessmarketreturnfromclose.xlsx')
##############################################################################
var_no2 = '21-2'
prices_open_w.columns = [var_no2+'_'+i+'_OPEN' for i in prices_open_w.columns]
prices_high_w.columns = [var_no2+'_'+i+'_HIGH' for i in prices_high_w.columns]
prices_low_w.columns = [var_no2+'_'+i+'_LOW' for i in prices_low_w.columns]
prices_close_w.columns = [var_no2+'_'+i+'_LAST' for i in prices_close_w.columns]
prices_ohlc = | pd.concat([prices_open_w, prices_high_w, prices_low_w, prices_close_w],axis=1) | pandas.concat |
#### Setup ####
import numpy as np
import pandas as pd
from scipy import stats
from itertools import repeat
from collections import Counter
import plotly.express as px
import plotly.graph_objects as go
import plotly.figure_factory as ff
import matplotlib.pyplot as plt
from sklearn.linear_model import Lasso, LinearRegression
from sklearn.feature_selection import SelectFromModel
from sklearn.svm import SVR
from sklearn.metrics import r2_score
from sklearn.decomposition import PCA
from sklearn import svm
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.ensemble import IsolationForest
import xgboost as xgb
vColors = ["#049DD9", "#03A64A", "#F2AC29", "#F2CA80", "#F22929"]
# Import
X_train = pd.read_csv("X_train.csv")
y_train = pd.read_csv("y_train.csv")
X_test = pd.read_csv("X_test.csv")
# Distribution comparison between Train and Test Data
ks = []
for i in range(0, 832, 1):
ks.append(stats.ks_2samp(X_train.iloc[:, i], X_test.iloc[:, i]).pvalue)
np.sum(pd.DataFrame(ks)[0] < 0.001)
| pd.DataFrame(ks) | pandas.DataFrame |
from datetime import datetime
from datetime import timedelta
import pandas as pd
from typing import Mapping
import os, sys
dirname = os.path.dirname(__file__)
sys.path.append(dirname)
from constant import GOOGLE_CALENDER_COLS, GOOGLE_CALENDER_FUNCS, GOOGLE_CALENDER_MAPS, EVENT_DAYS, EVENT_COLS
class CoupleEvent(object):
def __init__(self, date_of_dating=None):
if date_of_dating is None:
pass
else:
self.date_of_dating = self._get_date_to_str(date_of_dating)
self.events = pd.DataFrame(EVENT_DAYS, columns=EVENT_COLS)
self.events = self._transform_date(self.events)
def replace_event_table(self, events):
self.events = events
def get_events(self, d_days=True):
self._preprocess()
if d_days:
events = self.events.copy()
events["D-Days"] = (events["날짜"] - self.get_today()).apply(
lambda x: f"D-{x.days+1}" if x.days + 1 > 0 else "Terminate"
)
return events
else:
return self.events
def add_new_events(self, events: Mapping[str, Mapping[str, str]] = {}):
new_events = []
for event_name, event_desc in events.items():
new_event = {"이벤트": event_name, "날짜": event_desc["날짜"], "설명": event_desc["설명"]}
new_events.append(new_event)
new_df = self._transform_date(pd.DataFrame(new_events))
self.events = pd.concat([self.events, new_df], axis=0)
self._preprocess()
def _transform_date(self, df: pd.DataFrame):
df_ = df.copy()
current_year = self.get_today().year
df_["날짜"] = df["날짜"].apply(lambda x: datetime.strptime(f"{current_year}{x}", "%Y%m%d"))
return df_
def _preprocess(self):
self._drop_duplicated()
self._sort_by_date()
self._reset_index()
def _reset_index(self):
self.events = self.events.reset_index(drop=True)
def _drop_duplicated(self):
self.events = self.events.drop_duplicates()
def _sort_by_date(self):
self.events = self.events.sort_values(by=["날짜"])
def _get_date_to_str(self, string_date_Ymd="%Y%m%d"):
return datetime.strptime(string_date_Ymd, "%Y%m%d")
def add_days(self, date: datetime, number_of_day: int):
return date + timedelta(number_of_day)
def get_today(self):
today = datetime.today()
return today
def get_current_year(self):
today = self.get_today()
current_year_first_day = today.replace(month=1, day=1)
return current_year_first_day
def get_next_year(self):
today = self.get_today()
next_year_first_day = today.replace(today.year + 1, month=1, day=1)
return next_year_first_day
def make_anniversary(self, period: int = 100):
new_date = self.date_of_dating
current_year_first_day = self.get_current_year()
next_year_first_day = self.get_next_year()
number_of_periods = 0
current_year_event_collection = []
while True:
number_of_periods += period
new_date = self.add_days(new_date, period)
if (new_date > current_year_first_day) & (new_date < next_year_first_day):
if number_of_periods % 365 == 0:
event_year = int(number_of_periods / 365)
event = {"날짜": new_date, "이벤트": f"{event_year}주년", "설명": f"{event_year}주년(필수)"}
else:
event = {"날짜": new_date, "이벤트": f"{number_of_periods}일", "설명": f"{number_of_periods}일"}
current_year_event_collection.append(event)
else:
if new_date > next_year_first_day:
break
new_df = | pd.DataFrame(current_year_event_collection) | pandas.DataFrame |
# License: Apache-2.0
from gators.encoders.target_encoder import TargetEncoder
from pandas.testing import assert_frame_equal
import pytest
import numpy as np
import pandas as pd
import databricks.koalas as ks
ks.set_option('compute.default_index_type', 'distributed-sequence')
@pytest.fixture
def data():
X = pd.DataFrame({
'A': ['Q', 'Q', 'Q', 'W', 'W', 'W'],
'B': ['Q', 'Q', 'W', 'W', 'W', 'W'],
'C': ['Q', 'Q', 'Q', 'Q', 'W', 'W'],
'D': [1, 2, 3, 4, 5, 6]})
y = pd.Series([0, 0, 0, 1, 1, 0], name='TARGET')
X_expected = pd.DataFrame({
'A': {0: 0.0,
1: 0.0,
2: 0.0,
3: 0.6666666666666666,
4: 0.6666666666666666,
5: 0.6666666666666666},
'B': {0: 0.0, 1: 0.0, 2: 0.5, 3: 0.5, 4: 0.5, 5: 0.5},
'C': {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25, 4: 0.5, 5: 0.5},
'D': {0: 1.0, 1: 2.0, 2: 3.0, 3: 4.0, 4: 5.0, 5: 6.0}})
obj = TargetEncoder().fit(X, y)
return obj, X, X_expected
@pytest.fixture
def data_float32():
X = pd.DataFrame({
'A': ['Q', 'Q', 'Q', 'W', 'W', 'W'],
'B': ['Q', 'Q', 'W', 'W', 'W', 'W'],
'C': ['Q', 'Q', 'Q', 'Q', 'W', 'W'],
'D': [1, 2, 3, 4, 5, 6]})
y = pd.Series([0, 0, 0, 1, 1, 0], name='TARGET')
X_expected = pd.DataFrame({
'A': {0: 0.0,
1: 0.0,
2: 0.0,
3: 0.6666666666666666,
4: 0.6666666666666666,
5: 0.6666666666666666},
'B': {0: 0.0, 1: 0.0, 2: 0.5, 3: 0.5, 4: 0.5, 5: 0.5},
'C': {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25, 4: 0.5, 5: 0.5},
'D': {0: 1.0, 1: 2.0, 2: 3.0, 3: 4.0, 4: 5.0, 5: 6.0}}).astype(np.float32)
obj = TargetEncoder(dtype=np.float32).fit(X, y)
return obj, X, X_expected
@pytest.fixture
def data_no_cat():
X = pd.DataFrame(
np.zeros((6, 3)),
columns=list('ABC'),
)
y = pd.Series([0, 0, 0, 1, 1, 0], name='TARGET')
obj = TargetEncoder().fit(X, y)
return obj, X, X.copy()
@pytest.fixture
def data_ks():
X = ks.DataFrame({
'A': ['Q', 'Q', 'Q', 'W', 'W', 'W'],
'B': ['Q', 'Q', 'W', 'W', 'W', 'W'],
'C': ['Q', 'Q', 'Q', 'Q', 'W', 'W'],
'D': [1, 2, 3, 4, 5, 6]})
y = ks.Series([0, 0, 0, 1, 1, 0], name='TARGET')
X_expected = pd.DataFrame({
'A': {0: 0.0,
1: 0.0,
2: 0.0,
3: 0.6666666666666666,
4: 0.6666666666666666,
5: 0.6666666666666666},
'B': {0: 0.0, 1: 0.0, 2: 0.5, 3: 0.5, 4: 0.5, 5: 0.5},
'C': {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25, 4: 0.5, 5: 0.5},
'D': {0: 1.0, 1: 2.0, 2: 3.0, 3: 4.0, 4: 5.0, 5: 6.0}})
obj = TargetEncoder().fit(X, y)
return obj, X, X_expected
@pytest.fixture
def data_float32_ks():
X = ks.DataFrame({
'A': ['Q', 'Q', 'Q', 'W', 'W', 'W'],
'B': ['Q', 'Q', 'W', 'W', 'W', 'W'],
'C': ['Q', 'Q', 'Q', 'Q', 'W', 'W'],
'D': [1, 2, 3, 4, 5, 6]})
y = ks.Series([0, 0, 0, 1, 1, 0], name='TARGET')
X_expected = pd.DataFrame({
'A': {0: 0.0,
1: 0.0,
2: 0.0,
3: 0.6666666666666666,
4: 0.6666666666666666,
5: 0.6666666666666666},
'B': {0: 0.0, 1: 0.0, 2: 0.5, 3: 0.5, 4: 0.5, 5: 0.5},
'C': {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25, 4: 0.5, 5: 0.5},
'D': {0: 1.0, 1: 2.0, 2: 3.0, 3: 4.0, 4: 5.0, 5: 6.0}}).astype(np.float32)
obj = TargetEncoder(dtype=np.float32).fit(X, y)
return obj, X, X_expected
@pytest.fixture
def data_no_cat_ks():
X = ks.DataFrame(
np.zeros((6, 3)),
columns=list('ABC'),
)
y = ks.Series([0, 0, 0, 1, 1, 0], name='TARGET')
obj = TargetEncoder().fit(X, y)
return obj, X, X.to_pandas().copy()
def test_pd(data):
obj, X, X_expected = data
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_ks(data_ks):
obj, X, X_expected = data_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_pd_np(data):
obj, X, X_expected = data
X_numpy = X.to_numpy()
X_numpy_new = obj.transform_numpy(X_numpy)
X_new = pd.DataFrame(X_numpy_new, columns=X_expected.columns)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_ks_np(data_ks):
obj, X, X_expected = data_ks
X_numpy = X.to_numpy()
X_numpy_new = obj.transform_numpy(X_numpy)
X_new = pd.DataFrame(X_numpy_new, columns=X_expected.columns)
assert_frame_equal(X_new, X_expected)
def test_float32_pd(data_float32):
obj, X, X_expected = data_float32
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_float32_ks(data_float32_ks):
obj, X, X_expected = data_float32_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_float32_pd_np(data_float32):
obj, X, X_expected = data_float32
X_numpy = X.to_numpy()
X_numpy_new = obj.transform_numpy(X_numpy)
X_new = pd.DataFrame(X_numpy_new, columns=X_expected.columns)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_float32_ks_np(data_float32_ks):
obj, X, X_expected = data_float32_ks
X_numpy = X.to_numpy()
X_numpy_new = obj.transform_numpy(X_numpy)
X_new = pd.DataFrame(X_numpy_new, columns=X_expected.columns)
| assert_frame_equal(X_new, X_expected) | pandas.testing.assert_frame_equal |
__all__ = [
'get_calc_rule_ids',
'get_grouped_fm_profile_by_level_and_term_group',
'get_grouped_fm_terms_by_level_and_term_group',
'get_il_input_items',
'get_policytc_ids',
'write_il_input_files',
'write_fm_policytc_file',
'write_fm_profile_file',
'write_fm_programme_file',
'write_fm_xref_file'
]
import copy
import os
import sys
import warnings
import pandas as pd
import numpy as np
from ..utils.calc_rules import get_calc_rules
from ..utils.coverages import SUPPORTED_COVERAGE_TYPES
from ..utils.data import (
factorize_ndarray,
fast_zip_arrays,
get_dataframe,
get_ids,
merge_check,
merge_dataframes,
set_dataframe_column_dtypes,
)
from ..utils.defaults import (
get_default_accounts_profile,
get_default_exposure_profile,
get_default_fm_aggregation_profile,
OASIS_FILES_PREFIXES,
SOURCE_IDX,
)
from ..utils.exceptions import OasisException
from ..utils.fm import (
DEDUCTIBLE_AND_LIMIT_TYPES,
SUPPORTED_FM_LEVELS,
)
from ..utils.log import oasis_log
from ..utils.path import as_path
from ..utils.profiles import (
get_fm_terms_oed_columns,
get_grouped_fm_profile_by_level_and_term_group,
get_grouped_fm_terms_by_level_and_term_group,
get_oed_hierarchy,
)
pd.options.mode.chained_assignment = None
warnings.simplefilter(action='ignore', category=FutureWarning)
def get_calc_rule_ids(il_inputs_df):
"""
Returns a Numpy array of calc. rule IDs from a table of IL input items
:param il_inputs_df: IL input items dataframe
:type il_inputs_df: pandas.DataFrame
:return: Numpy array of calc. rule IDs
:rtype: numpy.ndarray
"""
calc_rules = get_calc_rules().drop(['desc'], axis=1)
calc_rules['id_key'] = calc_rules['id_key'].apply(eval)
terms = ['deductible', 'deductible_min', 'deductible_max', 'limit', 'share', 'attachment']
terms_indicators = ['{}_gt_0'.format(t) for t in terms]
types_and_codes = ['ded_type', 'ded_code', 'lim_type', 'lim_code']
il_inputs_calc_rules_df = il_inputs_df.loc[:, ['item_id'] + terms + terms_indicators + types_and_codes + ['calcrule_id']]
il_inputs_calc_rules_df.loc[:, terms_indicators] = np.where(il_inputs_calc_rules_df[terms] > 0, 1, 0)
il_inputs_calc_rules_df['id_key'] = [t for t in fast_zip_arrays(*il_inputs_calc_rules_df.loc[:, terms_indicators + types_and_codes].transpose().values)]
il_inputs_calc_rules_df = merge_dataframes(il_inputs_calc_rules_df, calc_rules, how='left', on='id_key').fillna(0)
il_inputs_calc_rules_df['calcrule_id'] = il_inputs_calc_rules_df['calcrule_id'].astype('uint32')
if 0 in il_inputs_calc_rules_df.calcrule_id.unique():
err_msg = 'Calculation Rule mapping error, non-matching keys:\n'
no_match_keys = il_inputs_calc_rules_df.loc[
il_inputs_calc_rules_df.calcrule_id == 0
].id_key.unique()
err_msg += ' {}\n'.format(tuple(terms_indicators + types_and_codes))
for key_id in no_match_keys:
err_msg += ' {}\n'.format(key_id)
raise OasisException(err_msg)
return il_inputs_calc_rules_df['calcrule_id'].values
def get_policytc_ids(il_inputs_df):
"""
Returns a Numpy array of policy TC IDs from a table of IL input items
:param il_inputs_df: IL input items dataframe
:type il_inputs_df: pandas.DataFrame
:return: Numpy array of policy TC IDs
:rtype: numpy.ndarray
"""
policytc_cols = [
'layer_id', 'level_id', 'agg_id', 'calcrule_id', 'limit',
'deductible', 'deductible_min', 'deductible_max', 'attachment',
'share'
]
fm_policytc_df = il_inputs_df.loc[:, ['item_id'] + policytc_cols].drop_duplicates()
fm_policytc_df = fm_policytc_df[
(fm_policytc_df['layer_id'] == 1) |
(fm_policytc_df['level_id'] == fm_policytc_df['level_id'].max())
]
return factorize_ndarray(fm_policytc_df.loc[:, policytc_cols[3:]].values, col_idxs=range(len(policytc_cols[3:])))[0]
@oasis_log
def get_il_input_items(
exposure_df,
gul_inputs_df,
accounts_df=None,
accounts_fp=None,
exposure_profile=get_default_exposure_profile(),
accounts_profile=get_default_accounts_profile(),
fm_aggregation_profile=get_default_fm_aggregation_profile()
):
"""
Generates and returns a Pandas dataframe of IL input items.
:param exposure_df: Source exposure
:type exposure_df: pandas.DataFrame
:param gul_inputs_df: GUL input items
:type gul_inputs_df: pandas.DataFrame
:param accounts_df: Source accounts dataframe (optional)
:param accounts_df: pandas.DataFrame
:param accounts_fp: Source accounts file path (optional)
:param accounts_fp: str
:param exposure_profile: Source exposure profile (optional)
:type exposure_profile: dict
:param accounts_profile: Source accounts profile (optional)
:type accounts_profile: dict
:param fm_aggregation_profile: FM aggregation profile (optional)
:param fm_aggregation_profile: dict
:return: IL inputs dataframe
:rtype: pandas.DataFrame
:return Accounts dataframe
:rtype: pandas.DataFrame
"""
# Get the grouped exposure + accounts profile - this describes the
# financial terms found in the source exposure and accounts files,
# which are for the following FM levels: site coverage (# 1),
# site pd (# 2), site all (# 3), cond. all (# 6), policy all (# 9),
# policy layer (# 10). It also describes the OED hierarchy terms
# present in the exposure and accounts files, namely portfolio num.,
# acc. num., loc. num., and cond. num.
profile = get_grouped_fm_profile_by_level_and_term_group(exposure_profile, accounts_profile)
if not profile:
raise OasisException(
'Unable to get a unified FM profile by level and term group. '
'Canonical loc. and/or acc. profiles are possibly missing FM term information: '
'FM term definitions for TIV, deductibles, limit, and/or share.'
)
# Get the FM aggregation profile - this describes how the IL input
# items are to be aggregated in the various FM levels
fmap = fm_aggregation_profile
if not fmap:
raise OasisException(
'FM aggregation profile is empty - this is required to perform aggregation'
)
# Get the OED hierarchy terms profile - this defines the column names for loc.
# ID, acc. ID, policy no. and portfolio no., as used in the source exposure
# and accounts files. This is to ensure that the method never makes hard
# coded references to the corresponding columns in the source files, as
# that would mean that changes to these column names in the source files
# may break the method
oed_hierarchy = get_oed_hierarchy(exposure_profile, accounts_profile)
acc_num = oed_hierarchy['accnum']['ProfileElementName'].lower()
policy_num = oed_hierarchy['polnum']['ProfileElementName'].lower()
portfolio_num = oed_hierarchy['portnum']['ProfileElementName'].lower()
cond_num = oed_hierarchy['condnum']['ProfileElementName'].lower()
# Get the FM terms profile (this is a simplified view of the main grouped
# profile, containing only information about the financial terms)
fm_terms = get_grouped_fm_terms_by_level_and_term_group(grouped_profile_by_level_and_term_group=profile)
# Get the list of financial terms columns for the cond. all (# 6),
# policy all (# 9) and policy layer (# 10) FM levels - all of these columns
# are in the accounts file, not the exposure file, so will have to be
# sourced from the accounts dataframe
cond_pol_layer_levels = ['cond all', 'policy all', 'policy layer']
terms_floats = ['deductible', 'deductible_min', 'deductible_max', 'limit', 'attachment', 'share']
terms_ints = ['ded_code', 'ded_type', 'lim_code', 'lim_type']
terms = terms_floats + terms_ints
term_cols_floats = get_fm_terms_oed_columns(
fm_terms,
levels=cond_pol_layer_levels,
terms=terms_floats
)
term_cols_ints = get_fm_terms_oed_columns(
fm_terms,
levels=cond_pol_layer_levels,
terms=terms_ints
)
term_cols = term_cols_floats + term_cols_ints
# Set defaults and data types for all the financial terms columns in the
# accounts dataframe
defaults = {
**{t: 0.0 for t in term_cols_floats},
**{t: 0 for t in term_cols_ints},
**{cond_num: 0},
**{portfolio_num: '1'}
}
dtypes = {
**{t: 'str' for t in [acc_num, portfolio_num, policy_num]},
**{t: 'float64' for t in term_cols_floats},
**{t: 'uint8' for t in term_cols_ints},
**{t: 'uint16' for t in [cond_num]},
**{t: 'uint32' for t in ['layer_id']}
}
# Get the accounts frame either directly or from a file path if provided
accounts_df = accounts_df if accounts_df is not None else get_dataframe(
src_fp=accounts_fp,
col_dtypes=dtypes,
col_defaults=defaults,
required_cols=(acc_num, policy_num, portfolio_num,),
empty_data_error_msg='No accounts found in the source accounts (acc.) file',
memory_map=True,
)
accounts_df[SOURCE_IDX['acc']] = accounts_df.index
if not (accounts_df is not None or accounts_fp):
raise OasisException('No accounts frame or file path provided')
# Look for a `layer_id` column in the accounts dataframe - this column
# will exist if the accounts file has the column - the user has the option
# of doing this before calling the MDK. The `layer_id` column is simply
# an enumeration of the unique (portfolio num., acc. num., policy num.)
# combinations in the accounts file. If the column doesn't exist then
# a custom method is called that will generate this column and set it
# in the accounts dataframe
if 'layer_id' not in accounts_df:
accounts_df['layer_id'] = get_ids(accounts_df, [portfolio_num, acc_num, policy_num], group_by=[portfolio_num, acc_num])
# Drop all columns from the accounts dataframe which are not either one of
# portfolio num., acc. num., policy num., cond. numb., layer ID, or one of
# the source columns for the financial terms present in the accounts file (the
# file should contain all financial terms relating to the cond. all (# 6),
# policy all (# 9) and policy layer (# 10) FM levels)
usecols = [acc_num, portfolio_num, policy_num, cond_num, 'layer_id', SOURCE_IDX['acc']] + term_cols
accounts_df.drop([c for c in accounts_df.columns if c not in usecols], axis=1, inplace=True)
try:
# Create a list of all the IL columns for the site pd (# 2) and site all (# 3)
# levels - these columns are in the exposure file, not the accounts
# file, and so must be sourced from the exposure dataframe
site_pd_and_site_all_term_cols_floats = get_fm_terms_oed_columns(fm_terms, levels=['site pd', 'site all'], terms=terms_floats)
site_pd_and_site_all_term_cols_ints = get_fm_terms_oed_columns(fm_terms, levels=['site pd', 'site all'], terms=terms_ints)
site_pd_and_site_all_term_cols = site_pd_and_site_all_term_cols_floats + site_pd_and_site_all_term_cols_ints
# Check if any of these columns are missing in the exposure frame, and if so
# set the missing columns with a default value of 0.0 in the exposure frame
missing_floats = set(site_pd_and_site_all_term_cols_floats).difference(exposure_df.columns)
missing_ints = set(site_pd_and_site_all_term_cols_ints).difference(exposure_df.columns)
defaults = {
**{t: 0.0 for t in missing_floats},
**{t: 0 for t in missing_ints}
}
if defaults:
exposure_df = get_dataframe(src_data=exposure_df, col_defaults=defaults)
# First, merge the exposure and GUL inputs frame to augment the GUL inputs
# frame with financial terms for level 2 (site PD) and level 3 (site all) -
# the GUL inputs frame effectively only contains financial terms related to
# FM level 1 (site coverage)
gul_inputs_df = merge_dataframes(
exposure_df.loc[:, site_pd_and_site_all_term_cols + ['loc_id']],
gul_inputs_df,
join_on='loc_id',
how='inner'
)
gul_inputs_df.rename(columns={'item_id': 'gul_input_id'}, inplace=True)
dtypes = {t: 'float64' for t in site_pd_and_site_all_term_cols}
gul_inputs_df = set_dataframe_column_dtypes(gul_inputs_df, dtypes)
# check for empty intersection between dfs
merge_check(
gul_inputs_df[[portfolio_num, acc_num, 'layer_id', cond_num]],
accounts_df[[portfolio_num, acc_num, 'layer_id', cond_num]],
on=[portfolio_num, acc_num, 'layer_id', cond_num]
)
# Construct a basic IL inputs frame by merging the combined exposure +
# GUL inputs frame above, with the accounts frame, on portfolio no.,
# account no. and layer ID (by default items in the GUL inputs frame
# are set with a layer ID of 1)
il_inputs_df = merge_dataframes(
gul_inputs_df,
accounts_df,
on=[portfolio_num, acc_num, 'layer_id', cond_num],
how='left',
drop_duplicates=True
)
# Mark the exposure dataframes for deletion
del exposure_df
# At this point the IL inputs frame will contain essentially only
# items for the coverage FM level, but will include multiple items
# relating to single GUL input items (the higher layer items).
# If the merge is empty raise an exception - this will happen usually
# if there are no common acc. numbers between the GUL input items and
# the accounts listed in the accounts file
if il_inputs_df.empty:
raise OasisException(
'Inner merge of the GUL inputs + exposure file dataframe '
'and the accounts file dataframe ({}) on acc. number '
'is empty - '
'please check that the acc. number columns in the exposure '
'and accounts files respectively have a non-empty '
'intersection'.format(accounts_fp)
)
# Drop all columns from the IL inputs dataframe which aren't one of
# necessary columns in the GUL inputs dataframe, or one of policy num.,
# GUL input item ID, or one of the source columns for the
# non-coverage FM levels (site PD (# 2), site all (# 3), cond. all (# 6),
# policy all (# 9), policy layer (# 10))
usecols = (
gul_inputs_df.columns.to_list() +
[policy_num, 'gul_input_id'] +
([SOURCE_IDX['loc']] if SOURCE_IDX['loc'] in il_inputs_df else []) +
([SOURCE_IDX['acc']] if SOURCE_IDX['acc'] in il_inputs_df else []) +
site_pd_and_site_all_term_cols +
term_cols
)
il_inputs_df.drop(
[c for c in il_inputs_df.columns if c not in usecols],
axis=1,
inplace=True
)
# Mark the GUL inputs frame for deletion - no longer needed
del gul_inputs_df
# The coverage FM level (site coverage, # 1) ID
cov_level_id = SUPPORTED_FM_LEVELS['site coverage']['id']
# Now set the IL input item IDs, and some other required columns such
# as the level ID, and initial values for some financial terms,
# including the calcrule ID and policy TC ID
il_inputs_df = il_inputs_df.assign(
level_id=cov_level_id,
attachment=0.0,
share=0.0,
calcrule_id=0,
policytc_id=0
)
# Set data types for the newer columns just added
dtypes = {
**{t: 'float64' for t in ['attachment', 'share']},
**{t: 'uint32' for t in ['level_id', 'calcrule_id', 'policytc_id']}
}
il_inputs_df = set_dataframe_column_dtypes(il_inputs_df, dtypes)
# Drop any items with layer IDs > 1, reset index and order items by
# GUL input ID.
il_inputs_df = il_inputs_df[il_inputs_df['layer_id'] == 1]
il_inputs_df.reset_index(drop=True, inplace=True)
il_inputs_df.sort_values('gul_input_id', axis=0, inplace=True)
# At this stage the IL inputs frame should only contain coverage level
# layer 1 inputs, and the financial terms are already present from the
# earlier merge with the exposure and GUL inputs frame - the GUL inputs
# frame should already contain the coverage level terms
# The list of financial terms for the sub-layer levels, which are
# site pd (# 2), site all (# 3), cond. all (# 6), policy all (# 9) -
# the terms for these levels do not include "attachment" or "share",
# which do exist for the (policy) layer level (# 10); also the
# layer level terms do not include ded. or limit codes or types
terms_floats.remove('attachment')
terms_floats.remove('share')
terms = terms_floats + terms_ints
# Steps to filter out any intermediate FM levels which have no
# financial terms, and also drop all the OED columns for the terms
# defined for these levels
def level_has_fm_terms(level, terms):
try:
level_terms_cols = get_fm_terms_oed_columns(fm_terms, levels=[level], terms=terms)
return il_inputs_df.loc[:, level_terms_cols].any().any()
except KeyError:
return False
intermediate_fm_levels = [
level for level in list(SUPPORTED_FM_LEVELS)[1:-1]
if level_has_fm_terms(level, terms)
]
fm_levels_with_no_terms = list(set(list(SUPPORTED_FM_LEVELS)[1:-1]).difference(intermediate_fm_levels))
no_terms_cols = get_fm_terms_oed_columns(fm_terms, levels=fm_levels_with_no_terms, terms=terms)
il_inputs_df.drop(no_terms_cols, axis=1, inplace=True)
# Define a list of all supported OED coverage types in the exposure
supp_cov_types = [v['id'] for v in SUPPORTED_COVERAGE_TYPES.values()]
# For coverage level (level_id = 1) set the `agg_id` to `coverage id`
il_inputs_df.agg_id = il_inputs_df.coverage_id
# The main loop for processing the financial terms for the sub-layer
# non-coverage levels - currently these are site pd (# 2), site all (# 3),
# cond. all (# 6), policy all (# 9).
#
# Each level is initially a dataframe copy of the main IL inputs
# dataframe, which at the start only represents coverage level input
# items. Using the level terms profile the following steps take place
# in the loop:
#
# (1) financial terms defined for the level are set
# (2) coverage type filters for the blanket deductibles and limits, if
# they are defined in the profiles, are applied
# (3) any blanket deductibles or limits which are expressed as TIV
# ratios are converted to TIV shares
#
# Finally, the processed level dataframe is concatenated with the
# main IL inputs dataframe, with the financial terms OED columns for
# level removed
for level in intermediate_fm_levels:
level_id = SUPPORTED_FM_LEVELS[level]['id']
level_terms = [t for t in terms if fm_terms[level_id][1].get(t)]
level_term_cols = get_fm_terms_oed_columns(fm_terms, level_ids=[level_id], terms=terms)
level_df = il_inputs_df[il_inputs_df['level_id'] == cov_level_id].drop_duplicates()
level_df['level_id'] = level_id
agg_key = [v['field'].lower() for v in fmap[level_id]['FMAggKey'].values()]
level_df['agg_id'] = factorize_ndarray(level_df.loc[:, agg_key].values, col_idxs=range(len(agg_key)))[0]
if level == 'cond all':
level_df.loc[:, level_term_cols] = level_df.loc[:, level_term_cols].fillna(0)
else:
level_df.loc[:, level_term_cols] = level_df.loc[:, level_term_cols].fillna(method='ffill')
level_df.loc[:, level_term_cols] = level_df.loc[:, level_term_cols].fillna(0)
level_df.loc[:, level_terms] = level_df.loc[:, level_term_cols].values
level_df['deductible'] = np.where(
level_df['coverage_type_id'].isin((profile[level_id][1].get('deductible') or {}).get('CoverageTypeID') or supp_cov_types),
level_df['deductible'],
0
)
level_df['limit'] = np.where(
level_df['coverage_type_id'].isin((profile[level_id][1].get('limit') or {}).get('CoverageTypeID') or supp_cov_types),
level_df['limit'],
0
)
il_inputs_df = | pd.concat([il_inputs_df, level_df], sort=True, ignore_index=True) | pandas.concat |
"""
Several references:
A good, comic tutorial to learn Markov Chain:
https://hackernoon.com/from-what-is-a-markov-model-to-here-is-how-markov-models-work-1ac5f4629b71
Tutorial (example code for using networkx graphviz with pandas dataframe):
http://www.blackarbs.com/blog/introduction-hidden-markov-models-python-networkx-sklearn/2/9/2017
"""
import xmltodict, io, glob, json, os, re, random
from collections import defaultdict
import numpy as np
import random as rm
from itertools import chain
import pandas as pd
# import networkx.drawing.nx_pydot as gl
import networkx as nx
import matplotlib.pyplot as plt
from pprint import pprint
##matplotlib inline
ms_tags = ['CQ', 'FD', 'FQ', 'GG', 'IR', 'JK', 'NF', 'O', 'OQ', 'PA', 'PF', 'RQ']
ms_file = os.path.normpath(r'./data/msdialog/MSDialog-Intent.json')
ms_json =open(ms_file, 'r', encoding='utf8').read()
ms_dict = json.loads(ms_json)
# loading MSIntent json file
ms_intentlist = []
for secnum in ms_dict.keys():
for utterance in ms_dict[secnum]["utterances"]:
utt_tags = utterance["tags"].replace(" GG", "").replace("GG ", "")
ms_intentlist.append(tuple([secnum,utterance["id"], utt_tags]))
# Markov model
# count dictionary
ct_dict = defaultdict(lambda: defaultdict(int))
rawct_dict = defaultdict(int)
START = "INITIAL"
END = "TERMINAL"
UNK = "<UNKNOWN>"
prev_tags = [START]
prev_sec = "0"
for ms in ms_intentlist:
current_tags = ms[2].split(" ")
if "" in current_tags:
current_tags.remove("")
current_sec = ms[0]
if current_sec == prev_sec or prev_sec == "0":
for j in current_tags:
rawct_dict[j] += 1
for i in prev_tags:
ct_dict[i][j] += 1
else:
for i in prev_tags:
ct_dict[i][END] += 1
for j in current_tags:
ct_dict[START][j] += 1
rawct_dict[j] += 1
prev_tags = current_tags
prev_sec = current_sec
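# Illustrative sketch (an assumed next step, not code from the original script): the raw
# transition counts collected in ct_dict above could be row-normalised into transition
# probabilities like this.
def _example_transition_probabilities(count_dict):
    """ Illustrative only: turn {prev_tag: {next_tag: count}} into row-normalised probabilities. """
    counts = pd.DataFrame(count_dict).T.fillna(0)
    # divide every row by its sum so each row of outgoing probabilities adds up to 1
    return counts.div(counts.sum(axis=1), axis=0)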
# create state space and initial state probabilities
states = ms_tags
pi = [1] + [0]*(len(ms_tags)-1)
state_space = | pd.Series(pi, index=states, name='states') | pandas.Series |
import sys
import pandas as pd
import os
import numpy as np
import random
from math import ceil
from igraph import Graph
from signet.cluster import Cluster
from scipy import sparse as sp
from scipy import io
import networkx as nx
from sklearn import metrics
import seaborn as sns
import time
import graphC
wd = os.getcwd()
sys.path.append(wd)
os.chdir(wd)
pos_adj = np.loadtxt('Input/HT_pos_edges_adj.csv', delimiter=' ')
neg_adj = np.loadtxt('Input/HT_neg_edges_adj.csv', delimiter=' ')
pos_adj_sp = sp.csc_matrix(pos_adj)
neg_adj_sp = sp.csc_matrix(neg_adj)
c = Cluster((pos_adj_sp, neg_adj_sp))
L_none = c.spectral_cluster_laplacian(k = 3, normalisation='none')
L_none = pd.DataFrame(L_none).T
L_sym = c.spectral_cluster_laplacian(k = 3, normalisation='sym')
L_sym = pd.DataFrame(L_sym).T
L_sym_sep = c.spectral_cluster_laplacian(k = 3, normalisation='sym_sep')
L_sym_sep = pd.DataFrame(L_sym_sep).T
print(L_sym_sep)
BNC_none = c.spectral_cluster_bnc(k=3, normalisation='none')
BNC_none = | pd.DataFrame(BNC_none) | pandas.DataFrame |
import pandas as pd
import ast
import sys
import os.path
from pandas.core.algorithms import isin
sys.path.insert(1,
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import dateutil.parser as parser
from utils.mysql_utils import separator
from utils.io import read_json
from utils.scraping_utils import remove_html_tags
from utils.user_utils import infer_role
from graph.arango_utils import *
import pgeocode
def cast_to_float(v):
try:
return float(v)
except ValueError:
return v
def convert_to_iso8601(text):
date = parser.parse(text)
return date.isoformat()
def load_member_summaries(
source_dir="data_for_graph/members",
filename="company_check",
# concat_uk_sector=False
):
'''
LOAD FLAT FILES OF MEMBER DATA
'''
dfs = []
for membership_level in ("Patron", "Platinum", "Gold", "Silver", "Bronze", "Digital", "Freemium"):
summary_filename = os.path.join(source_dir, membership_level, f"{membership_level}_{filename}.csv")
print ("reading summary from", summary_filename)
dfs.append(pd.read_csv(summary_filename, index_col=0).rename(columns={"database_id": "id"}))
summaries = pd.concat(dfs)
# if concat_uk_sector:
# member_uk_sectors = pd.read_csv(f"{source_dir}/members_to_sector.csv", index_col=0)
# # for col in ("sectors", "divisions", "groups", "classes"):
# # member_uk_sectors[f"UK_{col}"] = member_uk_sectors[f"UK_{col}"].map(ast.literal_eval)
# summaries = summaries.join(member_uk_sectors, on="member_name", how="left")
return summaries
def populate_sectors(
source_dir="data_for_graph",
db=None):
'''
CREATE AND ADD SECTOR (AS DEFINED IN MIM DB) NODES TO GRAPH
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Sectors", db)
sectors = pd.read_csv(f"{source_dir}/all_sectors.csv", index_col=0)
i = 0
for _, row in sectors.iterrows():
sector_name = row["sector_name"]
print ("creating document for sector", sector_name)
document = {
"_key": str(i),
"name": sector_name,
"sector_name": sector_name,
"id": row["id"]
}
insert_document(db, collection, document)
i += 1
def populate_commerces(
data_dir="data_for_graph",
db=None):
'''
CREATE AND ADD COMMERCE (AS DEFINED IN MIM DB) NODES TO GRAPH
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Commerces", db)
commerces = | pd.read_csv(f"{data_dir}/all_commerces_with_categories.csv", index_col=0) | pandas.read_csv |
import sys
import os
import pandas as pd
import streamlit as st
from datetime import datetime
from streamlit import cli as stcli
from optimization import Optmizer
from portfolio import Portfolio_Analyzer
class Dashboard():
def start():
st.title("Portfolio Analysis")
df = pd.DataFrame({
'first column': [1, 2, 3, 4],
'second column': [10, 20, 30, 40]
})
st.write(df)
sys.argv = ["streamlit", "run", __file__]
sys.exit(stcli.main())
def show():
st.title("Portfolio Optimization")
st.markdown("#### Select the assets: ")
symbols = [
'GBTC',
'EXPI',
'AMD',
'FIV',
'CYRX',
'NVDA',
'ENPH',
'RNG',
'APPS',
'HAL',
'SLB',
'OXY',
'EOG',
'HES',
'XOM',
'APA',
'COP',
'PXD',
'AMZN',
'MSFT',
'DISCK',
'DVN'
]
assets_symbols = st.multiselect(
"Assets:",
sorted(symbols)
)
st.markdown("#### Select the start date to make the analysys:")
start_date = st.date_input(
"Start Date - Note: The end date will be the current time."
)
end_date = datetime.today().date()
if start_date >= end_date:
st.error('Error! Invalid date interval.')
dwn_data_btn = st.button('Download data')
analyzer = Portfolio_Analyzer()
assets_data = None
if dwn_data_btn:
assets_data = analyzer.get_price_data(
symbols=assets_symbols,
start_date=start_date,
end_date=end_date
)
st.markdown('##### Downloaded assets:')
st.table(assets_data.head())
assets_data.to_csv('opt_results/downloaded_data.csv')
st.markdown('#### Capital allocation ($): ')
allocation = st.number_input(
'Allocation',
min_value=0,
value=1
)
st.markdown('#### Risk-Free Rate:')
risk_free_rate = st.number_input(
'Risk-Free',
0.0,
1.0,
value=0.0697,
step=0.01
)
st.header("GA Parameters")
num_generations = int(st.number_input(
'Number of Generations: ',
1,
value=50
))
sol_per_pop = int(st.number_input(
'Number of solutions in the population: ',
1,
value=40
))
num_parents_mating = int(st.number_input(
'Number of solutions to be selected as parents in the mating pool: ',
1,
max_value=int(sol_per_pop),
value=15
))
num_genes = len(assets_symbols)
solution = None
run_sim_btn = st.button('Run Simulation')
if run_sim_btn:
assets_data = pd.read_csv('opt_results/downloaded_data.csv')
assets_data['Date'] = | pd.to_datetime(assets_data['Date']) | pandas.to_datetime |
import unittest
import tempfile
import numpy as np
import pandas as pd
from supervised.preprocessing.preprocessing_exclude_missing import (
PreprocessingExcludeMissingValues,
)
class PreprocessingExcludeMissingValuesTest(unittest.TestCase):
def test_transform(self):
d_test = {
"col1": [1, 1, np.nan, 3],
"col2": ["a", "a", np.nan, "a"],
"col3": [1, 1, 1, 3],
"col4": ["a", "a", "b", "c"],
"y": [np.nan, 1, np.nan, 2],
}
df_test = | pd.DataFrame(data=d_test) | pandas.DataFrame |
''' Toro 1996 method for randomizing shear wave velocity
DESCRIPTION:
The Toro method is a first-order auto-regressive model used to randomize shear wave
velocity. Note that the functions here are QUITE simplified, because the
interlayer correlation coefficient is assumed constant with depth. Maybe one day
I'll code everything in, but I just don't have the need for it right now.
The workflow is as follows:
1. We start with a dataframe "all_data" which contains data for shear wave
velocity profiles. Must have columns ['name', 'depth', 'vs']
2. The data is paired into adjacent layers in the dataframe "paired_data". A
maximum threshold is established for two layers to be considered "adjacent"
(see function: get_paired_data).
3. Interlayer correlation coefficients are calculated for the paired data,
in the dataframe "IL_corr_coeffs". Here, depth bins may be specified so that
paired data within a range of "mid_depth" are considered. A minimum number of
points (min_pts) may be specified as a requirement to calculate corr coeffs
(see function: get_IL_corr).
'''
import numpy as np
import pandas as pd
# ------------------------------------------------------------------------------
# Main Functions
# ------------------------------------------------------------------------------
def get_Toro_standrd_corr(site_class):
# TODO
pass
def gen_toro_realization(u_lnvs, corr_coeffs, sigma_lnvs):
''' Generates a single Toro (1996) realization of a randomized Vs profile
u_lnvs = array of length n with the mean of ln(Vs) for each layer
corr_coeffs = array of length (n-1) with interlayer correlation coefficients
sigma_lnvs = float with the standard deviation of ln(Vs)
Returns (Z, Vs): the standard normal field and the randomized velocities
'''
Z = np.empty_like(u_lnvs)
Z[0] = np.random.normal(0, 1)
for i, rho in enumerate(corr_coeffs):
epsilon = np.random.normal(0, 1)
Z[i + 1] = rho * Z[i] + epsilon * (1 - rho**2) ** 0.5
Vs = np.exp(u_lnvs + Z * sigma_lnvs)
return Z, Vs
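# The function below is an illustrative usage sketch (hypothetical layer values, not taken
# from any source data): it generates one randomized four-layer profile assuming a constant
# interlayer correlation of 0.8 and a ln(Vs) standard deviation of 0.15.
def _example_toro_realization():
    ''' Illustrative only - not part of the original workflow '''
    u_lnvs = np.log(np.array([150.0, 200.0, 260.0, 320.0]))  # hypothetical mean ln(Vs) per layer
    corr_coeffs = np.full(3, 0.8)                            # hypothetical constant interlayer correlation
    sigma_lnvs = 0.15                                        # hypothetical standard deviation of ln(Vs)
    return gen_toro_realization(u_lnvs, corr_coeffs, sigma_lnvs)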
def get_IL_corr(all_data:pd.DataFrame , dbin_edges:np.array,
dintv_max:float = 1, min_pts:float = 10) -> pd.DataFrame:
''' Calculates interlayer correlation coefficients for paired data
Purpose
-------
Given adjacent Vs measurements and a corresponding mid_depth, this
calculates the interlayer correlation coefficient in depth bins.
Parameters
----------
all_data : pandas dataframe
Dataframe with shear wave velocity profiles to be processed.
Must at least have the columns: ['name', 'depth', 'vs']
where 'name' is used to separate data from different tests
dbin_edges : numpy array
Depth intervals to be used in the depth bins (edges).
dintv_max : float (defaults to 1)
Maximum distance between two adjacent layers that is allowed in order
to consider the measurements a "pair".
min_pts : float (defaults to 10)
Minimum number of points required to report a correlation coefficient.
Returns
-------
paired_data : pandas dataframe
Dataframe with paired adjacent Vs measurements, with columns
['mid_depth', 'prev_vs', 'next_vs'] (see get_paired_data).
IL_corr_coeffs : pandas dataframe
Dataframe with results of the correlation coefficient and number of points
for each depth bin.
Notes
-----
* This is a simplification of Toro's correlation coefficients, since it is
assumed that the thickness of the layers is constant. This is a fair
assumption for SCPTs, but may not be the case for other types of tests.
'''
# First, get paired data
paired_data = get_paired_data(all_data, dintv_max)
# Initalize output dataframe
out_cols = ['mid_depth', 'IL_corr_coeff', 'num_pts']
IL_corr_coeffs = pd.DataFrame({}, columns = out_cols)
# Iterate through the depth bins
for d_from, d_to in zip(dbin_edges[:-1], dbin_edges[1:]):
# Get the middle depth of this bin and establish a mask
mid_depth = (d_from + d_to) / 2
mask = (paired_data['mid_depth'] >= d_from) & \
(paired_data['mid_depth'] < d_to)
# Get the number of datapoints and paired data
n = np.sum(mask)
prev_vs = paired_data.loc[mask, 'prev_vs'].values
next_vs = paired_data.loc[mask, 'next_vs'].values
# If there are less than min_pts data points, don't report correlations
if n < min_pts:
rho = np.nan
# Otherwise, calculate it (checked by hand and it looks good :)
else:
rho = np.corrcoef(np.stack([prev_vs, next_vs], axis = 0))[0,1]
# Append outputs to correlation coefficient dataframe
outputs = {'mid_depth': mid_depth, 'IL_corr_coeff':rho, 'num_pts':n}
IL_corr_coeffs = IL_corr_coeffs.append(outputs, ignore_index = True)
return paired_data, IL_corr_coeffs
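# Illustrative usage sketch (hypothetical soundings; the column names follow the docstring
# above). min_pts is lowered to 1 only so that this toy dataset produces a coefficient.
def _example_get_IL_corr():
    ''' Illustrative only - interlayer correlations for two fake soundings '''
    fake_profiles = pd.DataFrame({
        'name': ['SCPT-1'] * 3 + ['SCPT-2'] * 3,
        'depth': [1.0, 2.0, 3.0] * 2,
        'vs': [150.0, 180.0, 210.0, 160.0, 190.0, 220.0],
    })
    return get_IL_corr(fake_profiles, dbin_edges=np.array([0.0, 2.0, 4.0]), min_pts=1)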
# ------------------------------------------------------------------------------
# Helper Functions
# ------------------------------------------------------------------------------
def get_paired_data(all_data:pd.DataFrame, dintv_max:float = 1) -> pd.DataFrame:
''' Creates pairs of adjacent Vs layers for the provided data
Purpose
-------
Utility function that pairs data in the dataframe "all_data", which is
assumed to contain shear wave velocity profiles for many SCPTs or similar
tests.
If two adjacent measurements are farther than "dintv_max" apart, then
the pair will not be added to the paired data.
Parameters
----------
all_data : pandas dataframe
Dataframe with shear wave velocity profiles to be processed.
Must at least have the columns: ['name', 'depth', 'vs']
where 'name' is used to separate data from different tests
dintv_max : float (optional)
Maximum distance between two adjacent layers that is allowed in order
to consider the measurements a "pair". Defaults to 1.
Returns
-------
paired_data : pandas dataframe
Dataframe with paired data, with columns:
'mid_depth': corresponding to the average depth of adjacent layers
'prev_vs' : shear wave velocity in shallower layer
'next_vs' : shear wave velocity in deeper layer
Notes
-----
* Not the most efficient, because it iterates through each row in all_data.
Couldn't figure out a way to do it without a loop and still check that the
d_intv requirements are met and that we're not combining different
soundings. Might be worth trying again.
'''
# Check that the necessary columns exist in df_data
for req_col in ['name', 'depth', 'vs']:
if req_col not in list(all_data):
raise Exception('df_data is missing column: ' + req_col)
# First, generate depth interval column for all SCPTS (or similar)
for _, one_cpt_data in all_data.groupby('name'):
depth = one_cpt_data['depth'].values
dintv = np.concatenate([[np.nan], depth[1:] - depth[:-1] ], axis = 0)
all_data.loc[one_cpt_data.index, 'dintv'] = dintv
# Initialize output dataframe
paired_data = | pd.DataFrame({}, columns=['mid_depth', 'prev_vs', 'next_vs']) | pandas.DataFrame |
import pandas as pd
import datetime
import os
from textblob import TextBlob
stockIndex = pd.read_excel("./BSIFinal.xlsx")
stockIndexDF = | pd.DataFrame(stockIndex) | pandas.DataFrame |
__author__ = 'saeedamen' # <NAME> / <EMAIL>
#
# Copyright 2015 Thalesians Ltd. - http//www.thalesians.com / @thalesians
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
TechIndicator
Calculates various technical indicators and associated trading signals.
"""
import pandas
import numpy
from pythalesians.util.loggermanager import LoggerManager
from pythalesians.timeseries.calcs.timeseriescalcs import TimeSeriesCalcs
class TechIndicator:
def __init__(self):
self.logger = LoggerManager().getLogger(__name__)
self._techind = None
self._signal = None
def create_tech_ind(self, data_frame_non_nan, name, tech_params, data_frame_non_nan_early = None):
self._signal = None
data_frame = data_frame_non_nan.fillna(method="ffill")
if data_frame_non_nan_early is not None:
data_frame_early = data_frame_non_nan_early.fillna(method="ffill")
if name == "SMA":
if (data_frame_non_nan_early is not None):
# calculate the lagged sum of the n-1 point
rolling_sum = pandas.rolling_sum(data_frame.shift(1), tech_params.sma_period - 1)
# add non-nan one for today
rolling_sum = rolling_sum + data_frame_early
# calculate average = sum / n
self._techind = rolling_sum / tech_params.sma_period
narray = numpy.where(data_frame_early > self._techind, 1, -1)
else:
self._techind = pandas.rolling_mean(data_frame, tech_params.sma_period)
narray = numpy.where(data_frame > self._techind, 1, -1)
self._signal = pandas.DataFrame(index = data_frame.index, data = narray)
self._signal.loc[0:tech_params.sma_period] = numpy.nan
self._signal.columns = [x + " SMA Signal" for x in data_frame.columns.values]
self._techind.columns = [x + " SMA" for x in data_frame.columns.values]
elif name == "ROC":
if (data_frame_non_nan_early is not None):
self._techind = data_frame_early / data_frame.shift(tech_params.roc_period) - 1
else:
self._techind = data_frame / data_frame.shift(tech_params.roc_period) - 1
narray = numpy.where(self._techind > 0, 1, -1)
self._signal = pandas.DataFrame(index = data_frame.index, data = narray)
self._signal.loc[0:tech_params.roc_period] = numpy.nan
self._signal.columns = [x + " ROC Signal" for x in data_frame.columns.values]
self._techind.columns = [x + " ROC" for x in data_frame.columns.values]
elif name == "SMA2":
sma = pandas.rolling_mean(data_frame, tech_params.sma_period)
sma2 = pandas.rolling_mean(data_frame, tech_params.sma2_period)
narray = numpy.where(sma > sma2, 1, -1)
self._signal = pandas.DataFrame(index = data_frame.index, data = narray)
self._signal.columns = [x + " SMA2 Signal" for x in data_frame.columns.values]
sma.columns = [x + " SMA" for x in data_frame.columns.values]
sma2.columns = [x + " SMA2" for x in data_frame.columns.values]
most = max(tech_params.sma_period, tech_params.sma2_period)
self._signal.loc[0:most] = numpy.nan
self._techind = pandas.concat([sma, sma2], axis = 1)
elif name in ['RSI']:
# delta = data_frame.diff()
#
# dUp, dDown = delta.copy(), delta.copy()
# dUp[dUp < 0] = 0
# dDown[dDown > 0] = 0
#
# rolUp = pandas.rolling_mean(dUp, tech_params.rsi_period)
# rolDown = pandas.rolling_mean(dDown, tech_params.rsi_period).abs()
#
# rsi = rolUp / rolDown
# Get the difference in price from previous step
delta = data_frame.diff()
# Get rid of the first row, which is NaN since it did not have a previous
# row to calculate the differences
delta = delta[1:]
# Make the positive gains (up) and negative gains (down) Series
up, down = delta.copy(), delta.copy()
up[up < 0] = 0
down[down > 0] = 0
# Calculate the EWMA
roll_up1 = pandas.stats.moments.ewma(up, tech_params.rsi_period)
roll_down1 = pandas.stats.moments.ewma(down.abs(), tech_params.rsi_period)
# Calculate the RSI based on EWMA
RS1 = roll_up1 / roll_down1
RSI1 = 100.0 - (100.0 / (1.0 + RS1))
# Calculate the SMA
roll_up2 = pandas.rolling_mean(up, tech_params.rsi_period)
roll_down2 = pandas.rolling_mean(down.abs(), tech_params.rsi_period)
# Calculate the RSI based on SMA
RS2 = roll_up2 / roll_down2
RSI2 = 100.0 - (100.0 / (1.0 + RS2))
self._techind = RSI2
self._techind.columns = [x + " RSI" for x in data_frame.columns.values]
signal = data_frame.copy()
sells = (signal.shift(-1) < tech_params.rsi_lower) & (signal > tech_params.rsi_lower)
buys = (signal.shift(-1) > tech_params.rsi_upper) & (signal < tech_params.rsi_upper)
# print (buys[buys == True])
# buys
signal[buys] = 1
signal[sells] = -1
signal[~(buys | sells)] = numpy.nan
signal = signal.fillna(method = 'ffill')
self._signal = signal
self._signal.loc[0:tech_params.rsi_period] = numpy.nan
self._signal.columns = [x + " RSI Signal" for x in data_frame.columns.values]
elif name in ["BB"]:
## calculate Bollinger bands
mid = pandas.rolling_mean(data_frame, tech_params.bb_period)
mid.columns = [x + " BB Mid" for x in data_frame.columns.values]
std_dev = | pandas.rolling_std(data_frame, tech_params.bb_period) | pandas.rolling_std |
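# Illustrative note (function and parameter names here are placeholders): the class above
# relies on the long-removed module-level pandas.rolling_mean / pandas.rolling_sum /
# pandas.rolling_std / pandas.stats.moments.ewma helpers. On a current pandas the same
# quantities come from the method-style API, e.g. for the Bollinger-band pieces:
def bollinger_bands(prices, bb_period, n_std=2):
    mid = prices.rolling(bb_period).mean()
    std_dev = prices.rolling(bb_period).std()
    upper = mid + n_std * std_dev
    lower = mid - n_std * std_dev
    return mid, upper, lower
# Likewise, pandas.stats.moments.ewma(up, tech_params.rsi_period) corresponds to
# up.ewm(com=tech_params.rsi_period).mean() (the old ewma took com as its second argument).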
# Copyright © 2019 <NAME>
"""
Test for the ``preprocess._aggregate_columns._difference`` module.
"""
from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
import unittest
# Tests for:
from ...clean_variables import VariableCleaner
class PreprocessConstantDifferenceTests(unittest.TestCase):
"""
Tests for the ``preprocess._aggregate_columns._difference`` module. Assert final data frames match expectations.
"""
@staticmethod
def test_clean_difference_ints_0():
"""Test subtracting 0 from a column."""
_input = DataFrame({"A": [1, 2, 3]})
_expected = DataFrame({"A": [1, 2, 3]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": 0}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
@staticmethod
def test_clean_difference_ints_1():
"""Test subtracting 1 from a column."""
_input = DataFrame({"A": [1, 2, 3]})
_expected = DataFrame({"A": [0, 1, 2]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": 1}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
@staticmethod
def test_clean_difference_floats_0():
"""Test subtracting 0.0 from a column."""
_input = DataFrame({"A": [1.0, 2.0, 3.0]})
_expected = DataFrame({"A": [1.0, 2.0, 3.0]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": 0.0}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
@staticmethod
def test_clean_difference_floats_negative_1():
"""Test subtracting -1.0 from a column."""
_input = DataFrame({"A": [1.0, 2.0, 3.0]})
_expected = DataFrame({"A": [2.0, 3.0, 4.0]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": -1.0}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
class PreprocessVariableDifferenceTests(unittest.TestCase):
"""
Tests for the ``preprocess._aggregate_columns._difference`` module with column subtraction.
"""
@staticmethod
def test_clean_difference_int_column():
"""Test subtracting the right column from the left."""
_input = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]})
_expected = DataFrame({"A": [-1, -1, -1], "B": [2, 3, 4]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": "B"}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
@staticmethod
def test_clean_difference_right_string_column():
"""Test subtracting the right column from the left. Right column has strings."""
_input = DataFrame({"A": [1, 2, 3], "B": ["2", "3", "4"]})
_expected = DataFrame({"A": [-1.0, -1.0, -1.0], "B": ["2", "3", "4"]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": "B"}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
@staticmethod
def test_clean_difference_left_string_column():
"""Test subtracting the right column from the left. Left column has strings."""
_input = DataFrame({"A": ["1", "2", "3"], "B": [2, 3, 4]})
_expected = DataFrame({"A": [-1.0, -1.0, -1.0], "B": [2, 3, 4]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": "B"}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
| assert_frame_equal(_expected, _vc.frame) | pandas.util.testing.assert_frame_equal |
#!/usr/bin/env python3
"""
Author: <NAME>
Date: 04/05/2020
Function: Calls to an external C++ program
Description:
============
This calls out to an external C++ program with some data entered purely for inout/output testing
The return of this external program is a csv style stream
There is code commented out that can be used in testing outside of the cgi server environment
"""
#************************************************************************************************
import subprocess as sub
import pandas as pd
from io import StringIO as sio
import sys
import Config as cfg
def getFile(filename, url):
#import urllib.request
#urllib.request.urlretrieve(url,filename)
import requests #https://stackoverflow.com/questions/32763720/timeout-a-file-download-with-python-urllib
request = requests.get(url, timeout=100, stream=True)
with open(filename, 'wb') as fh: # Open the output file and make sure we write in binary mode
count = 0
for chunk in request.iter_content(1024 * 1024 * 10): # Walk through the request response in chunks of 10 * 1024 * 1024 bytes, so 10 MiB
fh.write(chunk)
print(count,end=',')
count +=1
sys.stdout.flush()
print('Downloaded')
def getCsvFromCppResults(cppResults,ID):
startPos = cppResults.find('BEGIN_' + ID) + len('BEGIN_' + ID)
endPos = cppResults.find('END_' + ID)
if endPos > startPos:
exe_result = cppResults[startPos:endPos]
exe_data = sio(exe_result)
df = pd.read_csv(exe_data)
return df
else:
#print(startPos,endPos)
return pd.DataFrame()
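# Illustrative usage (the output string below is made up, not real tool output):
# getCsvFromCppResults simply slices the text between BEGIN_<ID> and END_<ID> markers
# and parses that block as CSV.
def _example_get_csv():
    fake_output = "log noise\nBEGIN_PEAKS\na,b\n1,2\nEND_PEAKS\nmore noise"
    return getCsvFromCppResults(fake_output, 'PEAKS')  # -> DataFrame with columns a, b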
def doWeHaveAllFiles(pdbCode,debug=False):
haveED = False
havePDB = False
import os
allFiles = True
#Files from the PDBE
directory = '/d/projects/u/ab002/Thesis/PhD/Data/'
origPdb = cfg.PdbDir + 'pdb' + pdbCode + '.ent'
ccp4File = cfg.Ccp4Dir + pdbCode + '.ccp4'
ccp4Diff = cfg.Ccp4Dir + pdbCode + '_diff.ccp4'
isXray = True
ccp4Num = '0'
pdbOnly = pdbCode
if pdbCode[:5] == "user_":
origPdb = cfg.UserDataPdbDir + 'pdb' + pdbCode + '.ent'
ccp4File = cfg.UserDataCcp4Dir + pdbCode + '.ccp4'
ccp4Diff = cfg.UserDataCcp4Dir + pdbCode + '.ccp4' # no diff file
elif pdbCode[:4] == 'emdb':
# Find the number and the pdb code from the format emdb_12345_1abc
inps = pdbCode.split('_')
pdbNewCode = 'emdb_' + inps[2]
ccp4NewCode = 'emdb_' + inps[1]
origPdb = cfg.EmdbPdbDir + 'pdb' + pdbNewCode + '.ent'
pdbOnly = inps[2]
ccp4FileZip = cfg.EmdbCcp4Dir + ccp4NewCode + '.map.gz'
ccp4File = cfg.EmdbCcp4Dir + ccp4NewCode + '.ccp4'
isXray = False
ccp4Num = inps[1]
if os.path.isfile(origPdb):
havePDB = True
else:
try:
getFile(origPdb,'https://www.ebi.ac.uk/pdbe/entry-files/download/pdb' + pdbOnly + '.ent')
havePDB = True
except:
havePDB = False
if os.path.isfile(ccp4File):
haveED = True
else:
try:
if isXray:
getFile(ccp4File,'https://www.ebi.ac.uk/pdbe/coordinates/files/' + pdbCode + '.ccp4')
getFile(ccp4Diff,'https://www.ebi.ac.uk/pdbe/coordinates/files/' + pdbCode +'_diff.ccp4')
else:
emdbPath = 'https://ftp.ebi.ac.uk/pub/databases/emdb/structures/EMD-' + ccp4Num + '/map/emd_' + ccp4Num + '.map.gz'
if not os.path.isfile(ccp4FileZip):
print('This file needs to be downloaded: ', emdbPath)
print('\n')
print('EMDB map files can be large, contact us if there are any problems with this file\n')
getFile(ccp4FileZip,emdbPath)
import gzip
import shutil
#https://www.codegrepper.com/code-examples/python/how+to+extract+gz+file+python
# now we need to unzip it
print('Unzipping...')
sys.stdout.flush()
with gzip.open(ccp4FileZip,'rb') as f_in:
with open(ccp4File,'wb') as f_out:
shutil.copyfileobj(f_in,f_out)
print('...unzipped')
sys.stdout.flush()
import os
os.remove(ccp4FileZip)
haveED = True
except:
haveED = False
return havePDB,haveED
def runCppModule(pdb,interpNum,Fos,Fcs,cX,cY,cZ,lX,lY,lZ,pX,pY,pZ,width,gran,D1,D2,D3,D4,D5,D6,D7,D8,D9,debug=False):
#try:
import Config as cfg
df1a,df1b,df1c = pd.DataFrame(),pd.DataFrame(),pd.DataFrame()
df2a, df2b, df2c = pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
df4, df5, df6,df7 = pd.DataFrame(),pd.DataFrame(),pd.DataFrame(),pd.DataFrame()
exePath =cfg.ExePath
if True:
### CALL PEAKS ######################################
if D1 or D2 or D3 or D4:
commandlinePeaks = "PEAKS|" + pdb + "|" + str(interpNum) + "|" + str(Fos) + "|"+ str(Fcs) + "|"
#print('...called Leucippus with params:' + commandlinePeaks + ' ...')
#sys.stdout.flush() # update the user interface
#------------------------------------------------
pigP = sub.Popen([exePath, commandlinePeaks], stdout=sub.PIPE)
resultP = pigP.communicate(input=b"This is sample text.\n")
exe_resultP = str(resultP[0],'utf-8')
pigP.kill()
#------------------------------------------------
dfInputs = getCsvFromCppResults(exe_resultP, 'USERINPUTS')
df1a = getCsvFromCppResults(exe_resultP, 'ALLPEAKS')
if len(df1a) == 0:
print("results from exe=",resultP)
return []
df1b = getCsvFromCppResults(exe_resultP, 'ATOMPEAKS')
df1c = getCsvFromCppResults(exe_resultP, 'CHIMERAPEAKS')
### CALL ATOMS ######################################
if D5 or D6:
commandlineAtoms = "ATOMSDENSITY|" + pdb + "|" + str(interpNum) + "|"+ str(Fos) + "|"+ str(Fcs) + "|"
#print('...called Leucippus with params:' + commandlineAtoms + ' ...')
#------------------------------------------------
pigA = sub.Popen([exePath, commandlineAtoms], stdout=sub.PIPE)
resultA = pigA.communicate(input=b"This is sample text.\n")
exe_resultA = str(resultA[0],'utf-8')
pigA.kill()
#------------------------------------------------
df2a = getCsvFromCppResults(exe_resultA, 'ATOMDENSITY')
### CALL ATOMS ######################################
if D7 or D8:
commandlineAtomsAdj = "ATOMSADJUSTED|" + pdb + "|" + str(interpNum) + "|"+ str(Fos) + "|"+ str(Fcs) + "|"
#print('...called Leucippus with params:' + commandlineAtoms + ' ...')
#------------------------------------------------
pigAa = sub.Popen([exePath, commandlineAtomsAdj], stdout=sub.PIPE)
resultAa = pigAa.communicate(input=b"This is sample text.\n")
exe_resultAa = str(resultAa[0],'utf-8')
pigAa.kill()
#------------------------------------------------
df2b = getCsvFromCppResults(exe_resultAa, 'DENSITYADJUSTED')
df2c = getCsvFromCppResults(exe_resultAa, 'LAPLACIANADJUSTED')
### CALL SLICES #######################################
if D9:
commandlineSlices = "SLICES|" + pdb + "|" + str(interpNum) + "|" + str(Fos) + "|"+ str(Fcs) + "|"
commandlineSlices += str(cX) + "_" + str(cY) + "_" + str(cZ) + "|"
commandlineSlices += str(lX) + "_" + str(lY) + "_" + str(lZ) + "|"
commandlineSlices += str(pX) + "_" + str(pY) + "_" + str(pZ) + "|"
commandlineSlices += str(width) + "_" + str(gran)
print('...called Leucippus with params:' + commandlineSlices + ' ...')
#------------------------------------------------
pigS = sub.Popen([exePath, commandlineSlices], stdout=sub.PIPE)
resultS = pigS.communicate(input=b"This is sample text.\n")
exe_resultS = str(resultS[0],'utf-8')
pigS.kill()
#------------------------------------------------
#dfI = getCsvFromCppResults(exe_resultS, 'USERINPUTS')
#print(dfI)
df4 = getCsvFromCppResults(exe_resultS, 'DENSITYSLICE')
df5 = getCsvFromCppResults(exe_resultS, 'RADIANTSLICE')
df6 = getCsvFromCppResults(exe_resultS, 'LAPLACIANSLICE')
df7 = getCsvFromCppResults(exe_resultS, 'POSITIONSLICE')
return [[df1a,df1b,df1c],[df2a,df2b,df2c],[df4,df5,df6,df7]]
#except:
#print("results from exe=",result)
#return []
def runCppModuleText(pdb):
commandline = "TEXTCOUT|" + pdb + "|5|-2|1|"
df1 = pd.DataFrame()
#print(commandline)
pig = sub.Popen(["/d/projects/u/ab002/Thesis/PhD/Github/PsuMaxima/Linux/build/PsuMaxima", commandline], stdout=sub.PIPE)
try:
result = pig.communicate(input=b"This is sample text.\n")
exe_result = str(result[0],'utf-8')
except:
pig.kill()
result = pig.communicate(input=b"This is sample text.\n")
exe_result = str(result[0],'utf-8')
#print(exe_result)
df1 = getCsvFromCppResults(exe_result, 'RAWTEXT')
pig.kill()
#print(df1)
return [df1]
def runCppModuleSyntheticDensity(atoms,model,cX,cY,cZ,lX,lY,lZ,pX,pY,pZ,width,gran):
#try:
df1a,df1b,df1c,df1d = pd.DataFrame(),pd.DataFrame(),pd.DataFrame(),pd.DataFrame()
if True:
### CALL Synthetic Density ######################################
commandlineSnth = "SYNTHETIC|" + atoms + "|" + model + "|2|-1|"
commandlineSnth += str(cX) + "_" + str(cY) + "_" + str(cZ) + "|"
commandlineSnth += str(lX) + "_" + str(lY) + "_" + str(lZ) + "|"
commandlineSnth += str(pX) + "_" + str(pY) + "_" + str(pZ) + "|"
commandlineSnth += str(width) + "_" + str(gran)
#print('...called Leucippus with params:' + commandlineSnth + ' ...')
#------------------------------------------------
pigS = sub.Popen(["/d/projects/u/ab002/Thesis/PhD/Github/PsuMaxima/Linux/build/PsuMaxima", commandlineSnth], stdout=sub.PIPE)
resultS = pigS.communicate(input=b"This is sample text.\n")
exe_resultS = str(resultS[0],'utf-8')
pigS.kill()
#------------------------------------------------
#dfI = getCsvFromCppResults(exe_resultS, 'USERINPUTS')
#print(dfI)
dfI = getCsvFromCppResults(exe_resultS, 'ATOMDATA')
#print(dfI)
df1a = getCsvFromCppResults(exe_resultS, 'DENSITYSLICE')
df1b = getCsvFromCppResults(exe_resultS, 'RADIANTSLICE')
df1c = getCsvFromCppResults(exe_resultS, 'LAPLACIANSLICE')
df1d = getCsvFromCppResults(exe_resultS, 'POSITIONSLICE')
#df1d = getCsvFromCppResults(exe_resultS, 'SYNTHMATRIX')
return [df1a,df1b,df1c,df1d]
#except:
#print("results from exe=",result)
#return []
def runCppModuleSamples(pdb):
# try:
df1a, df1b = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import plotly.express
from IMLearn.learners import UnivariateGaussian, MultivariateGaussian
import numpy as np
import plotly.graph_objects as go
import plotly.io as pio
pio.templates.default = "simple_white"
def test_univariate_gaussian():
# Question 1 - Draw samples and print fitted model
X = np.random.normal(10, 1, 1000)
estimator = UnivariateGaussian()
estimator = estimator.fit(X)
print(f'({estimator.mu_}, {estimator.var_})')
# Question 2 - Empirically showing sample mean is consistent
df = pd.DataFrame(columns=['Sample Size', 'Distance from Real Expectation'])
for sample_size in range(10, 1001, 10):
sample = X[:sample_size]
model = UnivariateGaussian(biased_var=False)
model = model.fit(sample)
distance = abs(model.mu_ - 10)
df = pd.concat([df, pd.DataFrame({'Sample Size': [sample_size],
'Distance from Real Expectation': [distance]})])
plotly.express.bar(df, x='Sample Size', y='Distance from Real Expectation',
title='Distance from Actual Expectation as a Function '
'of Sample Size').show()
# Question 3 - Plotting Empirical PDF of fitted model
pdf = estimator.pdf(X)
df = pd.DataFrame(zip(X, pdf), columns=['Sample Value', 'Probability'])
plotly.express.scatter(df, x='Sample Value', y='Probability',
title='PDF of Fitted Model').show()
def test_multivariate_gaussian():
# Question 4 - Draw samples and print fitted model
mu = np.array([0, 0, 4, 0])
sigma = np.array([[1, 0.2, 0, 0.5],
[0.2, 2, 0, 0],
[0, 0, 1, 0],
[0.5, 0, 0, 1]])
X = np.random.multivariate_normal(mu, sigma, 1000)
estimator = MultivariateGaussian()
estimator = estimator.fit(X)
print(estimator.mu_)
print(estimator.cov_)
# Question 5 - Likelihood evaluation
res = []
for f1 in np.linspace(-10, 10, 200):
for f3 in np.linspace(-10, 10, 200):
mu = np.array([f1, 0, f3, 0])
log_likelihood = MultivariateGaussian.log_likelihood(mu, sigma,
X)
res.append((f1, f3, log_likelihood))
df = | pd.DataFrame(res, columns=['f1', 'f3', 'Log Likelihood']) | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
import geopandas
from mapillary_image_classification.data.osm import define_categories
def split_data(df: geopandas.GeoDataFrame, num_parts: int = 4):
"""
Split a dataframe into num_parts chunks.
This can be used to produce multiple dataset files and download the data concurrently
on multiple computers.
"""
return np.array_split(df, num_parts)
def balance_data(df: geopandas.GeoDataFrame, group_size, group_cols = ['surface_category', 'smoothness_category']):
"""
Undersample groups of a dataframe so they have a maximum size of group_size.
"""
g = df.groupby(group_cols)
print(g.size())
smaller_groups_mask = g.size() < group_size
if sum(smaller_groups_mask) > 0: # if there are groups with smaller size than group_size
df_smaller = pd.concat( # save all groups which are smaller than the group_size, as these cannot be sampled
[df[(df[group_cols[0]] == group_idx[0])
& (df[group_cols[1]] == group_idx[1])]
for group_idx in g.size()[smaller_groups_mask].index])
df_larger = pd.concat(
[df[(df[group_cols[0]] == group_idx[0])
& (df[group_cols[1]] == group_idx[1])]
for group_idx in g.size()[~smaller_groups_mask].index])
else:
df_larger = df
df_sample = df_larger.groupby(group_cols).sample(group_size, random_state=42) # sample from all groups which are larger than group_size
if sum(smaller_groups_mask) > 0:
return | pd.concat([df_smaller, df_sample]) | pandas.concat |
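# Illustrative usage sketch (group_size value is arbitrary; column names are the defaults
# above): balance_data undersamples every (surface_category, smoothness_category) group
# down to group_size rows and leaves groups that are already smaller untouched.
def _example_balance(df):
    return balance_data(df, group_size=500,
                        group_cols=['surface_category', 'smoothness_category'])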
import pandas as pd
import numpy as np
from scipy.stats import bernoulli
from scipy.stats import uniform
def assign_bags(strategy='random_n_size', random_seed=None, **kwargs):
# Arguments:
# X: feature matrix, each feature vector should be represented as a row vector in the matrix
# num_bags: number of bags to make;
# will not effect the output if strategy==feature
# strategy: 'random': uniformly random with varying bag size, need arguments 'num_bags' and 'X'
# 'random_n_size': uniformly random with fixed bag size, need arguments 'num_bags' and 'X'
# 'feature': bag id is assigned based on the feature class, need arguments 'strategy_col' and 'X'
# 'multi-source': multi-source corruption i.e. given number of different bag proportions;
# need arguments 'distribution', 'y', 'pos_label';
# 'y' is the label vector
# 'distribution' is a dictionary mapping (pos_instances, neg_instances) to the
# number of bag under this distribution
# 'uniform_prop': for each bag, first generate a proportion with respect to a distribution,
# then generate the labels w.r.t Bernoulli distribution;
# need argument 'distribution', 'X', 'y', 'size', and 'pos_label';
# 'X' is the feature matrix
# 'y' is the label vector
# 'distribution' is a dictionary mapping [left_end, right_end] to the
# number of bag with this distribution
# 'bag_size' is the size of a bag
# strategy_col: if strategy is 'feature', strategy_col is the pandas Series of that column
# random_seed:
#
# Functionality:
# assign a bag id to each instance; will NOT modify X
#
# Returns:
# (if the strategy is 'uniform_prop', returns X, y, bag_id)
# bag_id: a numpy ndarray of bag ids, corresponding to X by location;
# bag ids are integers starting at 0 (at most num_bags - 1 for the random strategies)
if random_seed is not None:
np.random.seed(random_seed) # fix a random seed if given
# assign random bag index to instances, bag size can vary
if strategy == 'random':
num_bags = kwargs['num_bags']
X = kwargs['X']
bag_id = np.random.randint(0, high=num_bags, size=X.shape[0])
# assign random bag index to instances, bag size is fixed
elif strategy == 'random_n_size':
num_bags = kwargs['num_bags']
X = kwargs['X']
# check if the number of instances is divisible by the number of bags
assert X.shape[0] % num_bags == 0, \
"number of instances %d is not divisible by number of bags %d" % (X.shape[0], num_bags)
n = X.shape[0] // num_bags # compute the size of each bag
# assign bag index by appending integers to a 1d DataFrame and shuffling it.
bag_id = pd.DataFrame(0, index=range(n), columns=['bag_id'])
for i in range(1, num_bags):
temp = pd.DataFrame(i, index=range(n), columns=['bag_id'])
bag_id = bag_id.append(temp, ignore_index=True)
np.random.shuffle(bag_id.values)
bag_id = bag_id.values.reshape(-1, )
# this is the method used in "no label no cry" code
elif strategy == 'feature':
strategy_col = kwargs['strategy_col']
X = kwargs['X']
bag_id = | pd.Categorical(X[strategy_col]) | pandas.Categorical |
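# Illustrative usage (toy feature matrix; values are arbitrary): with 'random_n_size'
# the number of rows in X must be divisible by num_bags, and the returned bag_id array
# lines up with X row by row.
def _example_assign_bags():
    X_toy = pd.DataFrame(np.arange(20).reshape(10, 2), columns=['f1', 'f2'])
    return assign_bags(strategy='random_n_size', random_seed=0, num_bags=5, X=X_toy)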
import re
import numpy as np
import pandas as pd
from nltk import WordNetLemmatizer
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from src.embeddings import load_vocab, load_embeddings
def find_all_num(data):
all_ch_c = len(data)
i = 0
all_nums = set()
while i < all_ch_c:
if data[i].isnumeric():
num = data[i]
i+=1
while i < all_ch_c and data[i].isnumeric():  # bounds check so a trailing digit doesn't raise IndexError
num = num + data[i]
i+=1
all_nums.add(num)
i+=1
print(all_nums)
def clean_text(text):
lemmatizer = WordNetLemmatizer()
text = remove_mention_url(text=text)
text = remove_entities(text=text)
text = remove_hastags(text=text)
text = lowercase(text=text)
text = remove_non_ascii(text=text)
text = add_space_latin(text=text)
text = apostrophe_handling(text=text)
text = add_space_punc(text=text)
# text = remove_numbers(text=text)
#text = remove_stop(text=text, stop=stop) # NOT NEEDED
text = reduce_words(text=text)
#text = stem_words(text=text, lemmatizer=lemmatizer)
text = text.split()
text = [w for w in text if w != '']
text = ' '.join(text)
return text
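# Illustrative usage (made-up input; the result is paraphrased, not verified output):
# clean_text chains the helpers below to strip mentions and hashtags, lowercase, drop
# non-ASCII, expand contractions, space out punctuation and cap repeated characters at two.
def _example_clean_text():
    return clean_text("@user I can't believe thiiis!!!")
    # -> roughly "i cannot believe thiis ! ! !"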
def stem_words(text, lemmatizer):
if len(text) == 0:
return text
for word in text.split():
text = text.replace(word, lemmatizer.lemmatize(word))
return text
def remove_mention_url(text):
text = re.sub('@[A-Za-z0-9_]+', '', text)
text = re.sub('URL', '', text)
return text
def remove_entities(text):
text = text.replace('<', '')
text = text.replace('>', '')
text = text.replace('&', '')
return text
def remove_hastags(text):
text = re.sub('#[A-Za-z0-9_]+', '', text)
return text
def lowercase(text):
text = text.lower()
return text
def remove_non_ascii(text):
text = text.encode('ascii', 'ignore').decode('utf-8')
return str(text)
def add_space_latin(text):
text = re.sub('([.()!"#$%&*+,-/:;<=>?@^_`{|}~])', ' \\1 ', text)  # pad punctuation with spaces, as the function name suggests
return text
def apostrophe_handling(text):
contractions = {
"ain't": "am not",
"aren't": "are not",
"can't": "cannot",
"can't've": "cannot have",
"'cause": "because",
"could've": "could have",
"couldn't": "could not",
"couldn't've": "could not have",
"didn't": "did not",
"doesn't": "does not",
"don't": "do not",
"hadn't": "had not",
"hadn't've": "had not have",
"hasn't": "has not",
"haven't": "have not",
"he'd": "he would",
"he'd've": "he would have",
"he'll": "he will",
"he'll've": "he will have",
"he's": "he is",
"how'd": "how did",
"how'd'y": "how do you",
"how'll": "how will",
"how's": "how is",
"i'd": "i had",
"i'd've": "i would have",
"i'll": "i will",
"i'll've": "i will have",
"i'm": "i am",
"i've": "i have",
"isn't": "is not",
"it'd": "it would",
"it'd've": "it would have",
"it'll": "it will",
"it'll've": "it will have",
"it's": "it is",
"let's": "let us",
"ma'am": "madam",
"mayn't": "may not",
"might've": "might have",
"mightn't": "might not",
"mightn't've": "might not have",
"must've": "must have",
"mustn't": "must not",
"mustn't've": "must not have",
"needn't": "need not",
"needn't've": "need not have",
"o'clock": "of the clock",
"oughtn't": "ought not",
"oughtn't've": "ought not have",
"shan't": "shall not",
"sha'n't": "shall not",
"shan't've": "shall not have",
"she'd": "she had",
"she'd've": "she would have",
"she'll": "she will",
"she'll've": "she will have",
"she's": "she is",
"should've": "should have",
"shouldn't": "should not",
"shouldn't've": "should not have",
"so've": "so have",
"so's": "so is",
"that'd": "that would",
"that'd've": "that would have",
"that's": "that is",
"there'd": "there would",
"there'd've": "there would have",
"there's": "there is",
"they'd": "they would",
"they'd've": "they would have",
"they'll": "they will",
"they'll've": "they will have",
"they're": "they are",
"they've": "they have",
"to've": "to have",
"wasn't": "was not",
"we'd": "we had",
"we'd've": "we would have",
"we'll": "we will",
"we'll've": "we will have",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'll": "what will",
"what'll've": "what will have",
"what're": "what are",
"what's": "what is",
"what've": "what have",
"when's": "when is",
"when've": "when have",
"where'd": "where did",
"where's": "where is",
"where've": "where have",
"who'll": "who will",
"who'll've": "who will have",
"who's": "who is",
"who've": "who have",
"why's": "why is",
"why've": "why have",
"will've": "will have",
"won't": "will not",
"won't've": "will not have",
"would've": "would have",
"wouldn't": "would not",
"wouldn't've": "would not have",
"y'all": "you all",
"y'all'd": "you all would",
"y'all'd've": "you all would have",
"y'all're": "you all are",
"y'all've": "you all have",
"you'd": "you had",
"you'd've": "you would have",
"you'll": "you will",
"you'll've": "you will have",
"you're": "you are",
"you've": "you have"
}
for word in text.split():
if word in contractions:
text = text.replace(word, contractions[word])
return text
def add_space_punc(text):
# pat = re.compile(r"[()!?.,:;&@#*$%^+=-]")
pat = re.compile(r"([\[()!?.,:;&@#*$%><^\"\'+=/\\\-\]])")
# text = re.sub('[()!?.,:;&@#*$%^+=-]', ' ', text)
text = pat.sub(' \\1 ', text)
return text
def remove_numbers(text):
text = re.sub("\d+", '', text)
return text
def remove_stop(text, stop):
text = text.split()
text = [w for w in text if w not in stop]
text = ' '.join(text)
return text
def reduce_words(text):
def reduced_word(w):
s = w[0]
curr_char = w[0]
curr_count = 1
for c in w[1:]:
if c == curr_char:
curr_count += 1
else:
curr_char = c
curr_count = 1
if curr_count <= 2:
s += c
else:
continue
return s
if len(text) == 0:
return text
text = reduced_word(w=text)
return text
def read_wikihow_dataset(file_path):
df = pd.read_csv(file_path)
return df["text"].values, df["headline"].values
def save_cleaned_text(texts, summaries, file_path): # np_arrays
cleaned_summaries = []
cleaned_texts = []
for i, (text, summary) in enumerate(zip(texts, summaries)):
if type(text) == float or type(summary) == float:
continue
if i % 5000 == 0:
print(f"Cleaned {i}")
cleaned_summaries.append(clean_text(summary))
cleaned_texts.append(clean_text(text))
cleaned_frame = pd.DataFrame({"text": cleaned_texts, "summary": cleaned_summaries})
if file_path is not None:
cleaned_frame.to_csv(file_path, sep = ",")
return cleaned_texts, cleaned_summaries
def find_all_with_known_words(texts, summaries, wordtoidx):
known_texts = []
known_summaries = []
for i, (text, summary) in enumerate(zip(texts, summaries)):
if type(summary) == float or type(text) == float:
continue
not_found = False
for word in text.split():
if word not in wordtoidx:
not_found = True
break
for word in summary.split():
if word not in wordtoidx:
not_found = True
break
if not_found:
continue
known_texts.append(text)
known_summaries.append(summary)
return known_texts, known_summaries
def save_known_text_summary(texts, summaries, wordtoidx, save_path):
print(f"Length before known word filter {len(texts)}")
known_texts, known_summaries = find_all_with_known_words(texts, summaries, wordtoidx)
print(f"Length After known word filter {len(known_summaries)}")
df = pd.DataFrame({"text": known_texts, "summary": known_summaries})
if save_path is not None:
df.to_csv(save_path, sep= ",")
return known_texts, known_summaries
def clean_wikihow():
print("Reading started.")
texts, summaries = read_wikihow_dataset("data/wikihow.csv")
print("Reading complete.")
save_cleaned_text(texts, summaries, "data/wikihow_clean.csv")
print("Cleaning complete.")
def plot_word_count_stats(file_path):
df = pd.read_csv(file_path)
word_count = {}
for text in df["text"]:
cnt = len(text.split())
if cnt not in word_count:
word_count[cnt] = 0
word_count[cnt] += 1
k = []
v = []
for cnt in word_count:
k.append(cnt)
v.append(word_count[cnt])
plt.scatter(k, v, alpha=0.3)
plt.legend()
plt.show()
def clip_summary_word_count(file_path, word_count, target_file_path):
df = pd.read_csv(file_path)
summaries = []
texts = []
for text, summary in zip(df["text"], df["summary"]):
if len(summary.split()) > word_count:
continue
summaries.append(summary)
texts.append(text)
print(f"total exemplars after clipping: {len(texts)}")
pd.DataFrame({"text": texts, "summary": summaries}).to_csv(target_file_path, sep = ",")
def add_start_end(file_path):
df = pd.read_csv(file_path)
texts = []
summaries = []
for text, summary in zip(df["text"], df["summary"]):
if type(text) == float or type(summary) == float:
continue
summaries.append("<start> " + summary + " <end>")
texts.append("<start> " + text + " <end>")
pd.DataFrame({"text": texts, "summary": summaries}).to_csv(file_path, sep = ",")
def all_known_count(emb_path, data_path):
_, vocab = load_embeddings(emb_path, 50)
df = pd.read_csv(data_path, sep=",")
sum_count = 0
for text, summary in zip(df["text"], df["summary"]):
not_found = False
if type(text) == float or type(summary) == float:
continue
for word in text.split():
if word not in vocab:
not_found = True
if not not_found:
for word in summary.split():
if word not in vocab:
not_found = True
if not_found:
sum_count += 1
print(f"Total known reviews: {sum_count}")
def filter_with_word_count(texts, summaries, word_count_t, word_count_s):
filtered_texts = []
filtered_summaries = []
for text, summary in zip(texts, summaries):
ln = len(text.split())
lns = len(text.split())
if ln > word_count_t:
continue
if lns > word_count_s:
continue
filtered_summaries.append(summary)
filtered_texts.append(text)
return filtered_texts, filtered_summaries
def final_preprocessing():
# clean data
# find all with existing emb
# add start end
# save
df = | pd.read_csv("data/wikihow.csv") | pandas.read_csv |
import codecademylib3_seaborn
from bs4 import BeautifulSoup
import requests
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
print("some")
webpage_response = requests.get("https://s3.amazonaws.com/codecademy-content/courses/beautifulsoup/cacao/index.html")
webpage = webpage_response.content
soup=BeautifulSoup(webpage,"html.parser")
ratings = []
rating = soup.find_all(attrs={"class":"Rating"})
for rate in rating[1:]:
ratings.append(float(rate.get_text()))
print(ratings)
plt.hist(ratings)
plt.show()
companies = soup.select(".Company")
all_company = []
for company in companies[1:]:
all_company.append(company.get_text())
print(all_company)
data = {"Company":all_company, "Rating":ratings}
df = pd.DataFrame.from_dict(data)
mean_vals = df.groupby("Company").Rating.mean()
ten_best = mean_vals.nlargest(10)
print(ten_best)
cocoa_percents = []
cocoa_percent_tags = soup.select(".CocoaPercent")
for td in cocoa_percent_tags[1:]:
percent = float(td.get_text().strip('%'))
cocoa_percents.append(percent)
print(cocoa_percents)
data = {"Company":all_company, "Rating":ratings, "CocoaPercentage":cocoa_percents}
df = | pd.DataFrame.from_dict(data) | pandas.DataFrame.from_dict |
import functools
from io import BytesIO
import pickle
import gzip
from pathlib import Path
from functools import cached_property
from dataclasses import dataclass
from PIL import Image
import json
from pandas._libs.tslibs import Timedelta
import torch
from collections import Counter
import functools
import random
from torch.nn.utils.rnn import pad_sequence
from src.data.extract_data import RAW_FILE_NAME, get_file_tree, get_trace_data
import zipfile
import pandas as pd
from tqdm import tqdm
from torch.utils.data.sampler import BatchSampler, RandomSampler
from torch.utils.data import DataLoader, Dataset
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
project_dir = Path(__file__).resolve().parents[2]
# project_dir = Path("/work3/s164221")
raw_path = project_dir / "data" / "raw"
interim_path = project_dir / "data" / "interim"
processed_path = project_dir / "data" / "processed"
def get_loader(dataset, batch_size, pin_memory=False, generator=None):
sampler = BatchSampler(
RandomSampler(dataset, generator=generator),
batch_size=batch_size,
drop_last=False,
)
return DataLoader(
dataset,
batch_size=None,
sampler=sampler,
pin_memory=pin_memory,
)
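# Illustrative usage (the argument is assumed to be a FloorDataset instance defined below):
# because batch_size=None disables automatic batching and the sampler is a BatchSampler,
# each iteration hands a whole list of indices to dataset.__getitem__, which is exactly
# the mini-batch interface FloorDataset implements.
def _example_get_loader(floor_dataset):
    loader = get_loader(floor_dataset, batch_size=16)
    for mini_batch in loader:
        index, length, time, position, position_mask, *rest = mini_batch
        return length  # per-trace sequence lengths of the first mini-batch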
@dataclass
class TestDataset:
pass
class SiteDataset(Dataset):
def __init__(self, site_id: str, **kwargs) -> None:
self.site_id = site_id
file_tree = get_file_tree()
floor_ids = file_tree["train"][self.site_id]
self.floors = [
FloorDataset(self.site_id, floor_id, **kwargs) for floor_id in floor_ids
]
class FloorDataset(Dataset):
def __init__(
self,
site_id: str,
floor_id: str,
sampling_interval=100,
wifi_threshold=100,
include_wifi=True,
include_beacon=False,
validation_percent=None,
test_percent=None,
split_seed=123,
) -> None:
self.unpadded_tensors = None
self.site_id = site_id
self.floor_id = floor_id
self.sampling_interval = sampling_interval
self.wifi_threshold = wifi_threshold
self.include_wifi = include_wifi
self.include_beacon = include_beacon
file_tree = get_file_tree()
trace_ids = file_tree["train"][self.site_id][self.floor_id]
self.traces = [
TraceData(self.site_id, self.floor_id, trace_id, sampling_interval)
for trace_id in trace_ids
]
# ---- TEST TRAIN SPLIT -----
trace_indices = set(range(len(self.traces)))
self.validation_mask = torch.full((len(self.traces),), False)
self.test_mask = torch.full((len(self.traces),), False)
if validation_percent is not None or test_percent is not None:
random.seed(split_seed)
if validation_percent is not None:
validation_indices = random.choices(
list(trace_indices), k=int(len(self.traces) * validation_percent)
)
trace_indices.difference_update(validation_indices)
self.validation_mask[validation_indices] = True
if test_percent is not None:
test_indices = random.choices(
list(trace_indices), k=int(len(self.traces) * test_percent)
)
trace_indices.difference_update(test_indices)
self.test_mask[test_indices] = True
@cached_property
def image(self):
image_path = Path("metadata") / self.site_id / self.floor_id / "floor_image.png"
with zipfile.ZipFile(raw_path / RAW_FILE_NAME) as zip_file:
file_path = zipfile.Path(zip_file) / image_path
with file_path.open("rb") as f:
bytes_ = BytesIO(f.read())
return Image.open(bytes_)
@cached_property
def info(self):
info_path = Path("metadata") / self.site_id / self.floor_id / "floor_info.json"
with zipfile.ZipFile(raw_path / RAW_FILE_NAME) as zip_file:
file_path = zipfile.Path(zip_file) / info_path
with file_path.open("r") as f:
return json.load(f)
def __len__(self):
return len(self.traces)
def __getitem__(self, indices):
(
time_unpadded,
position_unpadded,
wifi_unpadded,
beacon_unpadded,
) = self._generate_tensors()
mini_batch_index = indices
mini_batch_length = torch.tensor(
[len(time_unpadded[i]) for i in indices], device=device
)
mini_batch_time = pad_sequence(
[time_unpadded[i] for i in indices], batch_first=True
)
mini_batch_position = pad_sequence(
[position_unpadded[i] for i in indices], batch_first=True
)
mini_batch_position_mask = ~mini_batch_position.isnan().any(dim=-1)
for i, length in enumerate(mini_batch_length):
mini_batch_position_mask[i, length:] = False
mini_batch_validation_mask = self.validation_mask[mini_batch_index]
mini_batch_test_mask = self.test_mask[mini_batch_index]
mini_batch_position_mask[mini_batch_validation_mask, :] = False
mini_batch_position_mask[mini_batch_test_mask, :] = False
mini_batch_position[~mini_batch_position_mask] = 0
out_tensors = [
mini_batch_index,
mini_batch_length,
mini_batch_time,
mini_batch_position,
mini_batch_position_mask,
]
if self.include_wifi:
mini_batch_wifi = pad_sequence(
[wifi_unpadded[i] for i in indices], batch_first=True
)
mini_batch_wifi_mask = ~mini_batch_wifi.isnan()
for i, length in enumerate(mini_batch_length):
mini_batch_wifi_mask[i, length:, :] = False
mini_batch_wifi[~mini_batch_wifi_mask] = 0
out_tensors.extend([mini_batch_wifi, mini_batch_wifi_mask])
if self.include_beacon:
mini_batch_beacon = pad_sequence(
[beacon_unpadded[i] for i in indices], batch_first=True
)
mini_batch_beacon_mask = ~mini_batch_beacon.isnan()
for i, length in enumerate(mini_batch_length):
mini_batch_beacon_mask[i, length:, :] = False
mini_batch_beacon[~mini_batch_beacon_mask] = 0
out_tensors.extend([mini_batch_beacon, mini_batch_beacon_mask])
return out_tensors
@property
def K(self):
if hasattr(self, "bssids_"):
return len(self.bssids_)
else:
self._generate_tensors()
return len(self.bssids_)
@property
def B(self):
if hasattr(self, "beacon_ids_"):
return len(self.beacon_ids_)
else:
self._generate_tensors()
return len(self.beacon_ids_)
def _generate_tensors(self):
if self.unpadded_tensors is not None:
return self.unpadded_tensors
sub_path = Path("train") / self.site_id / self.floor_id
cached_path = (processed_path / sub_path).with_suffix(".pt")
if cached_path.exists():
data_parameters, bssids, beacon_ids, data_tensors_unpadded = torch.load(
cached_path, map_location=device
)
if data_parameters == (self.sampling_interval, self.wifi_threshold):
self.bssids_ = bssids
self.beacon_ids_ = beacon_ids
data_tensors_unpadded = [
[y.to(device=device) for y in x] for x in data_tensors_unpadded
]
self.unpadded_tensors = data_tensors_unpadded
return data_tensors_unpadded
time_unpadded = []
position_unpadded = []
wifi_unaligned = []
beacon_unaligned = []
for trace in tqdm(self.traces):
time, position, wifi, beacon = trace[0]
time_unpadded.append(time)
position_unpadded.append(position)
wifi_unaligned.append((trace.bssids_, wifi))
beacon_unaligned.append((trace.beacon_ids_, beacon))
## Aligning floor wide wifi signals
bssid_counter = Counter()
for bssids_, wifi in wifi_unaligned:
bssid_counter.update(dict(zip(bssids_, (~wifi.isnan()).sum(0))))
self.bssids_ = sorted(
i for i, j in bssid_counter.items() if j >= self.wifi_threshold
)
bssid_to_index = {j: i for i, j in enumerate(self.bssids_)}
wifi_unpadded = []
for bssids, wifi in wifi_unaligned:
wifi_aligned = torch.full(
(wifi.shape[0], len(self.bssids_)), float("nan"), dtype=wifi.dtype
)
old_index, old_bssid, = zip(
*[
(i, bssid)
for i, bssid in enumerate(bssids)
if bssid in bssid_to_index
]
)
new_index = [bssid_to_index[bssid] for bssid in old_bssid]
wifi_aligned[:, new_index] = wifi[:, old_index]
wifi_unpadded.append(wifi_aligned)
self.beacon_ids_ = sorted(
set(
beacon_id
for (beacon_ids, beacon) in beacon_unaligned
for beacon_id in beacon_ids
)
)
beacon_id_to_index = {j: i for i, j in enumerate(self.beacon_ids_)}
beacon_unpadded = []
for (beacon_ids, beacon) in beacon_unaligned:
beacon_aligned = torch.full(
(beacon.shape[0], len(self.beacon_ids_)),
float("nan"),
dtype=beacon.dtype,
)
beacon_aligned[
:, [beacon_id_to_index[beacon_id] for beacon_id in beacon_ids]
] = beacon
beacon_unpadded.append(beacon_aligned)
data_tensors_unpadded = (
time_unpadded,
position_unpadded,
wifi_unpadded,
beacon_unpadded,
)
cached_path.parent.mkdir(parents=True, exist_ok=True)
data_parameters = (self.sampling_interval, self.wifi_threshold)
torch.save(
(data_parameters, self.bssids_, self.beacon_ids_, data_tensors_unpadded),
cached_path,
)
data_tensors_unpadded = [
[y.to(device=device) for y in x] for x in data_tensors_unpadded
]
self.unpadded_tensors = data_tensors_unpadded
return data_tensors_unpadded
class TraceData:
"""Data for a single trace"""
def __init__(self, site_id, floor_id, trace_id, sampling_interval=100) -> None:
self.site_id = site_id
self.floor_id = floor_id
self.trace_id = trace_id
self.sampling_interval = sampling_interval
@property
def data(self):
"""Data in pandas format"""
return self._get_zipped_data()
def _get_zipped_data(self, cache=True):
"""Loads data from zip file into pandas format"""
sub_path = Path("train") / self.site_id / self.floor_id / self.trace_id
cached_path = (interim_path / sub_path).with_suffix(".pkl.gz")
if cached_path.exists():
with gzip.open(cached_path, "rb") as f:
return pickle.load(f)
data = get_trace_data(sub_path.with_suffix(".txt"))
if cache:
cached_path.parent.mkdir(parents=True, exist_ok=True)
with gzip.open(cached_path, "wb") as f:
pickle.dump(data, f)
return data
def __len__(self):
return 1
def __getitem__(self, idx):
if idx != 0:
raise IndexError
data_tensors = self._generate_tensors()
return data_tensors
def _generate_tensors(self):
sub_path = Path("train") / self.site_id / self.floor_id / self.trace_id
cached_path = (processed_path / sub_path).with_suffix(".pkl.gz")
if cached_path.exists():
with gzip.open(cached_path, "rb") as f:
sampling_interval, bssids, beacon_ids, data_tensors = pickle.load(f)
if self.sampling_interval == sampling_interval:
self.bssids_ = bssids
self.beacon_ids_ = beacon_ids
return data_tensors
data_frames = self._get_zipped_data(cache=False)
position_df = data_frames["TYPE_WAYPOINT"]
position_df = position_df.rename(columns=lambda x: f"pos:{x}")
# ---- WIFI ----
wifi_df = data_frames["TYPE_WIFI"]
def _apply(group):
bssid = group["bssid"].iloc[0]
return pd.Series(group["rssi"], name=f"wifi:{bssid}")
wifi_grouped = wifi_df.groupby("bssid").apply(_apply)
wifi_timestamps = sorted(wifi_grouped.index.get_level_values(1).unique())
wifi_split = pd.DataFrame(index=wifi_timestamps)
wifi_split.index.name = "time"
self.bssids_ = wifi_grouped.index.get_level_values(0).unique().to_list()
for bssid in self.bssids_:
try:
wifi_split[bssid] = wifi_grouped[bssid]
except ValueError:
# Sometimes more than one observation per time
wifi_split[bssid] = wifi_grouped[bssid][
~wifi_grouped[bssid].index.duplicated()
]
# ---- Beacons ----
beacon_df = data_frames.get("TYPE_BEACON")
if beacon_df is not None:
def _apply(group):
beacon_id = group["uuid"].iloc[0]
return | pd.Series(group["distance"], name=f"beacon:{beacon_id}") | pandas.Series |
import unittest
from datetime import datetime, timezone
from parameterized import parameterized
import pandas as pd
if __package__:
from ..ohlc import OHLC
else:
from aiokraken.model.ohlc import OHLC
"""
Test module.
This is intended for extensive testing, using parameterized, hypothesis or similar generation methods.
For simple use-case examples, we should rely on doctests.
"""
class TestOHLC(unittest.TestCase):
@parameterized.expand([
[pd.DataFrame( # One with "time" columns (like data from outside)
# TODO: proper Time, proper currencies...
[[1567039620, 8746.4, 8751.5, 8745.7, 8745.7, 8749.3, 0.09663298, 8],
[1567039680, 8745.7, 8747.3, 8745.7, 8747.3, 8747.3, 0.00929540, 1]],
# grab that from kraken documentation
columns=["time", "open", "high", "low", "close", "vwap", "volume", "count"]
), 1567041780],
[pd.DataFrame( # One with "datetime" column (like internal model)
# TODO: proper Time, proper currencies...
[[datetime.fromtimestamp(1567039620, tz=timezone.utc), 8746.4, 8751.5, 8745.7, 8745.7, 8749.3, 0.09663298, 8],
[datetime.fromtimestamp(1567039680, tz=timezone.utc), 8745.7, 8747.3, 8745.7, 8747.3, 8747.3, 0.00929540, 1]],
# grab that from kraken documentation
columns=["datetime", "open", "high", "low", "close", "vwap", "volume", "count"]
).set_index("datetime"), 1567041780],
])
def test_load_ok(self, df, last):
""" Verifying that expected data parses properly """
ohlc = OHLC(data=df, last=last)
import pandas.api.types as ptypes
num_cols = ["open", "high", "low", "close", "vwap", "volume", "count"]
assert all(ptypes.is_numeric_dtype(ohlc.dataframe[col]) for col in num_cols)
assert ohlc.dataframe.index.name == "datetime"
# Verify we have a timezone aware, ns precision datetime.
assert ptypes.is_datetime64tz_dtype(ohlc.dataframe.index.dtype)
assert | ptypes.is_datetime64_ns_dtype(ohlc.dataframe.index.dtype) | pandas.api.types.is_datetime64_ns_dtype |
import pickle
import streamlit as st
import pandas as pd
import numpy as np
import seaborn as sns
from scipy import stats
from datetime import datetime
from sklearn import preprocessing
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score, confusion_matrix
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import math
import re
def prediction_cycling(weight, duration, sports):
cycling_data = {'weight':[130,130,130,130,130,130,155,155,155,155,155,155,180,180,180,180,180,180,205,205,205,205,205,205],
'intensity/level':['<10 mph','>20 mph','10-11.9 mph','12-13.9 mph','14-15.9 mph','16-19 mph','<10 mph','>20 mph','10-11.9 mph','12-13.9 mph','14-15.9 mph','16-19 mph','<10 mph','>20 mph','10-11.9 mph','12-13.9 mph','14-15.9 mph','16-19 mph','<10 mph','>20 mph','10-11.9 mph','12-13.9 mph','14-15.9 mph','16-19 mph'],
'calories':[236, 944, 354, 472, 590, 708, 281, 1126, 422, 563, 704, 844, 327, 1308, 490, 654, 817, 981, 372, 1489, 558, 745, 931, 1117]}
cycling_df = pd.DataFrame(cycling_data)
cycling_df['intensity'] = [0 if x == '<10 mph' else 1 if x == '10-11.9 mph' else 2 if x == '12-13.9 mph' else 3 if x == '14-15.9 mph' else 4 if x == '16-19 mph' else 5 for x in cycling_df['intensity/level']]
cycling_X = cycling_df[["weight","intensity"]]
cycling_y = cycling_df[["calories"]]
cycling_X_train,cycling_X_test, cycling_y_train,cycling_y_test = train_test_split(cycling_X,cycling_y,test_size=0.2,random_state=42)
model1 = LinearRegression()
model1.fit(cycling_X_train,cycling_y_train)
cycling_y_pred = model1.predict([[weight, sports]])/60*duration
return cycling_y_pred
def prediction_running(weight, duration, sports):
running_data = {'weight':[130,130,130,130,130,130,130,130,130,130,130,155,155,155,155,155,155,155,155,155,155,155,180,180,180,180,180,180,180,180,180,180,180,205,205,205,205,205,205,205,205,205,205,205],
'intensity/level': ['5 mph', '5.2 mph', '6 mph', '6.7 mph', '7 mph', '7.5 mph', '8 mph', '8.6 mph', '9 mph', '10 mph', '10.9 mph','5 mph', '5.2 mph', '6 mph', '6.7 mph', '7 mph', '7.5 mph', '8 mph', '8.6 mph', '9 mph', '10 mph', '10.9 mph','5 mph', '5.2 mph', '6 mph', '6.7 mph', '7 mph', '7.5 mph', '8 mph', '8.6 mph', '9 mph', '10 mph', '10.9 mph','5 mph', '5.2 mph', '6 mph', '6.7 mph', '7 mph', '7.5 mph', '8 mph', '8.6 mph', '9 mph', '10 mph', '10.9 mph'],
'calories': [472, 531, 590, 649, 679, 738, 797, 826, 885, 944, 1062, 563, 633, 704, 774, 809, 880,950, 985, 1056, 1126, 1267, 654, 735, 817, 899,940, 1022, 1103, 1144, 1226, 1308, 1471, 745, 838, 931, 1024, 1070, 1163, 1256, 1303, 1396, 1489, 1675]}
running_df = pd.DataFrame(running_data)
running_df['intensity'] = [0 if x == '5 mph' else 1 if x == '5.2 mph' else 2 if x == '6 mph' else 3 if x == '6.7 mph' else 4 if x == '7 mph' else 5 if x == '7.5 mph' else 6 if x == '8 mph' else 7 if x == '8.6 mph' else 8 if x == '9 mph' else 9 if x == '10 mph' else 10 for x in running_df['intensity/level']]
running_X = running_df[["weight","intensity"]]
running_y = running_df[["calories"]]
running_X_train,running_X_test, running_y_train,running_y_test = train_test_split(running_X,running_y,test_size=0.2,random_state=42)
model2 = LinearRegression()
model2.fit(running_X_train,running_y_train)
running_y_pred = model2.predict([[weight, sports]])/60*duration
return running_y_pred
def prediction_walking(weight, duration, sports):
walking_data = {'weight':[130,130,130,130,130,130,130,155,155,155,155,155,155,155,180,180,180,180,180,180,180,205,205,205,205,205,205,205],
'intensity/level':['2.0 mph', '2.5 mph', '3.0 mph', '3.5 mph', '4.0 mph', '4.5 mph', '5.0 mph','2.0 mph', '2.5 mph', '3.0 mph', '3.5 mph', '4.0 mph', '4.5 mph', '5.0 mph', '2.0 mph', '2.5 mph', '3.0 mph', '3.5 mph', '4.0 mph', '4.5 mph', '5.0 mph', '2.0 mph', '2.5 mph', '3.0 mph', '3.5 mph', '4.0 mph', '4.5 mph', '5.0 mph'],
'calories': [148,177,195,224,295,372,472,176,211,232,267,352,443,563,204,245,270,311,409,515,654,233,279,307,354,465,586,745]}
walking_df = pd.DataFrame(walking_data)
walking_df['intensity'] = [0 if x == '2.0 mph' else 1 if x == '2.5 mph' else 2 if x == '3.0 mph' else 3 if x == '3.5 mph' else 4 if x == '4.0 mph' else 5 if x == '4.5 mph' else 6 for x in walking_df['intensity/level']]
walking_X = walking_df[["weight","intensity"]]
walking_y = walking_df[["calories"]]
walking_X_train,walking_X_test, walking_y_train,walking_y_test = train_test_split(walking_X,walking_y,test_size=0.2,random_state=42)
model3 = LinearRegression()
model3.fit(walking_X_train,walking_y_train)
walking_y_pred = model3.predict([[weight, sports]])/60*duration
return walking_y_pred
def prediction_swimming(weight, duration, sports):
global swimming_df
swimming_data = {'weight':[130,130,130,130,130,130,130,130,130,130,155,155,155,155,155,155,155,155,155,155,180,180,180,180,180,180,180,180,180,180,205,205,205,205,205,205,205,205,205,205],
'intensity/level':['freestyle fast','free style slow','backstroke','breaststroke','butterfly','leisurely','sidestroke','synchronized','trending water fast','trending water moderate','freestyle fast','free style slow','backstroke','breaststroke','butterfly','leisurely','sidestroke','synchronized','trending water fast','trending water moderate','freestyle fast','free style slow','backstroke','breaststroke','butterfly','leisurely','sidestroke','synchronized','trending water fast','trending water moderate','freestyle fast','free style slow','backstroke','breaststroke','butterfly','leisurely','sidestroke','synchronized','trending water fast','trending water moderate'],
'calories':[590,413,413,590,649,354,472,472,590,236,704,493,493,704,774,422,563,563,704,281,817,572,572,817,899,490,654,654,817,327,931,651,651,931,1024,558,745,745,931,372]}
swimming_df = | pd.DataFrame(swimming_data) | pandas.DataFrame |
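# Illustrative refactoring sketch (helper name is an assumption): prediction_cycling,
# prediction_running, prediction_walking and prediction_swimming all repeat the same
# fit-and-scale pattern, which could be factored into one helper taking the per-activity
# lookup table that already contains the numeric 'intensity' column:
def predict_calories(lookup_df, weight, duration, intensity_code):
    X = lookup_df[['weight', 'intensity']]
    y = lookup_df[['calories']]
    X_train, _, y_train, _ = train_test_split(X, y, test_size=0.2, random_state=42)
    model = LinearRegression()
    model.fit(X_train, y_train)
    # per-hour prediction scaled to the requested duration in minutes
    return model.predict([[weight, intensity_code]]) / 60 * duration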
import pandas as pd
import pytest
from bach import DataFrame
from bach.series.series_multi_level import SeriesNumericInterval
@pytest.fixture()
def interval_data_pdf() -> pd.DataFrame:
pdf = pd.DataFrame(
{
'lower': [0., 0., 3., 5., 1., 2., 3., 4., 5.],
'upper': [1., 1., 4., 6., 2., 3., 4., 5., 6.],
'a': [10, 15, 20, 25, 30, 35, 40, 45, 50],
},
)
pdf['bounds'] = '(]'
return pdf
def test_series_numeric_interval_to_pandas(engine, interval_data_pdf: pd.DataFrame) -> None:
df = DataFrame.from_pandas(engine=engine, df=interval_data_pdf, convert_objects=True)
df['range'] = SeriesNumericInterval.from_value(
base=df,
name='num_interval',
value={
'lower': df['lower'],
'upper': df['upper'],
'bounds': df['bounds'],
}
)
expected = pd.DataFrame(
{
'_index_0': [0, 1, 2, 3, 4, 5, 6, 7, 8],
'range': [
pd.Interval(left=0., right=1., closed='right'),
pd.Interval(left=0., right=1., closed='right'),
pd.Interval(left=3., right=4., closed='right'),
pd.Interval(left=5., right=6., closed='right'),
pd.Interval(left=1., right=2., closed='right'),
pd.Interval(left=2., right=3., closed='right'),
pd.Interval(left=3., right=4., closed='right'),
pd.Interval(left=4., right=5., closed='right'),
| pd.Interval(left=5., right=6., closed='right') | pandas.Interval |
import time
import numpy as np
from loguru import logger
import psycopg2.extras as extras
import os
import pandas as pd
import functools
logger.remove(0)
logger.add("sampling.log", level="DEBUG", enqueue=True, mode="w")
def timeit(f_py=None, to_log=None):
assert callable(f_py) or f_py is None
def _decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
if to_log:
logger.debug(
"Function '{}' executed in {:f} s", func.__name__, end - start
)
else:
print(f"| Finished in {end-start:.2f}s.")
return result
return wrapper
return _decorator(f_py) if callable(f_py) else _decorator
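# Illustrative usage (toy function): timeit can be applied bare or with arguments; with
# to_log=True the elapsed time goes to the loguru sink configured above, otherwise it is
# printed to stdout.
@timeit
def _example_timed():
    return sum(range(1000))  # calling this prints something like "| Finished in 0.00s."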
@timeit(to_log=True)
def get_load_data(
demand_scenario_id: int,
force_download=False,
**kwargs,
):
""" Query the load data from the database"""
fname = f"load_data-{demand_scenario_id}.csv"
if not os.path.exists(fname) or force_download:
df = read_from_db(
table_name="demand_timeseries",
where_clause=f"demand_scenario_id = '{demand_scenario_id}'",
**kwargs
)
df = df.sort_values(["load_zone_id", "raw_timepoint_id"])
df["date"] = df["timestamp_utc"].dt.strftime("%Y-%m-%d").values
df.to_csv(fname, index=False)
else:
df = | pd.read_csv(fname, parse_dates=["timestamp_utc"]) | pandas.read_csv |
# ©<NAME>, @brianruizy
# Created: 03-15-2020
import datetime
import platform
import pandas as pd
# Datasets scraped can be found in the following URL's:
# https://github.com/CSSEGISandData/COVID-19
# https://github.com/owid/covid-19-data/tree/master/public/data
# Different styles in zero-padding in date depend on operating systems
if platform.system() == 'Linux':
STRFTIME_DATA_FRAME_FORMAT = '%-m/%-d/%y'
elif platform.system() == 'Windows':
STRFTIME_DATA_FRAME_FORMAT = '%#m/%#d/%y'
else:
STRFTIME_DATA_FRAME_FORMAT = '%-m/%-d/%y'
def daily_report(date_string=None):
# Reports aggregate data, dating as far back as 01-22-2020
# If passing an arg, it must use the above date formatting, e.g. '01-22-2020'
report_directory = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/'
if date_string is None:
yesterday = datetime.date.today() - datetime.timedelta(days=2)
file_date = yesterday.strftime('%m-%d-%Y')
else:
file_date = date_string
df = pd.read_csv(report_directory + file_date + '.csv')
return df
def daily_confirmed():
# returns the daily reported cases for respective date,
# segmented globally and by country
df = pd.read_csv('https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/ecdc/new_cases.csv')
return df
def daily_deaths():
# returns the daily reported deaths for respective date,
df = pd.read_csv('https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/ecdc/new_deaths.csv')
return df
def confirmed_report():
# Returns time series version of total cases confirmed globally
df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
return df
def deaths_report():
# Returns time series version of total deaths globally
df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv')
return df
def recovered_report():
# Return time series version of total recoveries globally
df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv')
return df
def realtime_growth(date_string=None, weekly=False, monthly=False):
"""[summary]: consolidates all reports, to create time series of statistics.
Columns excluded with list comp. are: ['Province/State','Country/Region','Lat','Long'].
Args:
date_string: must use following date formatting '4/12/20'.
weekly: bool, returns df for last 8 weeks
monthly: bool, returns df for last 3 months
Returns:
[growth_df] -- [growth in series]
"""
    # fetch each report once instead of twice per line
    confirmed, deaths, recovered = confirmed_report(), deaths_report(), recovered_report()
    df1 = confirmed[confirmed.columns[4:]].sum()
    df2 = deaths[deaths.columns[4:]].sum()
    df3 = recovered[recovered.columns[4:]].sum()
growth_df = pd.DataFrame([])
growth_df['Confirmed'], growth_df['Deaths'], growth_df['Recovered'] = df1, df2, df3
growth_df.index = growth_df.index.rename('Date')
yesterday = pd.Timestamp('now').date() - pd.Timedelta(days=1)
if date_string is not None:
return growth_df.loc[growth_df.index == date_string]
if weekly is True:
weekly_df = pd.DataFrame([])
intervals = | pd.date_range(end=yesterday, periods=8, freq='7D') | pandas.date_range |
"""
Download, transform and simulate various datasets.
"""
# Author: <NAME> <<EMAIL>>
# License: MIT
from os.path import join
from urllib.parse import urljoin
from string import ascii_lowercase
from sqlite3 import connect
from rich.progress import track
import numpy as np
import pandas as pd
from .base import Datasets, FETCH_URLS
class ContinuousCategoricalDatasets(Datasets):
"""Class to download, transform and save datasets with both continuous
and categorical features."""
@staticmethod
def _modify_columns(data, categorical_features):
"""Rename and reorder columns of dataframe."""
X, y = data.drop(columns="target"), data.target
X.columns = range(len(X.columns))
return pd.concat([X, y], axis=1), categorical_features
def download(self):
"""Download the datasets."""
if self.names == "all":
func_names = [func_name for func_name in dir(self) if "fetch_" in func_name]
else:
func_names = [
f"fetch_{name}".lower().replace(" ", "_") for name in self.names
]
self.content_ = []
for func_name in track(func_names, description="Datasets"):
name = func_name.replace("fetch_", "").upper().replace("_", " ")
fetch_data = getattr(self, func_name)
data, categorical_features = self._modify_columns(*fetch_data())
self.content_.append((name, data, categorical_features))
return self
def save(self, path, db_name):
"""Save datasets."""
with connect(join(path, f"{db_name}.db")) as connection:
            for name, data, _ in self.content_:
data.to_sql(name, connection, index=False, if_exists="replace")
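    # Illustrative usage sketch (assumes the Datasets base class accepts a
    # ``names`` argument, as suggested by the use of ``self.names`` above):
    #   datasets = ContinuousCategoricalDatasets(names=['adult', 'abalone'])
    #   datasets.download().save(path='.', db_name='continuous_categorical')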
def fetch_adult(self):
"""Download and transform the Adult Data Set.
https://archive.ics.uci.edu/ml/datasets/Adult
"""
data = pd.read_csv(FETCH_URLS["adult"], header=None, na_values=" ?").dropna()
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [1, 3, 5, 6, 7, 8, 9, 13]
return data, categorical_features
def fetch_abalone(self):
"""Download and transform the Abalone Data Set.
https://archive.ics.uci.edu/ml/datasets/Abalone
"""
data = pd.read_csv(FETCH_URLS["abalone"], header=None)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [0]
return data, categorical_features
def fetch_acute(self):
"""Download and transform the Acute Inflammations Data Set.
https://archive.ics.uci.edu/ml/datasets/Acute+Inflammations
"""
data = pd.read_csv(
FETCH_URLS["acute"], header=None, sep="\t", decimal=",", encoding="UTF-16"
)
data["target"] = data[6].str[0] + data[7].str[0]
data.drop(columns=[6, 7], inplace=True)
categorical_features = list(range(1, 6))
return data, categorical_features
def fetch_annealing(self):
"""Download and transform the Annealing Data Set.
https://archive.ics.uci.edu/ml/datasets/Annealing
"""
data = pd.read_csv(FETCH_URLS["annealing"], header=None, na_values="?")
# some features are dropped; they have too many missing values
missing_feats = (data.isnull().sum(0) / data.shape[0]) < 0.1
data = data.iloc[:, missing_feats.values]
data[2].fillna(data[2].mode().squeeze(), inplace=True)
data = data.T.reset_index(drop=True).T
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [0, 1, 5, 9]
return data, categorical_features
def fetch_census(self):
"""Download and transform the Census-Income (KDD) Data Set.
https://archive.ics.uci.edu/ml/datasets/Census-Income+%28KDD%29
"""
data = pd.read_csv(FETCH_URLS["census"], header=None)
categorical_features = (
list(range(1, 5))
+ list(range(6, 16))
+ list(range(19, 29))
+ list(range(30, 38))
+ [39]
)
# some features are dropped; they have too many missing values
cols_ids = [1, 6, 9, 13, 14, 20, 21, 29, 31, 37]
categorical_features = np.argwhere(
np.delete(
data.rename(columns={k: f"nom_{k}" for k in categorical_features})
.columns.astype("str")
.str.startswith("nom_"),
cols_ids,
)
).squeeze()
data = data.drop(columns=cols_ids).T.reset_index(drop=True).T
        # some rows are dropped; they contain the rare ' Not in universe' placeholder
data = data.iloc[
data.applymap(lambda x: x != " Not in universe").all(1).values, :
]
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
return data, categorical_features
def fetch_contraceptive(self):
"""Download and transform the Contraceptive Method Choice Data Set.
https://archive.ics.uci.edu/ml/datasets/Contraceptive+Method+Choice
"""
data = pd.read_csv(FETCH_URLS["contraceptive"], header=None)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [4, 5, 6, 8]
return data, categorical_features
def fetch_covertype(self):
"""Download and transform the Covertype Data Set.
https://archive.ics.uci.edu/ml/datasets/Covertype
"""
data = pd.read_csv(FETCH_URLS["covertype"], header=None)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
wilderness_area = pd.Series(
np.argmax(data.iloc[:, 10:14].values, axis=1), name=10
)
soil_type = pd.Series(np.argmax(data.iloc[:, 14:54].values, axis=1), name=11)
data = (
data.drop(columns=list(range(10, 54)))
.join(wilderness_area)
.join(soil_type)[list(range(0, 12)) + ["target"]]
)
categorical_features = [10, 11]
return data, categorical_features
def fetch_credit_approval(self):
"""Download and transform the Credit Approval Data Set.
https://archive.ics.uci.edu/ml/datasets/Credit+Approval
"""
data = pd.read_csv(
FETCH_URLS["credit_approval"], header=None, na_values="?"
).dropna()
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [0, 3, 4, 5, 6, 8, 9, 11, 12]
return data, categorical_features
def fetch_dermatology(self):
"""Download and transform the Dermatology Data Set.
https://archive.ics.uci.edu/ml/datasets/Dermatology
"""
data = pd.read_csv(
FETCH_URLS["dermatology"], header=None, na_values="?"
).dropna()
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = list(range(data.shape[1] - 1))
categorical_features.remove(33)
return data, categorical_features
def fetch_echocardiogram(self):
"""Download and transform the Echocardiogram Data Set.
https://archive.ics.uci.edu/ml/datasets/Echocardiogram
"""
data = pd.read_csv(
FETCH_URLS["echocardiogram"],
header=None,
error_bad_lines=False,
warn_bad_lines=False,
na_values="?",
)
data.drop(columns=[10, 11], inplace=True)
data.dropna(inplace=True)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [1, 3]
return data, categorical_features
def fetch_flags(self):
"""Download and transform the Flags Data Set.
https://archive.ics.uci.edu/ml/datasets/Flags
"""
data = pd.read_csv(FETCH_URLS["flags"], header=None)
target = data[6].rename("target")
data = data.drop(columns=[0, 6]).T.reset_index(drop=True).T.join(target)
categorical_features = [
0,
1,
4,
8,
9,
10,
11,
12,
13,
14,
15,
21,
22,
23,
24,
25,
26,
27,
]
return data, categorical_features
def fetch_heart_disease(self):
"""Download and transform the Heart Disease Data Set.
https://archive.ics.uci.edu/ml/datasets/Heart+Disease
"""
data = (
pd.concat(
[
pd.read_csv(url, header=None, na_values="?")
for url in FETCH_URLS["heart_disease"]
],
ignore_index=True,
)
.drop(columns=[10, 11, 12])
.dropna()
)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [1, 2, 5, 6, 8]
return data, categorical_features
def fetch_hepatitis(self):
"""Download and transform the Hepatitis Data Set.
https://archive.ics.uci.edu/ml/datasets/Hepatitis
"""
data = (
pd.read_csv(FETCH_URLS["hepatitis"], header=None, na_values="?")
.drop(columns=[15, 18])
.dropna()
)
target = data[0].rename("target")
data = data.drop(columns=[0]).T.reset_index(drop=True).T.join(target)
categorical_features = list(range(1, 13)) + [16]
return data, categorical_features
def fetch_german_credit(self):
"""Download and transform the German Credit Data Set.
https://archive.ics.uci.edu/ml/datasets/Statlog+%28German+Credit+Data%29
"""
data = pd.read_csv(FETCH_URLS["german_credit"], header=None, sep=" ")
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = (
np.argwhere(data.iloc[0, :-1].apply(lambda x: str(x)[0] == "A").values)
.squeeze()
.tolist()
)
return data, categorical_features
def fetch_heart(self):
"""Download and transform the Heart Data Set.
http://archive.ics.uci.edu/ml/datasets/statlog+(heart)
"""
data = pd.read_csv(FETCH_URLS["heart"], header=None, delim_whitespace=True)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [1, 2, 5, 6, 8, 10, 12]
return data, categorical_features
def fetch_thyroid(self):
"""Download and transform the Thyroid Disease Data Set.
Label 0 corresponds to no disease found.
Label 1 corresponds to one or multiple diseases found.
https://archive.ics.uci.edu/ml/datasets/Thyroid+Disease
"""
data = (
pd.read_csv(FETCH_URLS["thyroid"], header=None, na_values="?")
.drop(columns=27)
.dropna()
.T.reset_index(drop=True)
.T
)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
data["target"] = (
data["target"].apply(lambda x: x.split("[")[0]) != "-"
).astype(int)
categorical_features = [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
18,
20,
22,
24,
26,
27,
]
return data, categorical_features
class MultiClassDatasets(Datasets):
"""Class to download, transform and save multi-class datasets."""
def fetch_first_order_theorem(self):
"""Download and transform the First Order Theorem Data Set.
https://www.openml.org/d/1475
"""
data = pd.read_csv(FETCH_URLS["first_order_theorem"])
data.rename(columns={"Class": "target"}, inplace=True)
return data
def fetch_gas_drift(self):
"""Download and transform the Gas Drift Data Set.
https://www.openml.org/d/1476
"""
data = pd.read_csv(FETCH_URLS["gas_drift"])
data.rename(columns={"Class": "target"}, inplace=True)
return data
def fetch_autouniv_au7(self):
"""Download and transform the AutoUniv au7 Data Set
https://www.openml.org/d/1552
"""
data = pd.read_csv(FETCH_URLS["autouniv_au7"])
data.rename(columns={"Class": "target"}, inplace=True)
data.target = data.target.apply(lambda x: x.replace("class", "")).astype(int)
mask = (data.iloc[:, :-1].nunique() > 10).tolist()
mask.append(True)
data = data.loc[:, mask].copy()
return data
def fetch_autouniv_au4(self):
"""Download and transform the AutoUniv au4 Data Set
https://www.openml.org/d/1548
"""
data = pd.read_csv(FETCH_URLS["autouniv_au4"])
data.rename(columns={"Class": "target"}, inplace=True)
data.target = data.target.apply(lambda x: x.replace("class", "")).astype(int)
mask = (data.iloc[:, :-1].nunique() > 10).tolist()
mask.append(True)
data = data.loc[:, mask].copy()
return data
def fetch_mice_protein(self):
"""Download and transform the Mice Protein Data Set
https://www.openml.org/d/40966
"""
data = pd.read_csv(FETCH_URLS["mice_protein"])
data.rename(columns={"class": "target"}, inplace=True)
data.drop(columns=["MouseID"], inplace=True)
data.replace("?", np.nan, inplace=True)
mask = (data.iloc[:, :-1].nunique() > 10).tolist()
mask.append(True)
mask2 = data.isna().sum() < 10
data = data.loc[:, mask & mask2].dropna().copy()
data.iloc[:, :-1] = data.iloc[:, :-1].astype(float)
mapper = {v: k for k, v in enumerate(data.target.unique())}
data.target = data.target.map(mapper)
return data
def fetch_steel_plates(self):
"""Download and transform the Steel Plates Fault Data Set.
https://www.openml.org/d/40982
"""
data = pd.read_csv(FETCH_URLS["steel_plates"])
mask = (data.iloc[:, :-1].nunique() > 10).tolist()
mask.append(True)
data = data.loc[:, mask].copy()
mapper = {v: k for k, v in enumerate(data.target.unique())}
data.target = data.target.map(mapper)
return data
def fetch_cardiotocography(self):
"""Download and transform the Cardiotocography Data Set.
https://www.openml.org/d/1560
"""
data = pd.read_csv(FETCH_URLS["cardiotocography"])
data.rename(columns={"Class": "target"}, inplace=True)
mask = (data.iloc[:, :-1].nunique() > 10).tolist()
mask.append(True)
data = data.loc[:, mask].copy()
return data
def fetch_waveform(self):
"""Download and transform the Waveform Database Generator (version 2) Data Set.
https://www.openml.org/d/60
"""
data = pd.read_csv(FETCH_URLS["waveform"])
data.rename(columns={"class": "target"}, inplace=True)
return data
def fetch_volkert(self):
"""Download and transform the Volkert Data Set.
https://www.openml.org/d/41166
"""
data = pd.read_csv(FETCH_URLS["volkert"])
data.rename(columns={"class": "target"}, inplace=True)
mask = (data.iloc[:, 1:].nunique() > 100).tolist()
mask.insert(0, True)
data = data.loc[:, mask].copy()
return data
def fetch_vehicle(self):
"""Download and transform the Vehicle Silhouettes Data Set.
https://archive.ics.uci.edu/ml/datasets/Statlog+(Vehicle+Silhouettes)
"""
data = pd.DataFrame()
for letter in ascii_lowercase[0:9]:
partial_data = pd.read_csv(
urljoin(FETCH_URLS["vehicle"], "xa%s.dat" % letter),
header=None,
delim_whitespace=True,
)
partial_data = partial_data.rename(columns={18: "target"})
data = data.append(partial_data)
mapper = {v: k for k, v in enumerate(data.target.unique())}
data.target = data.target.map(mapper)
return data
def fetch_asp_potassco(self):
"""Download and transform the ASP-POTASSCO Data Set.
https://www.openml.org/d/41705
"""
data = pd.read_csv(FETCH_URLS["asp_potassco"], na_values="?")
data.dropna(inplace=True)
data["target"] = data["algorithm"]
data.drop(columns=["instance_id", "algorithm"], inplace=True)
mask = (data.iloc[:, :-1].nunique() > 100).tolist()
mask.append(True)
data = data.loc[:, mask].copy()
mapper = {v: k for k, v in enumerate(data.target.unique())}
data.target = data.target.map(mapper)
return data
def fetch_wine_quality(self):
"""Download and transform the Wine Quality Data Set.
https://www.openml.org/d/40691
"""
data = pd.read_csv(FETCH_URLS["wine_quality"])
data.rename(columns={"class": "target"}, inplace=True)
return data
def fetch_mfeat_zernike(self):
"""Download and transform the Multiple Features Dataset: Zernike Data Set.
https://www.openml.org/d/22
"""
data = pd.read_csv(FETCH_URLS["mfeat_zernike"])
data.drop_duplicates(inplace=True)
data.rename(columns={"class": "target"}, inplace=True)
return data
def fetch_gesture_segmentation(self):
"""Download and transform the Gesture Phase Segmentation Data Set.
https://www.openml.org/d/4538
"""
data = pd.read_csv(FETCH_URLS["gesture_segmentation"])
data.rename(columns={"Phase": "target"}, inplace=True)
mapper = {v: k for k, v in enumerate(data.target.unique())}
data.target = data.target.map(mapper)
return data
def fetch_texture(self):
"""Download and transform the Texture Data Set.
https://www.openml.org/d/40499
"""
data = pd.read_csv(FETCH_URLS["texture"])
data.drop_duplicates(inplace=True)
data.rename(columns={"Class": "target"}, inplace=True)
return data
def fetch_usps(self):
"""Download and transform the USPS Data Set.
https://www.openml.org/data/get_csv/19329737/usps.arff
"""
data = pd.read_csv(FETCH_URLS["usps"])
data.rename(columns={"int0": "target"}, inplace=True)
return data
def fetch_japanese_vowels(self):
"""Download and transform the Japanese Vowels Data Set.
https://www.openml.org/d/375
"""
data = pd.read_csv(FETCH_URLS["japanese_vowels"])
data.rename(columns={"speaker": "target"}, inplace=True)
data.drop(columns=["utterance", "frame"], inplace=True)
return data
def fetch_pendigits(self):
"""Download and transform the Pen-Based Recognition of Handwritten
Digits Data Set.
https://www.openml.org/d/32
"""
data = pd.read_csv(FETCH_URLS["pendigits"])
data.rename(columns={"class": "target"}, inplace=True)
return data
def fetch_image_segmentation(self):
"""Download and transform the Image Segmentation Data Set.
https://www.openml.org/d/40984
"""
data = pd.read_csv(FETCH_URLS["image_segmentation"])
data.drop(columns=data.columns[:5], inplace=True)
data.rename(columns={"class": "target"}, inplace=True)
mapper = {v: k for k, v in enumerate(data.target.unique())}
data.target = data.target.map(mapper)
return data
def fetch_baseball(self):
"""Download and transform the Baseball Hall of Fame Data Set.
https://www.openml.org/d/185
"""
data = | pd.read_csv(FETCH_URLS["baseball"], na_values="?") | pandas.read_csv |
import operator
from shutil import get_terminal_size
from typing import Dict, Hashable, List, Type, Union, cast
from warnings import warn
import numpy as np
from pandas._config import get_option
from pandas._libs import algos as libalgos, hashtable as htable
from pandas._typing import ArrayLike, Dtype, Ordered, Scalar
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
Substitution,
cache_readonly,
deprecate_kwarg,
doc,
)
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.dtypes.cast import (
coerce_indexer_dtype,
maybe_cast_to_extension_array,
maybe_infer_to_datetimelike,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
is_categorical_dtype,
is_datetime64_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
from pandas.core import ops
from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algorithms
from pandas.core.algorithms import _get_data_algo, factorize, take, take_1d, unique1d
from pandas.core.array_algos.transforms import shift
from pandas.core.arrays.base import ExtensionArray, _extension_array_shared_docs
from pandas.core.base import NoNewAttributesMixin, PandasObject, _shared_docs
import pandas.core.common as com
from pandas.core.construction import array, extract_array, sanitize_array
from pandas.core.indexers import check_array_indexer, deprecate_ndim_indexing
from pandas.core.missing import interpolate_2d
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.sorting import nargsort
from pandas.io.formats import console
def _cat_compare_op(op):
opname = f"__{op.__name__}__"
@unpack_zerodim_and_defer(opname)
def func(self, other):
if is_list_like(other) and len(other) != len(self):
# TODO: Could this fail if the categories are listlike objects?
raise ValueError("Lengths must match.")
if not self.ordered:
if opname in ["__lt__", "__gt__", "__le__", "__ge__"]:
raise TypeError(
"Unordered Categoricals can only compare equality or not"
)
if isinstance(other, Categorical):
            # Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = "Categoricals can only be compared if 'categories' are the same."
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif self.ordered and not (self.categories == other.categories).all():
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError(
"Categoricals can only be compared if 'ordered' is the same"
)
if not self.ordered and not self.categories.equals(other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
f = getattr(self._codes, opname)
ret = f(other_codes)
mask = (self._codes == -1) | (other_codes == -1)
if mask.any():
                # In other Series, this leads to False, so do that here too
if opname == "__ne__":
ret[(self._codes == -1) & (other_codes == -1)] = True
else:
ret[mask] = False
return ret
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
ret = getattr(self._codes, opname)(i)
if opname not in {"__eq__", "__ge__", "__gt__"}:
# check for NaN needed if we are not equal or larger
mask = self._codes == -1
ret[mask] = False
return ret
else:
if opname == "__eq__":
return np.zeros(len(self), dtype=bool)
elif opname == "__ne__":
return np.ones(len(self), dtype=bool)
else:
raise TypeError(
f"Cannot compare a Categorical for op {opname} with a "
"scalar, which is not a category."
)
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if opname in ["__eq__", "__ne__"]:
return getattr(np.array(self), opname)(np.array(other))
raise TypeError(
f"Cannot compare a Categorical for op {opname} with "
f"type {type(other)}.\nIf you want to compare values, "
"use 'np.asarray(cat) <op> other'."
)
func.__name__ = opname
return func
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
    This is a helper method for :meth:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
    cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except (KeyError, TypeError):
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
class Categorical(ExtensionArray, PandasObject):
"""
Represent a categorical variable in classic R / S-plus fashion.
    `Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : bool, default False
        Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical.
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : bool
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
See Also
--------
CategoricalDtype : Type for categorical data.
CategoricalIndex : An Index with an underlying ``Categorical``.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html>`_
for more.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
"""
    # For comparisons, so that numpy uses our implementation of the compare
    # ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
# tolist is not actually deprecated, just suppressed in the __dir__
_deprecations = PandasObject._deprecations | frozenset(["tolist"])
_typ = "categorical"
def __init__(
self, values, categories=None, ordered=None, dtype=None, fastpath=False
):
dtype = CategoricalDtype._from_values_or_dtype(
values, categories, ordered, dtype
)
# At this point, dtype is always a CategoricalDtype, but
# we may have dtype.categories be None, and we need to
# infer categories in a factorization step further below
if fastpath:
self._codes = coerce_indexer_dtype(values, dtype.categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
# By convention, empty lists result in object dtype:
sanitize_dtype = np.dtype("O") if len(values) == 0 else None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError as err:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError(
"'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument."
) from err
except ValueError as err:
# FIXME
raise NotImplementedError(
"> 1 ndim Categorical are not supported at this time"
) from err
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values.dtype):
old_codes = (
values._values.codes if isinstance(values, ABCSeries) else values.codes
)
codes = recode_for_categories(
old_codes, values.dtype.categories, dtype.categories
)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = -np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""
The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
        Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
            number of new categories is not equal to the number of old categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if self.dtype.categories is not None and len(self.dtype.categories) != len(
new_dtype.categories
):
raise ValueError(
"new categories need to have the same number of "
"items as the old categories!"
)
self._dtype = new_dtype
@property
def ordered(self) -> Ordered:
"""
Whether the categories have an ordered relationship.
"""
return self.dtype.ordered
@property
def dtype(self) -> CategoricalDtype:
"""
The :class:`~pandas.api.types.CategoricalDtype` for this instance.
"""
return self._dtype
@property
def _constructor(self) -> Type["Categorical"]:
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def _formatter(self, boxed=False):
# Defer to CategoricalFormatter's formatter.
return None
def copy(self) -> "Categorical":
"""
Copy constructor.
"""
return self._constructor(
values=self._codes.copy(), dtype=self.dtype, fastpath=True
)
def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike:
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
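        Examples
        --------
        A small example with illustrative values:
        >>> cat = pd.Categorical(['a', 'b'])
        >>> cat.astype(object)
        array(['a', 'b'], dtype=object)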
"""
if is_categorical_dtype(dtype):
dtype = cast(Union[str, CategoricalDtype], dtype)
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
if is_extension_array_dtype(dtype):
return array(self, dtype=dtype, copy=copy) # type: ignore # GH 28770
if is_integer_dtype(dtype) and self.isna().any():
raise ValueError("Cannot convert float NaN to integer")
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def size(self) -> int:
"""
Return the len of myself.
"""
return self._codes.size
@cache_readonly
def itemsize(self) -> int:
"""
return the size of a single category
"""
return self.categories.itemsize
def tolist(self) -> List[Scalar]:
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
to_list = tolist
@classmethod
def _from_inferred_categories(
cls, inferred_categories, inferred_codes, dtype, true_values=None
):
"""
Construct a Categorical from inferred values.
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
true_values : list, optional
If none are provided, the default ones are
"True", "TRUE", and "true."
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (
isinstance(dtype, CategoricalDtype) and dtype.categories is not None
)
if known_categories:
# Convert to a specialized type with `dtype` if specified.
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors="coerce")
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors="coerce")
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors="coerce")
elif dtype.categories.is_boolean():
if true_values is None:
true_values = ["True", "TRUE", "true"]
cats = cats.isin(true_values)
if known_categories:
# Recode from observation order to dtype.categories order.
categories = dtype.categories
codes = recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# Sort categories and recode for unknown categories.
unsorted = cats.copy()
categories = cats.sort_values()
codes = recode_for_categories(inferred_codes, unsorted, categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories=None, ordered=None, dtype=None):
"""
Make a Categorical type from codes and categories or dtype.
This constructor is useful if you already have codes and
categories/dtype and so do not need the (computation intensive)
factorization step, which is usually done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like of int
An integer array, where each integer points to a category in
categories or dtype.categories, or else is -1 for NaN.
categories : index-like, optional
The categories for the categorical. Items need to be unique.
If the categories are not given here, then they must be provided
in `dtype`.
ordered : bool, optional
Whether or not this categorical is treated as an ordered
categorical. If not given here or in `dtype`, the resulting
categorical will be unordered.
dtype : CategoricalDtype or "category", optional
If :class:`CategoricalDtype`, cannot be used together with
`categories` or `ordered`.
.. versionadded:: 0.24.0
When `dtype` is provided, neither `categories` nor `ordered`
should be provided.
Returns
-------
Categorical
Examples
--------
>>> dtype = pd.CategoricalDtype(['a', 'b'], ordered=True)
>>> pd.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype)
[a, b, a, b]
Categories (2, object): [a < b]
"""
dtype = CategoricalDtype._from_values_or_dtype(
categories=categories, ordered=ordered, dtype=dtype
)
if dtype.categories is None:
msg = (
"The categories must be provided in 'categories' or "
"'dtype'. Both were None."
)
raise ValueError(msg)
if is_extension_array_dtype(codes) and is_integer_dtype(codes):
# Avoid the implicit conversion of Int to object
if isna(codes).any():
raise ValueError("codes cannot contain NA values")
codes = codes.to_numpy(dtype=np.int64)
else:
codes = np.asarray(codes)
if len(codes) and not is_integer_dtype(codes):
raise ValueError("codes need to be array-like integers")
if len(codes) and (codes.max() >= len(dtype.categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and len(categories)-1")
return cls(codes, dtype=dtype, fastpath=True)
@property
def codes(self) -> np.ndarray:
"""
The category codes of this categorical.
Codes are an array of integers which are the positions of the actual
values in the categories array.
        There is no setter; use the other categorical methods and the normal item
        setter to change values in the categorical.
Returns
-------
ndarray[int]
A non-writable view of the `codes` array.
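        Examples
        --------
        A small example with illustrative values:
        >>> pd.Categorical(['a', 'b', 'a']).codes
        array([0, 1, 0], dtype=int8)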
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_categories(self, categories, fastpath=False):
"""
Sets new categories inplace
Parameters
----------
fastpath : bool, default False
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories, self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (
not fastpath
and self.dtype.categories is not None
and len(new_dtype.categories) != len(self.dtype.categories)
):
raise ValueError(
"new categories need to have the same number of "
"items than the old categories!"
)
self._dtype = new_dtype
def _set_dtype(self, dtype: CategoricalDtype) -> "Categorical":
"""
Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = recode_for_categories(self.codes, self.categories, dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Set the ordered attribute to the boolean value.
Parameters
----------
value : bool
Set whether this categorical is ordered (True) or not (False).
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to the value.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Set the Categorical to be ordered.
Parameters
----------
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to True.
Returns
-------
Categorical
Ordered Categorical.
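        Examples
        --------
        A small example with illustrative values:
        >>> c = pd.Categorical(['a', 'b', 'a'])
        >>> c.as_ordered()
        [a, b, a]
        Categories (2, object): [a < b]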
"""
inplace = validate_bool_kwarg(inplace, "inplace")
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Set the Categorical to be unordered.
Parameters
----------
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to False.
Returns
-------
Categorical
Unordered Categorical.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False, inplace=False):
"""
Set the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
        set to NaN). If `rename==True`, the categories will simply be renamed
        (fewer or more items than in the old categories will result in values set to
        NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
        On the other hand, this method does not do checks (e.g., whether the
        old categories are included in the new categories on a reorder), which
        can result in surprising changes, for example when using special string
        dtypes, which do not consider an S1 string equal to a single-char
        Python string.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, default False
            Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : bool, default False
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : bool, default False
Whether or not to reorder the categories in-place or return a copy
of this categorical with reordered categories.
Returns
-------
Categorical with reordered categories or None if inplace.
Raises
------
ValueError
If new_categories does not validate as categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
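        Examples
        --------
        A small example with illustrative values:
        >>> c = pd.Categorical(['a', 'b', 'c'])
        >>> c.set_categories(['c', 'b'])
        [NaN, b, c]
        Categories (2, object): [c, b]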
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if cat.dtype.categories is not None and len(new_dtype.categories) < len(
cat.dtype.categories
):
# remove all _codes which are larger and set to -1/NaN
cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
codes = recode_for_categories(
cat.codes, cat.categories, new_dtype.categories
)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
"""
Rename categories.
Parameters
----------
new_categories : list-like, dict-like or callable
New categories which will replace old categories.
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0.
inplace : bool, default False
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
            items as the current categories or do not validate as categories
See Also
--------
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.copy()
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item) for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
"""
Reorder categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, optional
            Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : bool, default False
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
See Also
--------
rename_categories : Rename categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
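        Examples
        --------
        A small example with illustrative values:
        >>> c = pd.Categorical(['a', 'b', 'a'])
        >>> c.reorder_categories(['b', 'a'], ordered=True)
        [a, b, a]
        Categories (2, object): [b < a]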
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if set(self.dtype.categories) != set(new_categories):
raise ValueError(
"items in new_categories are not the same as in old categories"
)
return self.set_categories(new_categories, ordered=ordered, inplace=inplace)
def add_categories(self, new_categories, inplace=False):
"""
Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : bool, default False
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
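        Examples
        --------
        A small example with illustrative values:
        >>> c = pd.Categorical(['c', 'b', 'c'])
        >>> c.add_categories(['d', 'a'])
        [c, b, c]
        Categories (4, object): [b, c, d, a]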
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
raise ValueError(
f"new categories must not include old categories: {already_included}"
)
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
"""
Remove the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : bool, default False
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
Raises
------
ValueError
If the removals are not contained in the categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
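        Examples
        --------
        A small example with illustrative values:
        >>> c = pd.Categorical(['a', 'b', 'c', 'a'])
        >>> c.remove_categories(['a'])
        [NaN, b, c, NaN]
        Categories (2, object): [b, c]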
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_list_like(removals):
removals = [removals]
removal_set = set(removals)
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = {x for x in not_included if notna(x)}
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
raise ValueError(f"removals must all be in old categories: {not_included}")
return self.set_categories(
new_categories, ordered=self.ordered, rename=False, inplace=inplace
)
def remove_unused_categories(self, inplace=False):
"""
Remove categories which are not used.
Parameters
----------
inplace : bool, default False
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
set_categories : Set the categories to the specified ones.
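        Examples
        --------
        A small example with illustrative values:
        >>> c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
        >>> c.remove_unused_categories()
        [a, b, a]
        Categories (2, object): [a, b]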
"""
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(
new_categories, ordered=self.ordered
)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned. NaN values are unaffected.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(
self._codes.copy(), categories=new_categories, ordered=self.ordered
)
except ValueError:
# NA values are represented in self._codes with -1
# np.take causes NA values to take final element in new_categories
if np.any(self._codes == -1):
new_categories = new_categories.insert(len(new_categories), np.nan)
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op(operator.eq)
__ne__ = _cat_compare_op(operator.ne)
__lt__ = _cat_compare_op(operator.lt)
__gt__ = _cat_compare_op(operator.gt)
__le__ = _cat_compare_op(operator.le)
__ge__ = _cat_compare_op(operator.ge)
# for Series/ndarray like compat
@property
def shape(self):
"""
Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods, fill_value=None):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
fill_value : object, optional
The scalar value to use for newly introduced missing values.
.. versionadded:: 0.24.0
Returns
-------
shifted : Categorical
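        Examples
        --------
        A small example with illustrative values:
        >>> c = pd.Categorical(['a', 'b', 'c'])
        >>> c.shift(1)
        [NaN, a, b]
        Categories (3, object): [a, b, c]
        >>> c.shift(1, fill_value='a')
        [a, a, b]
        Categories (3, object): [a, b, c]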
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
fill_value = self._validate_fill_value(fill_value)
codes = shift(codes.copy(), periods, axis=0, fill_value=fill_value)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def _validate_fill_value(self, fill_value):
"""
Convert a user-facing fill_value to a representation to use with our
underlying ndarray, raising ValueError if this is not possible.
Parameters
----------
fill_value : object
Returns
-------
fill_value : int
Raises
------
ValueError
"""
if isna(fill_value):
fill_value = -1
elif fill_value in self.categories:
fill_value = self.categories.get_loc(fill_value)
else:
raise ValueError(
f"'fill_value={fill_value}' is not present "
"in this Categorical's categories"
)
return fill_value
def __array__(self, dtype=None) -> np.ndarray:
"""
The numpy array interface.
Returns
-------
numpy.array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype.
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
            # we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
# for binary ops, use our custom dunder methods
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
# for all other cases, raise for now (similarly as what happens in
# Series.__array_prepare__)
raise TypeError(
f"Object with dtype {self.dtype} cannot perform "
f"the numpy op {ufunc.__name__}"
)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception("invalid pickle state")
if "_dtype" not in state:
state["_dtype"] = CategoricalDtype(state["_categories"], state["_ordered"])
for k, v in state.items():
setattr(self, k, v)
@property
def T(self) -> "Categorical":
"""
        Return the transpose, which for a 1-D Categorical is the object itself.
"""
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(deep=deep)
@doc(_shared_docs["searchsorted"], klass="Categorical")
def searchsorted(self, value, side="left", sorter=None):
# searchsorted is very performance sensitive. By converting codes
# to same dtype as self.codes, we get much faster performance.
if is_scalar(value):
codes = self.categories.get_loc(value)
codes = self.codes.dtype.type(codes)
else:
locs = [self.categories.get_loc(x) for x in value]
codes = np.array(locs, dtype=self.codes.dtype)
return self.codes.searchsorted(codes, side=side, sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See Also
--------
isna : Top-level isna.
isnull : Alias of isna.
Categorical.notna : Boolean inverse of Categorical.isna.
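        Examples
        --------
        A small example with illustrative values:
        >>> pd.Categorical(['a', None, 'b']).isna()
        array([False,  True, False])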
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See Also
--------
notna : Top-level notna.
notnull : Alias of notna.
Categorical.isna : Boolean inverse of Categorical.notna.
"""
return ~self.isna()
notnull = notna
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Return a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : bool, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
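        Examples
        --------
        A small example with illustrative values:
        >>> c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
        >>> c.value_counts()
        a    2
        b    1
        c    0
        dtype: int64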
"""
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = np.bincount(obs, minlength=ncat or 0)
else:
count = np.bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype, fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype="int64")
def _internal_get_values(self):
"""
Return the values.
For internal compatibility with pandas formatting.
Returns
-------
np.ndarray or Index
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods.
"""
# if we are a datetime and period index, return Index to keep metadata
if needs_i8_conversion(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
elif is_integer_dtype(self.categories) and -1 in self._codes:
return self.categories.astype("object").take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError(
f"Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n"
)
def _values_for_argsort(self):
return self._codes
def argsort(self, ascending=True, kind="quicksort", **kwargs):
"""
Return the indices that would sort the Categorical.
.. versionchanged:: 0.25.0
Changed to sort missing values at the end.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
**kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
numpy.array
See Also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
Missing values are placed at the end
>>> cat = pd.Categorical([2, None, 1])
>>> cat.argsort()
array([2, 0, 1])
"""
return super().argsort(ascending=ascending, kind=kind, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position="last"):
"""
Sort the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : bool, default False
Do operation in place.
ascending : bool, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2, 2, NaN, 5]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2, 2, 5, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2, 2, 5]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5, 2, 2]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if na_position not in ["last", "first"]:
raise ValueError(f"invalid na_position: {repr(na_position)}")
sorted_idx = nargsort(self, ascending=ascending, na_position=na_position)
if inplace:
self._codes = self._codes[sorted_idx]
else:
return self._constructor(
values=self._codes[sorted_idx], dtype=self.dtype, fastpath=True
)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy.array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype("float64")
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def view(self, dtype=None):
if dtype is not None:
raise NotImplementedError(dtype)
return self._constructor(values=self._codes, dtype=self.dtype, fastpath=True)
def to_dense(self):
"""
Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
warn(
"Categorical.to_dense is deprecated and will be removed in "
"a future version. Use np.asarray(cat) instead.",
FutureWarning,
stacklevel=2,
)
return np.asarray(self)
def fillna(self, value=None, method=None, limit=None):
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError(
"specifying a limit for fillna has not been implemented yet"
)
codes = self._codes
# pad / bfill
if method is not None:
# TODO: dispatch when self.categories is EA-dtype
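            # interpolate_2d expects a 2-D array, so treat the values as a single
            # row, pad/backfill that row, then recover the codes from the result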
values = np.asarray(self).reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None, value).astype(
self.categories.dtype
)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, (np.ndarray, Categorical, ABCSeries)):
# We get ndarray or Categorical if called via Series.fillna,
# where it will unwrap another aligned Series before getting here
mask = ~algorithms.isin(value, self.categories)
if not isna(value[mask]).all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
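                # only positions that are currently missing (code -1) get filled,
                # using the aligned fill values at those same positions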
indexer = np.where(codes == -1)
codes = codes.copy()
codes[indexer] = values_codes[indexer]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError(
f"'value' parameter must be a scalar, dict "
f"or Series, but you passed a {type(value).__name__}"
)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take(self, indexer, allow_fill: bool = False, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of int
The indices in `self` to take. The meaning of negative values in
`indexer` depends on the value of `allow_fill`.
allow_fill : bool, default False
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
            * True: negative values in `indices` indicate missing values.
              These values are set to `fill_value`. Any other
              negative values raise a ``ValueError``.
.. versionchanged:: 1.0.0
Default value changed from ``True`` to ``False``.
fill_value : object
The value to use for `indices` that are missing (-1), when
``allow_fill=True``. This should be the category, i.e. a value
in ``self.categories``, not a code.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
See Also
--------
Series.take : Similar method for Series.
numpy.ndarray.take : Similar method for NumPy arrays.
Examples
--------
>>> cat = pd.Categorical(['a', 'a', 'b'])
>>> cat
[a, a, b]
Categories (2, object): [a, b]
Specify ``allow_fill==False`` to have negative indices mean indexing
from the right.
>>> cat.take([0, -1, -2], allow_fill=False)
[a, b, a]
Categories (2, object): [a, b]
With ``allow_fill=True``, indices equal to ``-1`` mean "missing"
values that should be filled with the `fill_value`, which is
``np.nan`` by default.
>>> cat.take([0, -1, -1], allow_fill=True)
[a, NaN, NaN]
Categories (2, object): [a, b]
The fill value can be specified.
>>> cat.take([0, -1, -1], allow_fill=True, fill_value='a')
[a, a, a]
Categories (2, object): [a, b]
Specifying a fill value that's not in ``self.categories``
will raise a ``TypeError``.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill:
# convert user-provided `fill_value` to codes
fill_value = self._validate_fill_value(fill_value)
codes = take(self._codes, indexer, allow_fill=allow_fill, fill_value=fill_value)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill: bool = False, fill_value=None):
        # GH#27745 deprecate alias that other EAs don't have
warn(
"Categorical.take_nd is deprecated, use Categorical.take instead",
FutureWarning,
stacklevel=2,
)
return self.take(indexer, allow_fill=allow_fill, fill_value=fill_value)
def __len__(self) -> int:
"""
The length of this Categorical.
"""
return len(self._codes)
def __iter__(self):
"""
Returns an Iterator over the values of this Categorical.
"""
return iter(self._internal_get_values().tolist())
def __contains__(self, key) -> bool:
"""
Returns True if `key` is in this Categorical.
"""
# if key is a NaN, check if any NaN is in self.
if is_scalar(key) and isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True) -> str:
"""
        a short repr displaying only max_vals and an optional (but default)
        footer
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num) :]._get_repr(length=False, footer=False)
result = f"{head[:-1]}, ..., {tail[1:]}"
if footer:
result = f"{result}\n{self._repr_footer()}"
return str(result)
def _repr_categories(self):
"""
return the base repr for the categories
"""
max_categories = (
10
if get_option("display.max_categories") == 0
else get_option("display.max_categories")
)
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self) -> str:
"""
Returns a string representation of the footer.
"""
category_strs = self._repr_categories()
dtype = str(self.categories.dtype)
levheader = f"Categories ({len(self.categories)}, {dtype}): "
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
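        # lay the categories out one per separator, wrapping onto a new line
        # (aligned under the header) whenever max_width would be exceeded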
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
        # replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self) -> str:
info = self._repr_categories_info()
return f"Length: {len(self)}\n{info}"
def _get_repr(self, length=True, na_rep="NaN", footer=True) -> str:
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(
self, length=length, na_rep=na_rep, footer=footer
)
result = formatter.to_string()
return str(result)
def __repr__(self) -> str:
"""
String representation.
"""
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = f"[], {msg}"
return result
def _maybe_coerce_indexer(self, indexer):
"""
return an indexer coerced to the codes dtype
"""
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == "i":
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
"""
Return an item.
"""
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
key = check_array_indexer(self, key)
result = self._codes[key]
if result.ndim > 1:
deprecate_ndim_indexing(result)
return result
return self._constructor(result, dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
"""
Item assignment.
Raises
------
ValueError
            If (one or more) value is not in categories or if an assigned
            `Categorical` does not have the same categories
"""
value = extract_array(value, extract_numpy=True)
# require identical categories set
if isinstance(value, Categorical):
if not is_dtype_equal(self, value):
raise ValueError(
"Cannot set a Categorical with another, "
"without identical categories"
)
if not self.categories.equals(value.categories):
new_codes = recode_for_categories(
value.codes, value.categories, self.categories
)
value = Categorical.from_codes(new_codes, dtype=self.dtype)
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError(
"Cannot setitem on a Categorical with a new "
"category, set the categories first"
)
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
            # in a 2-d case be passed (slice(None), ....)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# else: array of True/False in Series or Categorical
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
key = check_array_indexer(self, key)
self._codes[key] = lindexer
def _reverse_indexer(self) -> Dict[Hashable, np.ndarray]:
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Examples
--------
>>> c = pd.Categorical(list('aabca'))
>>> c
[a, a, b, c, a]
Categories (3, object): [a, b, c]
>>> c.categories
Index(['a', 'b', 'c'], dtype='object')
>>> c.codes
array([0, 0, 1, 2, 0], dtype=int8)
>>> c._reverse_indexer()
{'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(
self.codes.astype("int64"), categories.size
)
counts = counts.cumsum()
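        # `r` holds positions grouped by code; the cumulative counts are group
        # boundaries, so consecutive boundary pairs slice out one category each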
_result = (r[start:end] for start, end in zip(counts, counts[1:]))
result = dict(zip(categories, _result))
return result
# reduction ops #
def _reduce(self, name, axis=0, **kwargs):
func = getattr(self, name, None)
if func is None:
raise TypeError(f"Categorical cannot perform the operation {name}")
return func(**kwargs)
@deprecate_kwarg(old_arg_name="numeric_only", new_arg_name="skipna")
def min(self, skipna=True):
"""
The minimum value of the object.
Only ordered `Categoricals` have a minimum!
.. versionchanged:: 1.0.0
Returns an NA value on empty arrays
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered("min")
if not len(self._codes):
return self.dtype.na_value
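        # the minimum is found on the integer codes (ordered categories sort by
        # code) and then translated back to the corresponding category value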
good = self._codes != -1
if not good.all():
if skipna and good.any():
pointer = self._codes[good].min()
else:
return np.nan
else:
pointer = self._codes.min()
return self.categories[pointer]
@deprecate_kwarg(old_arg_name="numeric_only", new_arg_name="skipna")
def max(self, skipna=True):
"""
The maximum value of the object.
Only ordered `Categoricals` have a maximum!
.. versionchanged:: 1.0.0
Returns an NA value on empty arrays
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered("max")
if not len(self._codes):
return self.dtype.na_value
good = self._codes != -1
if not good.all():
if skipna and good.any():
pointer = self._codes[good].max()
else:
return np.nan
else:
pointer = self._codes.max()
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : bool, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
        Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keeps existing order.
Returns
-------
unique values : ``Categorical``
See Also
--------
pandas.unique
CategoricalIndex.unique
Series.unique
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list("baabc")).unique()
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list("baabc"), categories=list("abc")).unique()
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(
... list("baabc"), categories=list("abc"), ordered=True
... ).unique()
[b, a, c]
Categories (3, object): [a < b < c]
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype("int64")
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(
original.categories.take(uniques), dtype=original.dtype
)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
bool
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = recode_for_categories(
other.codes, other.categories, self.categories
)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
        Returns True if the categoricals have the same dtype, i.e. the
        same categories and the same ordered flag.
Parameters
----------
other : Categorical
Returns
-------
bool
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
"""
Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ["counts", "freqs"]
result.index.name = "categories"
return result
@Substitution(klass="Categorical")
@Appender(_extension_array_shared_docs["repeat"])
def repeat(self, repeats, axis=None):
nv.validate_repeat(tuple(), dict(axis=axis))
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import concat_categorical
return | concat_categorical(to_concat) | pandas.core.dtypes.concat.concat_categorical |
'''
main.py
----------
<NAME>
June 6, 2018
Given a company's landing page on Glassdoor and an output filename, scrape the
following information about each employee review:
Review date
Employee position
Employee location
Employee status (current/former)
Review title
Number of helpful votes
Pros text
Cons text
    Advice to mgmt text
Ratings for each of 5 categories
Overall rating
'''
import time
import pandas as pd
from argparse import ArgumentParser
import argparse
import logging
import logging.config
from selenium import webdriver as wd
from selenium.webdriver import ActionChains
import selenium
import numpy as np
from schema import SCHEMA
import json
import urllib
import datetime as dt
start = time.time()
DEFAULT_URL = ('https://www.glassdoor.com/Overview/Working-at-'
'Premise-Data-Corporation-EI_IE952471.11,35.htm')
parser = ArgumentParser()
parser.add_argument('-u', '--url',
help='URL of the company\'s Glassdoor landing page.',
default=DEFAULT_URL)
parser.add_argument('-f', '--file', default='glassdoor_ratings.csv',
help='Output file.')
parser.add_argument('--headless', action='store_true',
help='Run Chrome in headless mode.')
parser.add_argument('--username', help='Email address used to sign in to GD.')
parser.add_argument('-p', '--password', help='Password to sign in to GD.')
parser.add_argument('-c', '--credentials', help='Credentials file')
parser.add_argument('-l', '--limit', default=25,
action='store', type=int, help='Max reviews to scrape')
parser.add_argument('--start_from_url', action='store_true',
help='Start scraping from the passed URL.')
parser.add_argument(
'--max_date', help='Latest review date to scrape.\
Only use this option with --start_from_url.\
You also must have sorted Glassdoor reviews ASCENDING by date.',
type=lambda s: dt.datetime.strptime(s, "%Y-%m-%d"))
parser.add_argument(
'--min_date', help='Earliest review date to scrape.\
Only use this option with --start_from_url.\
You also must have sorted Glassdoor reviews DESCENDING by date.',
type=lambda s: dt.datetime.strptime(s, "%Y-%m-%d"))
args = parser.parse_args()
if not args.start_from_url and (args.max_date or args.min_date):
raise Exception(
'Invalid argument combination:\
No starting url passed, but max/min date specified.'
)
elif args.max_date and args.min_date:
raise Exception(
'Invalid argument combination:\
Both min_date and max_date specified.'
)
if args.credentials:
with open(args.credentials) as f:
d = json.loads(f.read())
args.username = d['username']
args.password = d['password']
else:
try:
with open('secret.json') as f:
d = json.loads(f.read())
args.username = d['username']
args.password = d['password']
except FileNotFoundError:
msg = 'Please provide Glassdoor credentials.\
Credentials can be provided as a secret.json file in the working\
directory, or passed at the command line using the --username and\
--password flags.'
raise Exception(msg)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
logger.addHandler(ch)
formatter = logging.Formatter(
'%(asctime)s %(levelname)s %(lineno)d\
:%(filename)s(%(process)d) - %(message)s')
ch.setFormatter(formatter)
logging.getLogger('selenium').setLevel(logging.CRITICAL)
logging.getLogger('selenium').setLevel(logging.CRITICAL)
def scrape(field, review, author):
def scrape_date(review):
jobtitle = author.find_element_by_class_name('authorJobTitle').text.strip('"')
res = dt.datetime.strptime(jobtitle.split('-')[0].strip(' '),'%b %d, %Y').date()
return res
def scrape_emp_title(review):
if 'Anonymous Employee' not in review.text:
try:
jobtitle = review.find_element_by_class_name('authorJobTitle').text.strip('"')
res = jobtitle.split('-')[1].strip(' ')
except Exception:
logger.warning('Failed to scrape employee_title')
res = "N/A"
else:
res = "Anonymous"
return res
def scrape_location(review):
if 'in' in review.text:
try:
res = author.find_element_by_class_name(
'authorLocation').text
except Exception:
logger.warning('Failed to scrape employee_location')
res = np.nan
else:
res = "N/A"
return res
def scrape_status(review):
try:
res = review.find_element_by_class_name('pt-xsm').text.strip('"')
except Exception:
logger.warning('Failed to scrape employee_status')
res = "N/A"
return res
def scrape_rev_title(review):
return review.find_element_by_class_name('mb-xxsm').text.strip('"')
def scrape_helpful(review):
try:
helpful = review.find_element_by_class_name('common__EiReviewDetailsStyle__socialHelpfulcontainer').text
if 'people found this review helpful' in helpful:
res = int(helpful.split(' ')[0])
else:
res = 0
except Exception:
res = 0
return res
def scrape_pros(review):
try:
comments = review.find_elements_by_class_name('v2__EIReviewDetailsV2__fullWidth')
res = np.nan
for r in comments:
if r.find_element_by_tag_name('p').text=='Pros':
res = r.find_element_by_tag_name('span').text
except Exception:
res = np.nan
return res
def scrape_cons(review):
try:
comments = review.find_elements_by_class_name('v2__EIReviewDetailsV2__fullWidth')
res = np.nan
for r in comments:
if r.find_element_by_tag_name('p').text=='Cons':
res = r.find_element_by_tag_name('span').text
except Exception:
res = np.nan
return res
def scrape_advice(review):
try:
comments = review.find_elements_by_class_name('v2__EIReviewDetailsV2__fullWidth')
res = np.nan
for r in comments:
if r.find_element_by_tag_name('span').get_attribute('data-test')=='advice-management':
res = r.find_element_by_tag_name('span').text
except Exception:
res = np.nan
return res
def scrape_overall_rating(review):
try:
ratings = review.find_element_by_class_name('ratingNumber')
res = float(ratings.text[:3])
except Exception:
res = np.nan
return res
def _scrape_subrating(i):
try:
r = review.find_element_by_class_name('tooltipContainer').find_elements_by_tag_name('li')
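            # six subratings are listed when Diversity & Inclusion is present;
            # with only five <li> items that entry (index 2) is missing, so
            # return NaN for it and shift the later indices down by one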
if (i == 2) and (len(r)!=6):
res = np.nan
else:
if (i > 2):
if (len(r) != 6):
i = i-1
srdiv = r[i].find_elements_by_tag_name('div')
srclass = srdiv[1].get_attribute('class')
srclass = srclass.split()[0]
res = v.index(srclass)+1
except Exception:
res = np.nan
return res
def scrape_work_life_balance(review):
return _scrape_subrating(0)
def scrape_culture_and_values(review):
return _scrape_subrating(1)
def scrape_diversity_inclusion(review):
return _scrape_subrating(2)
def scrape_career_opportunities(review):
return _scrape_subrating(3)
def scrape_comp_and_benefits(review):
return _scrape_subrating(4)
def scrape_senior_management(review):
return _scrape_subrating(5)
def _scrape_checkmark(i):
try:
r = review.find_element_by_class_name('recommends').find_elements_by_class_name('SVGInline-svg')
att = r[i].get_attribute('class')
if att=='SVGInline-svg css-hcqxoa-svg d-flex-svg':
res = 'mark'
elif att=='SVGInline-svg css-1h93d4v-svg d-flex-svg':
res = 'line'
elif att=='SVGInline-svg css-1kiw93k-svg d-flex-svg':
res = 'cross'
elif att=='SVGInline-svg css-10xv9lv-svg d-flex-svg':
res = 'circle'
else:
res = np.nan
except Exception:
res = np.nan
return res
def scrape_recommends(review):
return _scrape_checkmark(0)
def scrape_approve_ceo(review):
return _scrape_checkmark(1)
def scrape_outlook(review):
return _scrape_checkmark(2)
def scrape_featured(review):
try:
review.find_element_by_class_name('common__EiReviewDetailsStyle__newFeaturedReview')
return True
except selenium.common.exceptions.NoSuchElementException:
return False
funcs = [
scrape_date,
scrape_emp_title,
scrape_location,
scrape_status,
scrape_rev_title,
scrape_helpful,
scrape_pros,
scrape_cons,
scrape_advice,
scrape_overall_rating,
scrape_work_life_balance,
scrape_culture_and_values,
scrape_diversity_inclusion,
scrape_career_opportunities,
scrape_comp_and_benefits,
scrape_senior_management,
scrape_recommends,
scrape_outlook,
scrape_approve_ceo,
scrape_featured
]
# mapping from subrating to integer value for 1,2,3,4,5 stars
v = ['css-xd4dom','css-18v8tui','css-vl2edp','css-1nuumx7','css-s88v13']
fdict = dict((s, f) for (s, f) in zip(SCHEMA, funcs))
return fdict[field](review)
def extract_from_page():
def expand_show_more(review):
try:
continue_link = review.find_element_by_class_name('v2__EIReviewDetailsV2__newUiCta')
continue_link.click()
except Exception:
pass
def extract_review(review):
try:
author = review.find_element_by_class_name('authorInfo')
except:
return None # Account for reviews that have been blocked
res = {}
# import pdb;pdb.set_trace()
for field in SCHEMA:
res[field] = scrape(field, review, author)
assert set(res.keys()) == set(SCHEMA)
return res
logger.info(f'Extracting reviews from page {page[0]}')
res = | pd.DataFrame([], columns=SCHEMA) | pandas.DataFrame |
import mlrose
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
from sklearn.metrics import accuracy_score
from alg_runner import sim_annealing_runner, rhc_runner, ga_runner, mimic_runner
from plotting import plot_montecarlo_sensitivity
import os
import pickle
from datetime import datetime
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
np.random.seed(1)
def run_flipflop():
    # If the output/FlipFlop directory doesn't exist, create it.
if not os.path.exists('./output/FlipFlop/'):
os.mkdir('./output/FlipFlop/')
    # TODO Write state regeneration functions as lambdas
problem_size = 50
logger = logging.getLogger(__name__)
flip_fit = mlrose.FlipFlop()
flop_state_gen = lambda: np.random.randint(2, size=problem_size)
init_state = flop_state_gen()
problem = mlrose.DiscreteOpt(length=problem_size, fitness_fn=flip_fit, maximize=True, max_val=2)
all_results = {}
print("Running simulated annealing montecarlos")
sa_results, sa_timing = sim_annealing_runner(problem, init_state, state_regenerator=flop_state_gen)
plot_montecarlo_sensitivity('FlipFlop', 'sim_anneal', sa_results)
plot_montecarlo_sensitivity('FlipFlop', 'sim_anneal_timing', sa_timing)
all_results['SA'] = [sa_results, sa_timing]
print("Running random hill montecarlos")
rhc_results, rhc_timing = rhc_runner(problem, init_state, state_regenerator=flop_state_gen)
plot_montecarlo_sensitivity('FlipFlop', 'rhc', rhc_results)
plot_montecarlo_sensitivity('FlipFlop', 'rhc_timing', sa_timing)
all_results['RHC'] = [rhc_results, rhc_timing]
print("Running genetic algorithm montecarlos")
ga_results, ga_timing = ga_runner(problem, init_state, state_regenerator=flop_state_gen)
plot_montecarlo_sensitivity('FlipFlop', 'ga', ga_results)
plot_montecarlo_sensitivity('FlipFlop', 'ga_timing', ga_timing)
all_results['GA'] = [ga_results, ga_timing]
print("Running MIMIC montecarlos")
mimic_results, mimic_timing = mimic_runner(problem, init_state, state_regenerator=flop_state_gen)
plot_montecarlo_sensitivity('FlipFlop', 'mimic', mimic_results)
plot_montecarlo_sensitivity('FlipFlop', 'mimic_timing', mimic_timing)
all_results['MIMIC'] = [mimic_results, mimic_timing]
with open('./output/FlipFlop/flipflip_data.pickle', 'wb') as handle:
pickle.dump(all_results, handle, protocol=pickle.HIGHEST_PROTOCOL)
problem_size_space = np.linspace(10, 125, 20, dtype=int)
best_fit_dict = {}
best_fit_dict['Problem Size'] = problem_size_space
best_fit_dict['Random Hill Climbing'] = []
best_fit_dict['Simulated Annealing'] = []
best_fit_dict['Genetic Algorithm'] = []
best_fit_dict['MIMIC'] = []
times = {}
times['Problem Size'] = problem_size_space
times['Random Hill Climbing'] = []
times['Simulated Annealing'] = []
times['Genetic Algorithm'] = []
times['MIMIC'] = []
fits_per_iteration = {}
fits_per_iteration['Random Hill Climbing'] = []
fits_per_iteration['Simulated Annealing'] = []
fits_per_iteration['Genetic Algorithm'] = []
fits_per_iteration['MIMIC'] = []
for prob_size in problem_size_space:
logger.info("---- Problem size: " + str(prob_size) + " ----")
prob_size_int = int(prob_size)
flip_fit = mlrose.FlipFlop()
flop_state_gen = lambda: np.random.randint(2, size=prob_size_int)
init_state = flop_state_gen()
problem = mlrose.DiscreteOpt(length=prob_size_int, fitness_fn=flip_fit, maximize=True, max_val=2)
start = datetime.now()
_, best_fitness_sa, fit_array_sa = mlrose.simulated_annealing(problem,
schedule=mlrose.ExpDecay(exp_const=.001, init_temp=2),
max_attempts=20,
max_iters=10000, init_state=init_state, track_fits=True)
best_fit_dict['Simulated Annealing'].append(best_fitness_sa)
end = datetime.now()
times['Simulated Annealing'].append((end-start).total_seconds())
start = datetime.now()
_, best_fitness_rhc, fit_array_rhc = mlrose.random_hill_climb(problem, max_attempts=200, max_iters=10000,
restarts=20, track_fits=True)
best_fit_dict['Random Hill Climbing'].append(best_fitness_rhc)
end = datetime.now()
times['Random Hill Climbing'].append((end-start).total_seconds())
start = datetime.now()
_, best_fitness_ga, fit_array_ga = mlrose.genetic_alg(problem, pop_size=prob_size_int*5,
mutation_prob=.025, max_attempts=20, track_fits=True, max_iters=2000)
best_fit_dict['Genetic Algorithm'].append(best_fitness_ga)
end = datetime.now()
times['Genetic Algorithm'].append((end-start).total_seconds())
start = datetime.now()
_, best_fitness_mimic, fit_array_mimic = mlrose.mimic(problem, pop_size=prob_size_int*3,
keep_pct=.25, max_attempts=20, track_fits=True, max_iters=500)
best_fit_dict['MIMIC'].append(best_fitness_mimic)
end = datetime.now()
times['MIMIC'].append((end-start).total_seconds())
# For the last fit that occurs, save off the fit arrays that are generated. We will plot fitness/iteration.
fits_per_iteration['Random Hill Climbing'] = fit_array_rhc
fits_per_iteration['Simulated Annealing'] = fit_array_sa
fits_per_iteration['Genetic Algorithm'] = fit_array_ga
fits_per_iteration['MIMIC'] = fit_array_mimic
fit_frame = | pd.DataFrame.from_dict(best_fit_dict, orient='index') | pandas.DataFrame.from_dict |
#!/usr/bin/env python3.7
# coding: utf-8
# In[1]:
import sys
import rstr
import string
import random
import pandas as pd
from numpy.random import default_rng
import numpy as np
import time
#####INPUT PARAMETERS #####
## pattern
## stream_length
## num_sub_streams
## window_size
## num_matches
## strict
###########################
#regular exrpession
pattern= sys.argv[1] #'ab{0,2}c'
#length of generated stream (#events)
#this value includes all individual substreams
#defined with the num_sub_streams parameter (discussed next)
#the actual size of the stream is slightly larger because of
#the multi-event patterns we generate
stream_length = int(sys.argv[2]) #1000000
#num_sub_streams affects the number of sub-streams to create
#events will be distributed among sub-streams using UNI or ZIPF distribution
#use a value num_sub_streams > 0 for UNIFORM allocation of events across num_sub_streams sub-streams with ids in range [0, num_sub_streams)
#if num_sub_streams = 0 then a zipf distribution will be used with alpha = 2
num_sub_streams = int(sys.argv[3]) #2
#size of count-based window
window_size = int(sys.argv[4]) #5
#number of matches to generate
num_matches = int(sys.argv[5]) #5
strict = sys.argv[6] #True #if false then it will pad generated matches with random characters \
#file name
fileName = sys.argv[7] #"data.txt"
###############################
#note assuming uniform distribution each sub-stream will have about stream_length/(num_sub_streams*window_size) windows
verbose = False
# In[2]:
rng = default_rng()
alpha = 2
#build an array with id values based on selected distribution
if num_sub_streams > 0:
Stream_IDs = [random.choice(range(num_sub_streams)) for i in range(stream_length)]
else:
Stream_IDs = rng.zipf(alpha, stream_length)
# In[3]:
def get_stream_id():
return random.choice(Stream_IDs)
def randomstream_generator_df_old(size,chars = string.ascii_lowercase):
stream = pd.DataFrame(columns = ['pos', 'stream_id', 'event'])
for i in range(size):
stream_id = get_stream_id()
event = random.choice(chars)
stream.loc[i] = [i,stream_id,event]
return stream
def randomstream_generator_df(size, chars = string.ascii_lowercase):
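    # build a list of (pos, stream_id, event) tuples and construct the DataFrame
    # once at the end; this is much faster than appending rows with .loc above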
data = []
for i in range(size):
stream_id = get_stream_id()
event = random.choice(chars)
data.append((i, stream_id,event))
#stream.loc[i] = [i,stream_id,event]
stream = | pd.DataFrame(data,columns = ['pos','stream_id', 'event']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""reVX PLEXOS unit test module
"""
from click.testing import CliRunner
import numpy as np
import json
import os
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
import shutil
import tempfile
import traceback
from rex import Resource
from rex.utilities.loggers import LOGGERS
from reVX.plexos.rev_reeds_plexos import PlexosAggregation
from reVX.plexos.rev_reeds_plexos_cli import main
from reVX import TESTDATADIR
REV_SC = os.path.join(
TESTDATADIR,
'reV_sc/wtk_coe_2017_cem_v3_wind_conus_multiyear_colorado.csv')
REEDS_0 = os.path.join(TESTDATADIR, 'reeds/',
'BAU_wtk_coe_2017_cem_v3_wind_conus_'
'multiyear_US_wind_reeds_to_rev.csv')
REEDS_1 = os.path.join(TESTDATADIR, 'plexos/reeds_build.csv')
CF_FPATH = os.path.join(TESTDATADIR,
'reV_gen/naris_rev_wtk_gen_colorado_{}.h5')
PLEXOS_NODES = os.path.join(TESTDATADIR, 'plexos/plexos_nodes.csv')
PLEXOS_SHAPES = os.path.join(TESTDATADIR, 'reeds_pca_regions_test/',
'NA_PCA_Map.shp')
BASELINE = os.path.join(TESTDATADIR, 'plexos/rev_reeds_plexos.h5')
@pytest.fixture(scope="module")
def runner():
"""
cli runner
"""
return CliRunner()
def test_plexos_agg():
"""Test that a plexos node aggregation matches baseline results."""
outdir = os.path.join(os.path.dirname(__file__),
'data/aggregated_plexos_profiles/')
build_year = 2050
plexos_meta, _, profiles = PlexosAggregation.run(
PLEXOS_NODES, REV_SC, REEDS_1, CF_FPATH.format(2007),
build_year=build_year, max_workers=1)
fpath_meta = os.path.join(outdir, 'plexos_meta.csv')
fpath_profiles = os.path.join(outdir, 'profiles.csv')
if not os.path.exists(fpath_meta):
plexos_meta.to_csv(fpath_meta)
if not os.path.exists(fpath_profiles):
pd.DataFrame(profiles).to_csv(fpath_profiles)
baseline_meta = pd.read_csv(fpath_meta, index_col=0)
baseline_profiles = pd.read_csv(fpath_profiles, index_col=0)
for col in ('res_gids', 'res_built', 'gen_gids'):
baseline_meta[col] = baseline_meta[col].apply(json.loads)
assert all(baseline_meta['gen_gids'] == plexos_meta['gen_gids'])
assert np.allclose(baseline_meta['built_capacity'],
plexos_meta['built_capacity'])
assert np.allclose(baseline_profiles.values, profiles)
def test_bad_build_capacity():
"""Test that the PlexosAggregation code raises an error if it can't
build the full requested capacity."""
build_year = 2050
reeds_1 = | pd.read_csv(REEDS_1) | pandas.read_csv |
import json
import os
import pandas as pd
import scraper
class full_version:
def __init__(self):
self.data={}
self.name=""
self.email=""
self.user_data = os.path.join(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__))),
"json",
"user_data.json"
)
self.user_list = os.path.join(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__))),
"csvs",
"user_list.csv"
)
self.df=pd.DataFrame()
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', 40)
def login(self):
if not os.path.exists(self.user_data):
print("Welcome to Slash!")
print("Please enter the following information: ")
name=input("Name: ")
email=input("Email: ")
self.data['name']=name
self.data['email']=email
with open(self.user_data, 'w') as outfile:
json.dump(self.data, outfile)
self.name=name
self.email=email
else:
with open(self.user_data) as json_file:
data = json.load(json_file)
self.name=data['name']
self.email=data['email']
return self.name, self.email
def search_fn(self):
prod=input("Enter name of product to Search: ")
self.scrape(prod)
ch=int(input("\n\nEnter 1 to save product to list \nelse enter any other key to continue"))
if ch==1:
indx=int(input("Enter row number of product to save: "))
if indx<len(self.df):
if os.path.exists(self.user_list):
old_data=pd.read_csv(self.user_list)
else:
old_data= | pd.DataFrame() | pandas.DataFrame |
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
@pytest.mark.functions
def test_truncate_datetime_dataframe_invalid_datepart():
"""Checks if a ValueError is appropriately raised when datepart is
not a valid enumeration.
"""
with pytest.raises(ValueError, match=r"invalid `datepart`"):
pd.DataFrame().truncate_datetime_dataframe("INVALID")
@pytest.mark.functions
def test_truncate_datetime_dataframe_all_parts():
"""Test for truncate_datetime_dataframe, for all valid dateparts.
Also only passes if `truncate_datetime_dataframe` method is idempotent.
"""
x = datetime(2022, 3, 21, 9, 1, 15, 666)
df = | pd.DataFrame({"dt": [x], "foo": [np.nan]}, copy=False) | pandas.DataFrame |
import statistics
import json
import csv
from pathlib import Path
from promise.utils import deprecated
from scipy import stats
from .core import should_process, rename_exp
from .core import get_test_fitness
from .core import sort_algorithms
from .core import rename_alg
from .plotting import plot_twinx
import stac
import scipy.stats as ss
import scikit_posthocs as sp
def update_wdl(exp_data, wdltable, rename_map, wdltable_exp_names, exp_name, *,
base_line='WithoutKnowledge', num_generations=50):
"""
    Computes the Win-Draw-Loss statistics of the experiment results given in 'exp_data'.
    The function does not return any value but updates the input argument 'wdltable'.
"""
# wins = 0
# draws = 0
# loses = 0
if not base_line:
return
generation = num_generations - 1
bmean = statistics.mean(list(exp_data[base_line][generation].values()))
for alg in exp_data:
if alg == base_line:
continue
renamed_alg = rename_alg(alg, rename_map)
if not renamed_alg in wdltable:
# wins, draws, losses, missing
wdltable[renamed_alg] = [0, 0, 0, 0]
if renamed_alg not in wdltable_exp_names:
# wins, draws, losses, missing
wdltable_exp_names[renamed_alg] = [[], [], [], []]
try:
mean = statistics.mean(list(exp_data[alg][generation].values()))
# base_mean = statistics.mean(list(exp_data[base_line][generation].values()))
except KeyError as e:
print(alg, e)
wdltable[renamed_alg][3] += 1
wdltable_exp_names[renamed_alg][3].append(exp_name)
continue
if len(list(exp_data[base_line][generation].values())) != len(list(exp_data[alg][generation].values())):
print("Len of ", alg, "(", len(list(exp_data[alg][generation].values())), ") is not 30.")
wdltable[renamed_alg][3] += 1
wdltable_exp_names[renamed_alg][3].append(exp_name)
# continue
alg_len = len(list(exp_data[alg][generation].values()))
pval_wo_wil = stats.wilcoxon(list(exp_data[base_line][generation].values())[:alg_len],
list(exp_data[alg][generation].values()))[1]
if pval_wo_wil < 0.05:
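            # fitness is minimised: a significantly lower mean than the baseline
            # counts as a win, a significantly higher one as a loss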
if mean < bmean:
wdltable[renamed_alg][0] += 1
wdltable_exp_names[renamed_alg][0].append(exp_name)
else:
wdltable[renamed_alg][2] += 1
wdltable_exp_names[renamed_alg][2].append(exp_name)
else:
wdltable[renamed_alg][1] += 1
wdltable_exp_names[renamed_alg][1].append(exp_name)
def wdl(dirbase, experiments, inclusion_filter, exclusion_filter, rename_map, *, base_line='WithoutKnowledge',
num_generations=50, dump_file=Path('./wdl')):
"""
Computes the Win-Draw-Loss statistics of algorithms compared to a baseline. The function saves
    the stats to JSON and CSV files and also returns them. This function reads the fitness values
from the 'dirbase' location.
Usage: wdl(dirbase, experiments, inclusion_filter, exclusion_filter, dump_file=output_folder / 'wdl', rename_map=rename_map)
"""
wdltable = {}
wdltable_exp_name = {}
for exp in experiments:
print('WDL: processing', dirbase / exp)
exp_data = get_test_fitness(dirbase / exp, inclusion_filter, exclusion_filter, num_generations=num_generations)
update_wdl(exp_data, wdltable, rename_map, wdltable_exp_name, exp, base_line=base_line,
num_generations=num_generations)
with open(str(dump_file) + '.json', 'w') as file:
json.dump(wdltable, file, indent=4)
print('WDL: results saved to:', dump_file)
with open(str(dump_file) + '-expnames.json', 'w') as file:
json.dump(wdltable_exp_name, file, indent=4)
with open(str(dump_file) + '.csv', 'w', newline="") as csv_file:
writer = csv.writer(csv_file)
for key, value in wdltable.items():
writer.writerow([key, *value])
return wdltable, wdltable_exp_name
def wdl2(experiment_data, rename_map, *, base_line='WithoutKnowledge', num_generations=50, dump_file=Path('./wdl')):
"""
Computes the Win-Draw-Loss statistics of algorithms compared to a baseline. The function saves
    the stats to JSON and CSV files and also returns them. The function does not read fitness data
from files and treats 'experiment_data' as a dictionary that contains fitness information for
each experiment.
Usage: wdl2(experiment_data, dump_file=output_folder / 'wdl', rename_map=rename_map)
"""
wdltable = {}
wdltable_exp_name = {}
for exp in experiment_data:
print('WDL2: processing', exp)
exp_data = experiment_data[exp]
update_wdl(exp_data, wdltable, rename_map, wdltable_exp_name, exp, base_line=base_line,
num_generations=num_generations)
with open(str(dump_file) + '.json', 'w') as file:
json.dump(wdltable, file, indent=4)
print('WDL2: results saved to:', dump_file)
with open(str(dump_file) + '-expnames.json', 'w') as file:
json.dump(wdltable_exp_name, file, indent=4)
# print('WDL2: results saved to:', dump_file)
with open(str(dump_file) + '.csv', 'w', newline="") as csv_file:
writer = csv.writer(csv_file)
for key, value in wdltable.items():
writer.writerow([key, *value])
return wdltable, wdltable_exp_name
import pandas as pd
def friedman_test2(test_fitness, ignore_list=[]):
if len(test_fitness) < 3:
return -1, [], []
data = []
alg_names = []
for alg in test_fitness:
if alg in ignore_list:
continue
data.append(list(test_fitness[alg]))
alg_names.append(alg)
_, p, rank, pivot = stac.nonparametric_tests.friedman_test(*data)
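    # `rank` holds each algorithm's average Friedman rank; `pivot` holds the
    # comparison statistics that the Nemenyi post-hoc test below consumes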
post = {}
ctr = 0
for alg in test_fitness:
if alg in ignore_list:
continue
post[alg] = (pivot[ctr])
ctr = ctr + 1
names, z_values, p_values, adjusted_pval = stac.nonparametric_tests.nemenyi_multitest(post)
return p, list(zip(alg_names, rank)), list(zip(names, adjusted_pval)), list(zip(alg_names, z_values)), list(zip(alg_names, p_values))
def summary(dirbase, experiments, inclusion_filter, exclusion_filter, rename_map,
*, num_generations=50, dump_file=Path('./wdl'), baseline_alg):
def summarise(test_fit):
mini = round(min(list(test_fit.values())), 2)
maxi = round(max(list(test_fit.values())), 2)
mean = round(statistics.mean(list(test_fit.values())), 2)
std = round(statistics.stdev(list(test_fit.values())), 2)
median = round(statistics.median(list(test_fit.values())), 2)
return mini, maxi, mean, std, median
def pval(fitness, alg, generation):
if not baseline_alg or alg == baseline_alg:
pval_wo_wil = '--'
pval_wo_t = '--'
else:
if len(list(fitness[baseline_alg][generation].values())) != len(list(fitness[alg][generation].values())):
alg_len = len(list(fitness[alg][generation].values()))
print("Warning: Len of ", alg, "(", alg_len, ") is not 30. Test is done for this length.")
try:
pval_wo_wil = stats.wilcoxon(list(fitness[baseline_alg][generation].values())[:alg_len],
list(fitness[alg][generation].values()))[1]
pval_wo_t = stats.ttest_rel(list(fitness[baseline_alg][generation].values())[:alg_len],
list(fitness[alg][generation].values()))[1]
except ValueError:
pval_wo_t = -1
pval_wo_wil = -1
else:
pval_wo_wil = \
stats.wilcoxon(list(fitness[baseline_alg][generation].values()),
list(fitness[alg][generation].values()))[1]
pval_wo_wil = round(pval_wo_wil, 2)
pval_wo_t = \
stats.ttest_rel(list(fitness[baseline_alg][generation].values()),
list(fitness[alg][generation].values()))[
1]
pval_wo_t = round(pval_wo_t, 2)
return pval_wo_wil, pval_wo_t
def friedman_test(fitness, generation):
if len(fitness) < 3:
return -1, [], []
data = []
alg_names = []
for algorithm in fitness:
# if alg == baseline or len(test_fitness[alg][gen].values()) != 30:
if len(fitness[algorithm][generation].values()) != 30:
continue
data.append(list(fitness[algorithm][generation].values()))
alg_names.append(algorithm)
_, p, rank, pivot = stac.nonparametric_tests.friedman_test(*data)
post = {}
ctr = 0
for alg in fitness:
# if alg == baseline or len(test_fitness[alg][gen].values()) != 30:
if len(fitness[alg][generation].values()) != 30:
continue
post[alg] = (pivot[ctr])
ctr = ctr + 1
names, _, _, adjusted_pval = stac.nonparametric_tests.nemenyi_multitest(post)
return p, list(zip(alg_names, rank)), list(zip(names, adjusted_pval))
test_summary_table = {}
best_summary_table = {}
test_data_table = {}
best_data_table = {}
test_fri_table = {}
best_fri_table = {}
gen = 49
def do_summary(fitness, generation):
smmry_table = {}
fri_table = friedman_test(fitness, generation)
for algo in best_fitness:
mini, maxi, mean, std, median = summarise(fitness[algo][generation])
pval_wo_wil, pval_wo_t = pval(fitness, algo, generation)
if algo not in smmry_table:
smmry_table[algo] = {}
smmry_table[algo]['Average'] = mean
smmry_table[algo]['Stdev'] = std
smmry_table[algo]['min'] = mini
smmry_table[algo]['max'] = maxi
smmry_table[algo]['median'] = median
smmry_table[algo]['pval_wo_t'] = pval_wo_t
smmry_table[algo]['pval_wo_wil'] = pval_wo_wil
return smmry_table, fri_table
for exp in experiments:
print('Summary: processing', dirbase / exp)
test_fitness, best_fitness = get_test_fitness(dirbase / exp, inclusion_filter, exclusion_filter,
num_generations=num_generations)
test_data_table[exp] = test_fitness
best_data_table[exp] = best_fitness
test_summary_table[exp], test_fri_table[exp] = do_summary(test_fitness, gen)
best_summary_table[exp], best_fri_table[exp] = do_summary(best_fitness, -1)
return test_summary_table, test_data_table, test_fri_table, best_summary_table, best_data_table, best_fri_table
def save_stat2(summary_table, output_folder, rename_map, fried_table):
def calc_wdl(fried, base):
win, draw, loss = {}, {}, {}
for xp in fried:
# if fried[xp][0] >= 0.05:
# d = d + 1
# continue
for comparison in fried[xp][2]:
if base not in comparison[0]:
continue
print(comparison)
pval = comparison[1]
vs = comparison[0].replace(base, '').replace(' vs ', '')
ren_vs = rename_alg(vs, rename_map)
if pval >= 0.05:
draw[ren_vs] = draw.get(ren_vs, 0) + 1
continue
if summary_table[xp][base]['Average'] <= summary_table[xp][vs]['Average']:
win[ren_vs] = win.get(ren_vs, 0) + 1
else:
loss[ren_vs] = loss.get(ren_vs, 0) + 1
return win, draw, loss
def calc_wdl2(fried, alg1, alg2):
"""
Compares alg1 against alg2
:param fried: the friedman table
:param alg1: the baseline algorithm
:param alg2: the algorithm that alg1 is compared against.
:return: (wins, draws, losses) of alg1 against alg2
"""
win, draw, loss = 0, 0, 0
for xp in fried:
# if fried[xp][0] >= 0.05:
# d = d + 1
# continue
for comparison in fried[xp][2]:
if alg1 not in comparison[0]:
continue
if alg2 not in comparison[0]:
continue
pval = comparison[1]
# ren_alg1 = rename_alg(alg1, rename_map)
# ren_alg2 = rename_alg(alg2, rename_map)
if pval >= 0.05:
draw = draw + 1
continue
if summary_table[xp][alg1]['Average'] <= summary_table[xp][alg2]['Average']:
win = win + 1
else:
loss = loss + 1
return win, draw, loss
def fried_on_average(ave_df):
pval, ranks, posthoc, z_values, p_values = friedman_test2(ave_df.T.to_dict('list'))
ranks = {rename_alg(rank[0], rename_map): round(rank[1], 2) for rank in ranks}
z_values = {rename_alg(z[0], rename_map): z[1] for z in z_values}
p_values = {rename_alg(p[0], rename_map): p[1] for p in p_values}
ranks['p-val'] = pval
pd.Series(ranks).to_csv(output_folder / 'mean_table_ranks.csv')
pd.Series(ranks).to_latex(output_folder / 'mean_table_ranks.tex')
pd.Series(z_values).to_csv(output_folder / 'mean_table_z_values.csv')
pd.Series(z_values).to_latex(output_folder / 'mean_table_z_values.tex')
pd.Series(p_values).to_csv(output_folder / 'mean_table_unadj_p_values.csv')
pd.Series(p_values).to_latex(output_folder / 'mean_table_unadj_p_values.tex')
dc = {}
for s in posthoc:
s1, s2 = s[0].split(' vs ')
if s1 not in dc:
dc[s1] = {}
dc[s1][s2] = round(float(s[1]), 5)
df = pd.DataFrame(dc).fillna('--')
df.to_latex(output_folder / 'mean_table_ph.tex')
df.to_csv(output_folder / 'mean_table_ph.csv')
output_folder = Path(output_folder)
if not output_folder.exists():
output_folder.mkdir(parents=True)
all_averages = None
all_pvals = None
all_stds = None
all_ranks = pd.DataFrame()
scenario = 1
scenario_series = pd.Series(dtype='Int64')
for exp in summary_table:
exp_summary = {rename_alg(alg, rename_map): summary_table[exp][alg] for alg in
sort_algorithms(summary_table[exp])}
df = pd.DataFrame(exp_summary)
ren_exp = rename_exp(exp)
scenario_series[ren_exp] = scenario
save_path = output_folder / ren_exp
if not save_path.exists():
save_path.mkdir(parents=True)
df = df.T
ranks = fried_table[exp][1]
rnks = {rename_alg(rank[0], rename_map): rank[1] for rank in ranks}
df["Rank"] = | pd.Series(rnks) | pandas.Series |
# -*-coding:utf-8 -*-
'''
@File : preprocess.py
@Author : <NAME>
@Date : 2020/9/9
@Desc :
'''
import pandas as pd
import json
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA_BASE = BASE_DIR + "/data/"
# print(DATA_BASE)
def data_preprocess(corpus_file_name):
""" 数据预处理 """
print("===================Start Preprocess======================")
    df = pd.read_csv(DATA_BASE + corpus_file_name + ".csv") # read the source data; parse it into datetime format
    # df["小时"] = df["time"].map(lambda x: int(x.strftime("%H"))) # extract the hour
    df = df.drop_duplicates() # remove duplicates
    print("Remove duplicate items completed! ")
    df = df.dropna(subset=["内容"]) # drop rows where the comment content is empty
    # df = df.dropna(subset=["gender"]) # drop rows where the gender is empty
    print("Remove empty contents completed! ")
    # df.to_csv(corpus_file_name+".csv") # write the processed data
    print("===================Data cleaning completed======================")
return df
def get_phrases(corpus_file_name):
""" 从excel/csv文件中提取相应的短语组合 """
print("===================Start Withdraw======================")
print(DATA_BASE + corpus_file_name + ".csv")
df = | pd.read_csv("../data/" + corpus_file_name + ".csv") | pandas.read_csv |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import pandas as pd
import numpy as np
from qlib.contrib.report.data.base import FeaAnalyser
from qlib.contrib.report.utils import sub_fig_generator
from qlib.utils.paral import datetime_groupby_apply
from qlib.contrib.eva.alpha import pred_autocorr_all
from loguru import logger
import seaborn as sns
DT_COL_NAME = "datetime"
class CombFeaAna(FeaAnalyser):
"""
Combine the sub feature analysers and plot then in a single graph
"""
def __init__(self, dataset: pd.DataFrame, *fea_ana_cls):
if len(fea_ana_cls) <= 1:
raise NotImplementedError(f"This type of input is not supported")
self._fea_ana_l = [fcls(dataset) for fcls in fea_ana_cls]
super().__init__(dataset=dataset)
def skip(self, col):
return np.all(list(map(lambda fa: fa.skip(col), self._fea_ana_l)))
def calc_stat_values(self):
"""The statistics of features are finished in the underlying analysers"""
def plot_all(self, *args, **kwargs):
ax_gen = iter(sub_fig_generator(row_n=len(self._fea_ana_l), *args, **kwargs))
for col in self._dataset:
if not self.skip(col):
axes = next(ax_gen)
for fa, ax in zip(self._fea_ana_l, axes):
if not fa.skip(col):
fa.plot_single(col, ax)
ax.set_xlabel("")
ax.set_title("")
axes[0].set_title(col)
class NumFeaAnalyser(FeaAnalyser):
def skip(self, col):
is_obj = np.issubdtype(self._dataset[col], np.dtype("O"))
if is_obj:
logger.info(f"{col} is not numeric and is skipped")
return is_obj
class ValueCNT(FeaAnalyser):
def __init__(self, dataset: pd.DataFrame, ratio=False):
self.ratio = ratio
super().__init__(dataset)
def calc_stat_values(self):
self._val_cnt = {}
for col, item in self._dataset.items():
if not super().skip(col):
self._val_cnt[col] = item.groupby(DT_COL_NAME).apply(lambda s: len(s.unique()))
self._val_cnt = pd.DataFrame(self._val_cnt)
if self.ratio:
self._val_cnt = self._val_cnt.div(self._dataset.groupby(DT_COL_NAME).size(), axis=0)
# TODO: transfer this feature to other analysers
ymin, ymax = self._val_cnt.min().min(), self._val_cnt.max().max()
self.ylim = (ymin - 0.05 * (ymax - ymin), ymax + 0.05 * (ymax - ymin))
def plot_single(self, col, ax):
self._val_cnt[col].plot(ax=ax, title=col, ylim=self.ylim)
ax.set_xlabel("")
class FeaDistAna(NumFeaAnalyser):
def plot_single(self, col, ax):
sns.histplot(self._dataset[col], ax=ax, kde=False, bins=100)
ax.set_xlabel("")
ax.set_title(col)
class FeaInfAna(NumFeaAnalyser):
def calc_stat_values(self):
self._inf_cnt = {}
for col, item in self._dataset.items():
if not super().skip(col):
                self._inf_cnt[col] = item.apply(np.isinf).astype(int).groupby(DT_COL_NAME).sum()
self._inf_cnt = | pd.DataFrame(self._inf_cnt) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
        # Test panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
        # GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
| tm.assert_panel_equal(shifted1, shifted2) | pandas.util.testing.assert_panel_equal |
from datetime import datetime
import numpy as np
import pytest
from pandas.core.dtypes.cast import find_common_type, is_dtype_equal
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas._testing as tm
class TestDataFrameCombineFirst:
def test_combine_first_mixed(self):
a = Series(["a", "b"], index=range(2))
b = Series(range(2), index=range(2))
f = DataFrame({"A": a, "B": b})
a = Series(["a", "b"], index=range(5, 7))
b = Series(range(2), index=range(5, 7))
g = DataFrame({"A": a, "B": b})
exp = DataFrame({"A": list("abab"), "B": [0, 1, 0, 1]}, index=[0, 1, 5, 6])
combined = f.combine_first(g)
tm.assert_frame_equal(combined, exp)
def test_combine_first(self, float_frame):
# disjoint
head, tail = float_frame[:5], float_frame[5:]
combined = head.combine_first(tail)
reordered_frame = float_frame.reindex(combined.index)
tm.assert_frame_equal(combined, reordered_frame)
assert tm.equalContents(combined.columns, float_frame.columns)
tm.assert_series_equal(combined["A"], reordered_frame["A"])
# same index
fcopy = float_frame.copy()
fcopy["A"] = 1
del fcopy["C"]
fcopy2 = float_frame.copy()
fcopy2["B"] = 0
del fcopy2["D"]
combined = fcopy.combine_first(fcopy2)
assert (combined["A"] == 1).all()
tm.assert_series_equal(combined["B"], fcopy["B"])
tm.assert_series_equal(combined["C"], fcopy2["C"])
tm.assert_series_equal(combined["D"], fcopy["D"])
# overlap
head, tail = reordered_frame[:10].copy(), reordered_frame
head["A"] = 1
combined = head.combine_first(tail)
assert (combined["A"][:10] == 1).all()
# reverse overlap
tail["A"][:10] = 0
combined = tail.combine_first(head)
assert (combined["A"][:10] == 0).all()
# no overlap
f = float_frame[:10]
g = float_frame[10:]
combined = f.combine_first(g)
tm.assert_series_equal(combined["A"].reindex(f.index), f["A"])
tm.assert_series_equal(combined["A"].reindex(g.index), g["A"])
# corner cases
comb = float_frame.combine_first(DataFrame())
tm.assert_frame_equal(comb, float_frame)
comb = DataFrame().combine_first(float_frame)
tm.assert_frame_equal(comb, float_frame)
comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
assert "faz" in comb.index
# #2525
df = DataFrame({"a": [1]}, index=[datetime(2012, 1, 1)])
df2 = DataFrame(columns=["b"])
result = df.combine_first(df2)
assert "b" in result
def test_combine_first_mixed_bug(self):
idx = Index(["a", "b", "c", "e"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "e"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame1 = DataFrame({"col0": ser1, "col2": ser2, "col3": ser3})
idx = Index(["a", "b", "c", "f"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "f"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame2 = DataFrame({"col1": ser1, "col2": ser2, "col5": ser3})
combined = frame1.combine_first(frame2)
assert len(combined.columns) == 5
def test_combine_first_same_as_in_update(self):
# gh 3016 (same as in update)
df = DataFrame(
[[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
columns=["A", "B", "bool1", "bool2"],
)
other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
result = df.combine_first(other)
tm.assert_frame_equal(result, df)
df.loc[0, "A"] = np.nan
result = df.combine_first(other)
df.loc[0, "A"] = 45
tm.assert_frame_equal(result, df)
def test_combine_first_doc_example(self):
# doc example
df1 = DataFrame(
{"A": [1.0, np.nan, 3.0, 5.0, np.nan], "B": [np.nan, 2.0, 3.0, np.nan, 6.0]}
)
df2 = DataFrame(
{
"A": [5.0, 2.0, 4.0, np.nan, 3.0, 7.0],
"B": [np.nan, np.nan, 3.0, 4.0, 6.0, 8.0],
}
)
result = df1.combine_first(df2)
expected = DataFrame({"A": [1, 2, 3, 5, 3, 7.0], "B": [np.nan, 2, 3, 4, 6, 8]})
tm.assert_frame_equal(result, expected)
def test_combine_first_return_obj_type_with_bools(self):
# GH3552
df1 = DataFrame(
[[np.nan, 3.0, True], [-4.6, np.nan, True], [np.nan, 7.0, False]]
)
df2 = DataFrame([[-42.6, np.nan, True], [-5.0, 1.6, False]], index=[1, 2])
expected = Series([True, True, False], name=2, dtype=bool)
result_12 = df1.combine_first(df2)[2]
tm.assert_series_equal(result_12, expected)
result_21 = df2.combine_first(df1)[2]
tm.assert_series_equal(result_21, expected)
@pytest.mark.parametrize(
"data1, data2, data_expected",
(
(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[pd.NaT, pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[pd.NaT, pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[datetime(2000, 1, 2), pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 2), pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
),
)
def test_combine_first_convert_datatime_correctly(
self, data1, data2, data_expected
):
# GH 3593
df1, df2 = DataFrame({"a": data1}), DataFrame({"a": data2})
result = df1.combine_first(df2)
expected = DataFrame({"a": data_expected})
tm.assert_frame_equal(result, expected)
def test_combine_first_align_nan(self):
# GH 7509 (not fixed)
dfa = DataFrame([[pd.Timestamp("2011-01-01"), 2]], columns=["a", "b"])
dfb = DataFrame([[4], [5]], columns=["b"])
assert dfa["a"].dtype == "datetime64[ns]"
assert dfa["b"].dtype == "int64"
res = dfa.combine_first(dfb)
exp = DataFrame(
{"a": [pd.Timestamp("2011-01-01"), pd.NaT], "b": [2, 5]},
columns=["a", "b"],
)
tm.assert_frame_equal(res, exp)
assert res["a"].dtype == "datetime64[ns]"
# ToDo: this must be int64
assert res["b"].dtype == "int64"
res = dfa.iloc[:0].combine_first(dfb)
exp = DataFrame({"a": [np.nan, np.nan], "b": [4, 5]}, columns=["a", "b"])
tm.assert_frame_equal(res, exp)
# ToDo: this must be datetime64
assert res["a"].dtype == "float64"
# ToDo: this must be int64
assert res["b"].dtype == "int64"
def test_combine_first_timezone(self):
# see gh-7630
data1 = pd.to_datetime("20100101 01:01").tz_localize("UTC")
df1 = DataFrame(
columns=["UTCdatetime", "abc"],
data=data1,
index=pd.date_range("20140627", periods=1),
)
data2 = pd.to_datetime("20121212 12:12").tz_localize("UTC")
df2 = DataFrame(
columns=["UTCdatetime", "xyz"],
data=data2,
index=pd.date_range("20140628", periods=1),
)
res = df2[["UTCdatetime"]].combine_first(df1)
exp = DataFrame(
{
"UTCdatetime": [
pd.Timestamp("2010-01-01 01:01", tz="UTC"),
pd.Timestamp("2012-12-12 12:12", tz="UTC"),
],
"abc": [pd.Timestamp("2010-01-01 01:01:00", tz="UTC"), pd.NaT],
},
columns=["UTCdatetime", "abc"],
index=pd.date_range("20140627", periods=2, freq="D"),
)
assert res["UTCdatetime"].dtype == "datetime64[ns, UTC]"
assert res["abc"].dtype == "datetime64[ns, UTC]"
tm.assert_frame_equal(res, exp)
# see gh-10567
dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="UTC")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-03", "2015-01-05", tz="UTC")
df2 = DataFrame({"DATE": dts2})
res = df1.combine_first(df2)
tm.assert_frame_equal(res, df1)
assert res["DATE"].dtype == "datetime64[ns, UTC]"
dts1 = pd.DatetimeIndex(
["2011-01-01", "NaT", "2011-01-03", "2011-01-04"], tz="US/Eastern"
)
df1 = DataFrame({"DATE": dts1}, index=[1, 3, 5, 7])
dts2 = pd.DatetimeIndex(
["2012-01-01", "2012-01-02", "2012-01-03"], tz="US/Eastern"
)
df2 = DataFrame({"DATE": dts2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.DatetimeIndex(
[
"2011-01-01",
"2012-01-01",
"NaT",
"2012-01-02",
"2011-01-03",
"2011-01-04",
],
tz="US/Eastern",
)
exp = DataFrame({"DATE": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
# different tz
dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="US/Eastern")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-03", "2015-01-05")
df2 = DataFrame({"DATE": dts2})
# if df1 doesn't have NaN, keep its dtype
res = df1.combine_first(df2)
tm.assert_frame_equal(res, df1)
assert res["DATE"].dtype == "datetime64[ns, US/Eastern]"
dts1 = pd.date_range("2015-01-01", "2015-01-02", tz="US/Eastern")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-01", "2015-01-03")
df2 = DataFrame({"DATE": dts2})
res = df1.combine_first(df2)
exp_dts = [
pd.Timestamp("2015-01-01", tz="US/Eastern"),
pd.Timestamp("2015-01-02", tz="US/Eastern"),
pd.Timestamp("2015-01-03"),
]
exp = DataFrame({"DATE": exp_dts})
tm.assert_frame_equal(res, exp)
assert res["DATE"].dtype == "object"
def test_combine_first_timedelta(self):
data1 = pd.TimedeltaIndex(["1 day", "NaT", "3 day", "4day"])
df1 = DataFrame({"TD": data1}, index=[1, 3, 5, 7])
data2 = pd.TimedeltaIndex(["10 day", "11 day", "12 day"])
df2 = DataFrame({"TD": data2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.TimedeltaIndex(
["1 day", "10 day", "NaT", "11 day", "3 day", "4 day"]
)
exp = DataFrame({"TD": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
assert res["TD"].dtype == "timedelta64[ns]"
def test_combine_first_period(self):
data1 = pd.PeriodIndex(["2011-01", "NaT", "2011-03", "2011-04"], freq="M")
df1 = DataFrame({"P": data1}, index=[1, 3, 5, 7])
data2 = pd.PeriodIndex(["2012-01-01", "2012-02", "2012-03"], freq="M")
df2 = DataFrame({"P": data2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.PeriodIndex(
["2011-01", "2012-01", "NaT", "2012-02", "2011-03", "2011-04"], freq="M"
)
exp = DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
assert res["P"].dtype == data1.dtype
# different freq
dts2 = pd.PeriodIndex(["2012-01-01", "2012-01-02", "2012-01-03"], freq="D")
df2 = DataFrame({"P": dts2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = [
pd.Period("2011-01", freq="M"),
pd.Period("2012-01-01", freq="D"),
pd.NaT,
pd.Period("2012-01-02", freq="D"),
pd.Period("2011-03", freq="M"),
pd.Period("2011-04", freq="M"),
]
exp = DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
assert res["P"].dtype == "object"
def test_combine_first_int(self):
# GH14687 - integer series that do no align exactly
df1 = DataFrame({"a": [0, 1, 3, 5]}, dtype="int64")
df2 = DataFrame({"a": [1, 4]}, dtype="int64")
result_12 = df1.combine_first(df2)
expected_12 = DataFrame({"a": [0, 1, 3, 5]})
tm.assert_frame_equal(result_12, expected_12)
result_21 = df2.combine_first(df1)
expected_21 = DataFrame({"a": [1, 4, 3, 5]})
tm.assert_frame_equal(result_21, expected_21)
@pytest.mark.parametrize("val", [1, 1.0])
def test_combine_first_with_asymmetric_other(self, val):
# see gh-20699
df1 = DataFrame({"isNum": [val]})
df2 = DataFrame({"isBool": [True]})
res = df1.combine_first(df2)
exp = DataFrame({"isBool": [True], "isNum": [val]})
tm.assert_frame_equal(res, exp)
def test_combine_first_string_dtype_only_na(self):
# GH: 37519
df = DataFrame({"a": ["962", "85"], "b": [pd.NA] * 2}, dtype="string")
df2 = DataFrame({"a": ["85"], "b": [pd.NA]}, dtype="string")
df.set_index(["a", "b"], inplace=True)
df2.set_index(["a", "b"], inplace=True)
result = df.combine_first(df2)
expected = DataFrame(
{"a": ["962", "85"], "b": [pd.NA] * 2}, dtype="string"
).set_index(["a", "b"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"scalar1, scalar2",
[
(datetime(2020, 1, 1), datetime(2020, 1, 2)),
(pd.Period("2020-01-01", "D"), pd.Period("2020-01-02", "D")),
(pd.Timedelta("89 days"), pd.Timedelta("60 min")),
(pd.Interval(left=0, right=1), pd.Interval(left=2, right=3, closed="left")),
],
)
def test_combine_first_timestamp_bug(scalar1, scalar2, nulls_fixture):
# GH28481
na_value = nulls_fixture
frame = DataFrame([[na_value, na_value]], columns=["a", "b"])
other = DataFrame([[scalar1, scalar2]], columns=["b", "c"])
common_dtype = | find_common_type([frame.dtypes["b"], other.dtypes["b"]]) | pandas.core.dtypes.cast.find_common_type |
"""
1. Universal base class for luigi targets.
2. Target for saving pandas.DataFrame to CSV file.
3. Target for saving numpy.array to CSV file.
Example:
```
target = DataFrameCSVTarget('path/to/file.csv')
with target.open('w') as stream:
stream.write({'lol': 1, 'lal': 2})
with target.open('r') as stream:
dataframe = stream.read()
```
"""
from contextlib import contextmanager
from pathlib import Path
import pandas
import numpy
from luigi import Target
class BaseTarget(Target):
def exists(self):
raise NotImplementedError
@contextmanager
def open(self, mode='rw'):
try:
yield self
finally:
if 'w' in mode:
self.close()
def read(self):
raise NotImplementedError
def write(self, data):
raise NotImplementedError
def close(self):
pass
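# The module docstring above also mentions a target for numpy arrays; the class
# below is a minimal sketch of how such a target could look (an assumption
# modelled on DataFrameCSVTarget, not code recovered from the original module).
class NumpyCSVTarget(BaseTarget):
    """Save numpy.array objects to one *.csv file (sketch)."""
    def __init__(self, path, name=None):
        if isinstance(path, str):
            path = Path(path)
        if name is not None:
            path /= name + '.csv'
        self.path = path
        self.arrays = []
    def exists(self):
        return self.path.exists()
    def write(self, data):
        # accumulate rows in memory; they are flushed to disk on close()
        self.arrays.append(numpy.atleast_2d(data))
    def read(self):
        return numpy.loadtxt(str(self.path), delimiter=',')
    def close(self):
        if self.arrays:
            numpy.savetxt(str(self.path), numpy.vstack(self.arrays), delimiter=',')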
class DataFrameCSVTarget(BaseTarget):
"""Save pandas.DataFrame objects to one *.csv file.
"""
def __init__(self, path, name=None):
if isinstance(path, str):
path = Path(path)
if name is not None:
path /= name + '.csv'
self.path = path
self.dataframe = | pandas.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 8 14:37:03 2019
@author: ppradeep
"""
import os
clear = lambda: os.system('cls')
clear()
## Import packages
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import pickle
# Classifiers
from sklearn.ensemble import RandomForestRegressor
from sklearn import svm, preprocessing
path = 'C:/Users/Administrator/OneDrive/Profile/Desktop/HTTK/'
#path = 'Z:/Projects/HTTK/'
#%%
# Normalize descriptors: Transform variables to mean=0, variance=1
def normalizeDescriptors(X):
scaler = preprocessing.StandardScaler().fit(X)
transformed = scaler.transform(X)
x_norm = pd.DataFrame(transformed, index = X.index)
x_norm.columns = X.columns
return(scaler, x_norm)
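# Toy example: normalizeDescriptors(pd.DataFrame({'LogP': [1.0, 2.0, 3.0]}))
# returns the fitted StandardScaler plus a frame whose column becomes roughly
# [-1.22, 0.00, 1.22], i.e. zero mean and unit variance.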
#%%
###########################################################################
###########################################################################
## Build the final models
###########################################################################
###########################################################################
####-----------------------------------------------------------------------------------------------------------------
## Read training data
####-----------------------------------------------------------------------------------------------------------------
data1 = pd.read_csv(path+'data/Prachi-112117.txt', index_col = 'CAS').loc[:,['All.Compound.Names', 'Human.Funbound.plasma', 'Human.Clint']]
data1.rename(columns={'All.Compound.Names' : 'Name'}, inplace = True)
data2 = pd.read_excel(path+'data/AFFINITY_Model_Results-2018-02-27.xlsx', index_col = 'CAS').loc[:,['Name','Fup.Med']]
data2.rename(columns={'Name': 'All.Compound.Names','Fup.Med':'Human.Funbound.plasma'}, inplace = True)
data3 = pd.read_excel(path+'data/CLint-2018-03-01-Results.xlsx', index_col = 'CAS').loc[:,['Name','CLint.1uM.Median']]
data3.rename(columns={'Name': 'All.Compound.Names','CLint.1uM.Median':'Human.Clint'}, inplace = True)
#%%
####-----------------------------------------------------------------------------------------------------------------
## Read training fingerprints
####-----------------------------------------------------------------------------------------------------------------
## Chemotyper FPs: 779 Toxprints
df_chemotypes = pd.read_csv(path+'data/toxprint.txt', sep = ';', index_col='M_NAME') #Rename 'M_NAME' to 'CAS' in data file
## PubChem FPs: 881 bits
df_pubchem = pd.read_csv(path+'data/pubchem.txt', index_col='row ID')
####-----------------------------------------------------------------------------------------------------------------
## Read continuous descriptors
####-----------------------------------------------------------------------------------------------------------------
### OPERA descriptors
df_opera = pd.read_csv(path+'data/OPERA2.5_Pred.csv', index_col='MoleculeID')[['LogP_pred','pKa_a_pred', 'pKa_b_pred']] #In MOE: Right click on mol -> Name -> Extract -> new field 'CAS'
df_opera['pKa_pred']=df_opera[['pKa_a_pred','pKa_b_pred']].min(axis=1)
opera_scaler, opera = normalizeDescriptors(df_opera)#[['pKa_pred','LogP_pred']]
opera = opera[['pKa_pred','LogP_pred']]
## PADEL descriptors
df_padel = pd.read_csv(path+'data/padel.txt', index_col='Name').dropna()
padel_scaler, padel = normalizeDescriptors(df_padel)
## CDK descriptors
df_cdk = pd.read_csv(path+'data/cdk.txt', index_col='row ID').dropna() #Add CAS column to file
cdk_scaler, cdk = normalizeDescriptors(df_cdk)
#%%
####-----------------------------------------------------------------------------------------------------------------
## Save the normalization vector
####-----------------------------------------------------------------------------------------------------------------
pickle.dump(opera_scaler, open(path+'output/opera_scaler.sav', 'wb'))
pickle.dump(padel_scaler, open(path+'output/padel_scaler.sav', 'wb'))
pickle.dump(cdk_scaler, open(path+'output/cdk_scaler.sav', 'wb'))
#%%
####-----------------------------------------------------------------------------------------------------------------
## Features from the 5-fold CV model
####-----------------------------------------------------------------------------------------------------------------
fub_features = pd.read_csv(path+'output/Human.Funbound.plasma_Features.csv')
clint_features_clas = pd.read_csv(path+'output/Clint_Features_Classification.csv')
clint_features_reg = pd.read_csv(path+'output/Clint_Features_Regression.csv')
#%%
####-----------------------------------------------------------------------------------------------------------------
## Model for Fraction Unbound in Plasma
####-----------------------------------------------------------------------------------------------------------------
y_var = 'Human.Funbound.plasma'
# Create a new dataframe with chemical names and y variable value based on raw data
casList = list(set(data1.index.tolist()+data2.index.tolist()+data3.index.tolist()))
data = pd.DataFrame(index = casList, columns = ['Name',y_var])
# Update the training data. If y value is available from later data (data 2 or 3) use that, if not use from old data (data1)
for cas in data.index:
try:
if cas in data1.index:
data.loc[cas,'Name'] = data1.loc[cas,'Name']
data.loc[cas,y_var] = data1.loc[cas,y_var]
        if cas in data2.index:
            data.loc[cas,'Name'] = data2.loc[cas,'All.Compound.Names']
            data.loc[cas,y_var] = data2.loc[cas,y_var]
except:
pass
data.dropna(inplace = True) #Retain data with y variable values
#%%
####-----------------------------------------------------------------------------------------------------------------
## Extract y data
####-----------------------------------------------------------------------------------------------------------------
Y = data[y_var]
## Set data for modeling
## Transform Y
Y[Y==1.0] = 0.99
Y[Y==0] = 0.005
Y_model = (1-Y)/Y
Y_model = Y_model.apply(lambda x: np.log10(x))
Y_index = Y_model.index
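## Worked examples of the transform y = log10((1 - fub)/fub):
## fub = 0.99 -> log10(0.01/0.99) ~ -2.0, fub = 0.5 -> 0.0, fub = 0.005 -> ~ +2.3;
## the transform is inverted via fub = 1/(1 + 10**y).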
#%%
####-----------------------------------------------------------------------------------------------------------------
## Combine fingerprints
####-----------------------------------------------------------------------------------------------------------------
fingerprints = pd.concat([df_pubchem, df_chemotypes], axis=1).dropna()
fingerprints = fingerprints.loc[Y_index,:].dropna()
# Select fingerprints from the feature file
retain = [str(val.replace("'", "").replace(" ", "")) for val in fub_features.ix[0,'Fingerprints'].split(',')]
retain[0] = retain[0].replace("[", "")
retain[len(retain)-1] = retain[len(retain)-1].replace("c]",'c') ##manually check the last entry and correct it
fingerprints = fingerprints.loc[:,retain]
####-----------------------------------------------------------------------------------------------------------------
## Combine descriptors
####-----------------------------------------------------------------------------------------------------------------
descriptors = pd.concat([padel, cdk], axis=1).dropna()
descriptors = descriptors.loc[Y_index,:].dropna()
# Select descriptors from the feature file
retain = [str(val.replace("'", "").replace(" ", "")) for val in fub_features.ix[0,'Padel+CDK'].split(',')]
retain[0] = retain[0].replace("[", "")
retain[len(retain)-1] = retain[len(retain)-1].replace("]",'')
descriptors = descriptors.loc[:,retain]
####-----------------------------------------------------------------------------------------------------------------
## Combine all the descriptors and set the X and Y for training the model
####-----------------------------------------------------------------------------------------------------------------
data = pd.concat([Y_model, fingerprints, opera], axis=1).dropna(axis=0, how='any')
X_fub_model = data.ix[:, data.columns != y_var]
Y_fub_model = data[y_var]
meanY = np.mean(Y_fub_model)
stdY = np.std(Y_fub_model)
#%%
## Histogram of the final training set
import matplotlib.pyplot as plt
plt.figure(figsize=(8, 6), dpi = 300)
Y_fub_model.hist(alpha = 0.75, color = 'r', grid = False)
plt.annotate('N = %d' %len(Y_fub_model), [-2.5,200], size = 20)
plt.annotate('$\mu = %0.2f$' %(meanY), [-2.5,185], size = 20)
plt.annotate('$\sigma = %0.2f$' %(stdY), [-2.5,170], size = 20)
plt.xlabel('Fub$_{tr}$', size = 24, labelpad = 10)
plt.ylabel('Frequency', size = 24, labelpad = 10)
plt.xticks(fontsize = 24)#, rotation = 90)
plt.yticks(fontsize = 24)
plt.savefig(path+'/output/%s_TrainingData.png' %y_var, bbox_inches='tight')
plt.show()
data.to_csv(path+'output/fub_trainingdata.csv', index_label='CASRN')
#%%
####-----------------------------------------------------------------------------------------------------------------
## Develop model
clf_fub1 = svm.SVR(epsilon = 0.1, C = 10, gamma = 0.01, kernel = "rbf")
clf_fub1 = clf_fub1.fit(X = X_fub_model, y = Y_fub_model)
clf_fub2 = RandomForestRegressor(max_features = 'auto', n_estimators = 1000, random_state = 5)
clf_fub2 = clf_fub2.fit(X = X_fub_model, y = Y_fub_model)
#
## Save the models to disk
pickle.dump(clf_fub1, open(path+'output/fub_svr.sav', 'wb'))
pickle.dump(clf_fub2, open(path+'output/fub_rf.sav', 'wb'))
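## Reuse sketch (not part of the original script): load a saved model and map its
## predictions back to the fraction-unbound scale; X_new is assumed to carry the
## same selected fingerprint + OPERA columns, normalized with the saved scalers.
## clf = pickle.load(open(path+'output/fub_svr.sav', 'rb'))
## y_tr = clf.predict(X_new)
## fub_pred = 1.0/(1.0 + 10**y_tr)  # inverts y = log10((1-fub)/fub)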
#%%
###########################################################################
## Models for Intrinsic Clearance
###########################################################################
###########################################################################
## Read and analyze input data
###########################################################################
data1 = pd.read_csv(path+'data/Prachi-112117.txt', index_col = 'CAS').loc[:,['All.Compound.Names', 'Human.Funbound.plasma', 'Human.Clint']]
data1.rename(columns={'All.Compound.Names' : 'Name'}, inplace = True)
data2 = pd.read_excel(path+'data/AFFINITY_Model_Results-2018-02-27.xlsx', index_col = 'CAS').loc[:,['Name','Fup.Med']]
data2.rename(columns={'Name': 'All.Compound.Names','Fup.Med':'Human.Funbound.plasma'}, inplace = True)
data3 = pd.read_excel(path+'data/CLint-2018-03-01-Results.xlsx', index_col = 'CAS').loc[:,['Name','CLint.1uM.Median']]
data3.rename(columns={'Name': 'All.Compound.Names','CLint.1uM.Median':'Human.Clint'}, inplace = True)
#%%
## HTTK package data
# Set y variable
y_var = 'Human.Clint'
# Create a new dataframe with chemical names and y variable value based on raw data
casList = list(set(data1.index.tolist()+data2.index.tolist()+data3.index.tolist()))
#%%
data = pd.DataFrame(index = casList, columns = ['Name',y_var])
#%%
# Update the training data. If y value is available from later data (data 2 or 3) use that, if not use from old data (data1)
for cas in data.index:
try:
if cas in data1.index:
data.loc[cas,'Name'] = data1.loc[cas,'Name']
data.loc[cas,y_var] = data1.loc[cas,y_var]
        if cas in data3.index:
            data.loc[cas,'Name'] = data3.loc[cas,'All.Compound.Names']
            data.loc[cas,y_var] = data3.loc[cas,y_var]
except:
pass
data.dropna(inplace = True) #Retain data with y variable values
#%%
## Transform the data: Bin the clearance variable for classification
Y = data[y_var]
Y_clas = Y.copy()
[Y_clas.set_value(idx, int(-3)) for idx in Y_clas[Y_clas <= 0.9].index]
[Y_clas.set_value(idx, int(-2)) for idx in Y_clas[(Y_clas > 0.9) & (Y_clas <= 50)].index]
[Y_clas.set_value(idx, int(-1)) for idx in Y_clas[Y_clas > 50].index]
Y_clas = pd.Series(Y_clas, index = Y.index)
low_median = Y[Y_clas[Y_clas==-3].index].median()
high_median = Y[Y_clas[Y_clas==-1].index].median()
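## i.e. Clint <= 0.9 -> class -3 (low), 0.9 < Clint <= 50 -> class -2 (medium),
## Clint > 50 -> class -1 (high); low_median and high_median hold the median Clint
## of the low and high bins (presumably as representative values for those classes downstream).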
###########################################################################
## Classification:
## Combine fingerprints and perform feature selection
fingerprints = pd.concat([df_pubchem, df_chemotypes], axis=1).dropna()
fingerprints = fingerprints.loc[Y_clas.index,:].dropna()
# Select fingerprints from the feature file
retain = [str(val.replace("'", "").replace(" ", "")) for val in clint_features_clas.ix[0,'Fingerprints'].split(',')]
retain[0] = retain[0].replace("[", "")
retain[len(retain)-1] = retain[len(retain)-1].replace("]",'')
fingerprints = fingerprints.loc[:,retain]
#%%
## Classification: Combine all the descriptors and set the X and Y for training the classification model
data = pd.concat([Y_clas, fingerprints, opera], axis=1).dropna(axis=0, how='any')
X_ClintClas_model = data.ix[:, data.columns != y_var]
Y_ClintClas_model = data[y_var]
#%%
data.to_csv(path+'output/clintclas_trainingdata.csv', index_label='CASRN')
#%%
## Histogram of the final training set
import matplotlib.pyplot as plt
plt.gcf().subplots_adjust(bottom=0.5)
plt.figure(figsize=[8,6], dpi = 300)
plt.hist(Y_ClintClas_model.values.tolist(), color = 'r', align = 'left', rwidth = 1)
plt.annotate('N = %d' %len(Y_ClintClas_model.values.tolist()), [-3.15,260], size = 24)
labels = ['Low', 'Medium', 'High']
plt.xticks([-3, -2, -1], labels, size = 18)
plt.xlabel('Transformed Clearance \n(for classification)', size = 28, labelpad = 5)
plt.ylabel('Frequency', size = 28, labelpad = 5)
plt.xticks(fontsize = 20)#, rotation = 90)
plt.yticks(fontsize = 20)
plt.savefig(path+'output/%sClas_TrainingData.png'%y_var, bbox_inches='tight')
#%%
###########################################################################
## Develop classification model
## Classification model
clf_clintClas = svm.SVC(C=10, decision_function_shape='ovo', gamma=0.01, kernel='rbf')
clf_clintClas = clf_clintClas.fit(X = X_ClintClas_model, y = Y_ClintClas_model.values.tolist())
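## Application sketch (an assumption about the intended workflow, not code from the
## original script): clf_clintClas.predict(X_new) yields -3/-2/-1 labels; chemicals
## predicted as medium (-2) can be passed to the regression model built below, while
## the low/high classes could fall back on low_median/high_median.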
#%%
###########################################################################
## Intrinsic Clearance Regression
###########################################################################
## Regression
## Extract y data for regression
Y_reg = Y[(Y > 0.9) & (Y <= 50)]
## Transform Y
Y_reg = Y_reg.apply(lambda x: np.log10(x))
## Combine fingerprints and perform feature selection
fingerprints = pd.concat([df_pubchem, df_chemotypes], axis=1).dropna()
fingerprints = fingerprints.loc[Y_reg.index,:].dropna()
#%%
# Select fingerprints from the feature file
retain = [str(val.replace("'", "").replace(" ", "")) for val in clint_features_reg.ix[0,'Fingerprints'].split(',')]
retain[0] = retain[0].replace("[", "")
retain[len(retain)-1] = retain[len(retain)-1].replace("]",'')
fingerprints = fingerprints.loc[:,retain]
descriptors = pd.concat([padel, cdk], axis=1).dropna()
descriptors = descriptors.loc[Y_clas.index,:].dropna()
# Select descriptors from the feature file
retain = [str(val.replace("'", "").replace(" ", "")) for val in clint_features_reg.ix[0,'Padel+CDK'].split(',')]
retain[0] = retain[0].replace("[", "")
retain[len(retain)-1] = retain[len(retain)-1].replace("]",'')
descriptors = descriptors.loc[:,retain]
#%%
## Combine all the descriptors and set the X and Y for training the regression model
data = | pd.concat([Y_reg, fingerprints], axis=1) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 21 14:08:43 2019
to produce X and y use combine_pos_neg_from_nc_file or
prepare_X_y_for_holdout_test
@author: ziskin
"""
from PW_paths import savefig_path
from PW_paths import work_yuval
from pathlib import Path
cwd = Path().cwd()
hydro_path = work_yuval / 'hydro'
axis_path = work_yuval/'axis'
gis_path = work_yuval / 'gis'
ims_path = work_yuval / 'IMS_T'
hydro_ml_path = hydro_path / 'hydro_ML'
gnss_path = work_yuval / 'GNSS_stations'
# 'tela': 17135
hydro_pw_dict = {'nizn': 25191, 'klhv': 21105, 'yrcm': 55165,
'ramo': 56140, 'drag': 48125, 'dsea': 48192,
'spir': 56150, 'nrif': 60105, 'elat': 60190
}
hydro_st_name_dict = {25191: 'Lavan - new nizana road',
21105: 'Shikma - Tel milcha',
55165: 'Mamsheet',
56140: 'Ramon',
48125: 'Draga',
48192: 'Chiemar - down the cliff',
46150: 'Nekrot - Top',
60105: 'Yaelon - Kibutz Yahel',
60190: 'Solomon - Eilat'}
best_hp_models_dict = {'SVC': {'kernel': 'rbf', 'C': 1.0, 'gamma': 0.02,
'coef0': 0.0, 'degree': 1},
'RF': {'max_depth': 5, 'max_features': 'auto',
'min_samples_leaf': 1, 'min_samples_split': 2,
'n_estimators': 400},
'MLP': {'alpha': 0.1, 'activation': 'relu',
'hidden_layer_sizes': (10,10,10), 'learning_rate': 'constant',
'solver': 'lbfgs'}}
scorer_order = ['precision', 'recall', 'f1', 'accuracy', 'tss', 'hss']
tsafit_dict = {'lat': 30.985556, 'lon': 35.263056,
'alt': -35.75, 'dt_utc': '2018-04-26T10:15:00'}
axis_southern_stations = ['Dimo', 'Ohad', 'Ddse', 'Yotv', 'Elat', 'Raha', 'Yaha']
soi_axis_dict = {'yrcm': 'Dimo',
'slom': 'Ohad',
'dsea': 'Ddse',
'nrif': 'Yotv',
'elat': 'Elat',
'klhv': 'Raha',
'spir': 'Yaha'}
def plot_mean_abs_shap_values_features(SV, fix_xticklabels=True):
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from natsort import natsorted
features = ['pwv', 'pressure', 'DOY']
# sns.set_palette('Dark2', 6)
sns.set_theme(style='ticks', font_scale=1.5)
# sns.set_style('whitegrid')
# sns.set_style('ticks')
sv = np.abs(SV).mean('sample').sel(clas=0).reset_coords(drop=True)
gr_spec = [20, 20, 1]
fig, axes = plt.subplots(1, 3, sharey=True, figsize=(17, 5), gridspec_kw={'width_ratios': gr_spec})
try:
axes.flatten()
except AttributeError:
axes = [axes]
for i, f in enumerate(features):
fe = [x for x in sv['feature'].values if f in x]
dsf = sv.sel(feature=fe).reset_coords(drop=True).to_dataframe()
title = '{}'.format(f.upper())
dsf.plot.bar(ax=axes[i], title=title, rot=0, legend=False, zorder=20,
width=.8, color='k', alpha=0.8)
axes[i].set_title(title)
dsf_sum = dsf.sum().tolist()
handles, labels = axes[i].get_legend_handles_labels()
labels = [
'{} ({:.1f} %)'.format(
x, y) for x, y in zip(
labels, dsf_sum)]
# axes[i].legend(handles=handles, labels=labels, prop={'size': fontsize-3}, loc='upper center')
axes[i].set_ylabel('mean(|SHAP value|)\n(average impact\non model output magnitude)')
axes[i].grid(axis='y', zorder=1)
if fix_xticklabels:
# n = sum(['pwv' in x for x in sv.feature.values])
axes[2].xaxis.set_ticklabels('')
axes[2].set_xlabel('')
hrs = np.arange(-1, -25, -1)
axes[0].set_xticklabels(hrs, rotation=30, ha="center", fontsize=12)
axes[1].set_xticklabels(hrs, rotation=30, ha="center", fontsize=12)
axes[2].tick_params()
axes[0].set_xlabel('Hours prior to flood')
axes[1].set_xlabel('Hours prior to flood')
fig.tight_layout()
filename = 'RF_shap_values_{}.png'.format('+'.join(features))
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def read_binary_classification_shap_values_to_pandas(shap_values, X):
import xarray as xr
SV0 = X.copy(data=shap_values[0])
SV1 = X.copy(data=shap_values[1])
SV = xr.concat([SV0, SV1], dim='clas')
SV['clas'] = [0, 1]
return SV
def get_shap_values_RF_classifier(plot=True):
import shap
X, y = combine_pos_neg_from_nc_file()
ml = ML_Classifier_Switcher()
rf = ml.pick_model('RF')
rf.set_params(**best_hp_models_dict['RF'])
X = select_doy_from_feature_list(X, features=['pwv', 'pressure', 'doy'])
rf.fit(X, y)
explainer = shap.TreeExplainer(rf)
shap_values = explainer.shap_values(X.values)
if plot:
shap.summary_plot(shap_values, X, feature_names=[
x for x in X.feature.values], max_display=49, sort=False)
return shap_values
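# End-to-end sketch (an assumption about how the three SHAP helpers above chain
# together; X must match the features the classifier was fitted on):
# X, y = combine_pos_neg_from_nc_file()
# X = select_doy_from_feature_list(X, features=['pwv', 'pressure', 'doy'])
# shap_values = get_shap_values_RF_classifier(plot=False)
# SV = read_binary_classification_shap_values_to_pandas(shap_values, X)
# fig = plot_mean_abs_shap_values_features(SV)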
def interpolate_pwv_to_tsafit_event(path=work_yuval, savepath=work_yuval):
import pandas as pd
import xarray as xr
from PW_stations import produce_geo_gnss_solved_stations
from interpolation_routines import interpolate_var_ds_at_multiple_dts
from aux_gps import save_ncfile
# get gnss soi-apn pwv data and geo-meta data:
geo_df = produce_geo_gnss_solved_stations(plot=False)
pw = xr.load_dataset(work_yuval/'GNSS_PW_thresh_50.nc')
pw = pw[[x for x in pw if '_error' not in x]]
pw = pw.sel(time=slice('2018-04-25', '2018-04-26'))
pw = pw.drop_vars(['elat', 'elro', 'csar', 'slom'])
# get tsafit data:
predict_df = pd.DataFrame(tsafit_dict, index=['tsafit'])
df_inter = interpolate_var_ds_at_multiple_dts(pw, geo_df, predict_df)
    da = df_inter['interpolated_lr_fixed'].to_xarray()
    da.name = 'pwv'
    da.attrs['operation'] = 'interpolated from SOI-APN PWV data'
da.attrs['WV scale height'] = 'variable from SOI-APN data'
da.attrs.update(**tsafit_dict)
if savepath is not None:
filename = 'Tsafit_PWV_event.nc'
save_ncfile(da, savepath, filename)
return da
def plot_tsafit_event(path=work_yuval):
import xarray as xr
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme(style='ticks', font_scale=1.5)
da = xr.load_dataarray(path / 'Tsafit_PWV_event.nc')
fig, ax = plt.subplots(figsize=(11, 8))
da_sliced = da.sel(time=slice('2018-04-26T00:00:00', '2018-04-26T12:00:00'))
# da_sliced.name = 'PWV [mm]'
da_sliced = da_sliced.rename({'time': 'Time [UTC]'})
da_sliced.to_dataframe().plot(ax=ax, ylabel='PWV [mm]', linewidth=2, marker='o', legend=False)
dt = pd.to_datetime(da.attrs['dt_utc'])
ax.axvline(dt, color='r', linestyle='--', linewidth=2, label='T')
handles, labels = ax.get_legend_handles_labels()
plt.legend(handles=handles, labels=['PWV', 'Tsafit Flood Event'])
ax.grid(True)
# ax.set_xlabel('Time [UTC]')
fig.tight_layout()
fig.suptitle('PWV from SOI-APN over Tsafit area on 2018-04-26')
fig.subplots_adjust(top=0.941)
return fig
# TODO: treat all pwv from events as follows:
# For each station:
# 0) apply a 1-hour rolling mean to all pwv
# 1) take the 288 points before each event; if fewer than 144 remain, drop the event
# 2) interpolate them to 12H using spline/other
# 3) then, check if dts coincide 1 day before; if not, concat all dts+pwv for each station
# 4) prepare features such as pressure and doy; try to get pressure near the stations and remove the long-term hour/dayofyear signal
# pressure in BD anoms is highly correlated with SEDOM (0.9) and ELAT (0.88), so no need for local pressure features
# fixed filling with Jerusalem centre since 2 drag events were dropped due to lack of pressure data in 2018-11 and 2019-02
# 5) feature addition: should be like pwv steps 1-3
# 6) negative events should be sampled separately
# 7) now prepare pwv and pressure as a single ds with a 1-hourly sample rate
# 8) produce positives and save them to file!
# 9) produce a way to get negatives considering the positives
# maybe implement permutation importance for pwv? see what is more important to
# the model in 24 hours? only on SVC and MLP?
# implement TSS and HSS scores and test them (make_scorer from a confusion matrix; see the illustrative sketch below)
# redo results but with inner and outer splits of 4, 4
# plot and see best_score per refit-scorer - this is the best score of GridSearchCV on the entire
# train/validation subset per each outer fold - basically see if the test metric increased after the GridSearchCV, as it should
# use a holdout set
# implement RepeatedStratifiedKFold and run it...
# check the stability of the GridSearchCV... also run with 4 folds?
# finalize the permutation_importances and permutation_test_scores
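# The TODO above mentions TSS and HSS scorers built from a confusion matrix.
# The sketch below is illustrative only (the module's own tss_score/hss_score,
# used further down via make_scorer, may differ); the function name is a
# hypothetical example and it assumes binary 0/1 labels with both classes present.
def example_tss_hss_from_confusion_matrix(y_true, y_pred):
    """Illustrative sketch: True Skill Statistic and Heidke Skill Score."""
    from sklearn.metrics import confusion_matrix
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=[0, 1]).ravel()
    # TSS = sensitivity + specificity - 1 = TPR - FPR
    tss = tp / (tp + fn) - fp / (fp + tn)
    # HSS = 2*(tp*tn - fp*fn) / [(tp+fn)*(fn+tn) + (tp+fp)*(fp+tn)]
    hss = 2 * (tp * tn - fp * fn) / ((tp + fn) * (fn + tn) + (tp + fp) * (fp + tn))
    return tss, hss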
def prepare_tide_events_GNSS_dataset(hydro_path=hydro_path):
import xarray as xr
import pandas as pd
import numpy as np
from aux_gps import xr_reindex_with_date_range
feats = xr.load_dataset(
hydro_path/'hydro_tides_hourly_features_with_positives.nc')
ds = feats['Tides'].to_dataset('GNSS').rename({'tide_event': 'time'})
da_list = []
for da in ds:
time = ds[da].dropna('time')
daa = time.copy(data=np.ones(time.shape))
daa['time'] = pd.to_datetime(time.values)
daa.name = time.name + '_tide'
da_list.append(daa)
ds = xr.merge(da_list)
li = [xr_reindex_with_date_range(ds[x], freq='H') for x in ds]
ds = xr.merge(li)
return ds
def select_features_from_X(X, features='pwv'):
if isinstance(features, str):
f = [x for x in X.feature.values if features in x]
X = X.sel(feature=f)
elif isinstance(features, list):
fs = []
for f in features:
fs += [x for x in X.feature.values if f in x]
X = X.sel(feature=fs)
return X
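# Usage sketch for select_features_from_X (illustrative; assumes X carries a
# 'feature' dim with names such as 'pwv_1'...'pwv_24', 'pressure_1'..., 'DOY'):
#   X_pwv = select_features_from_X(X, features='pwv')               # pwv_* only
#   X_pp = select_features_from_X(X, features=['pwv', 'pressure'])  # pwv_* + pressure_*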
def combine_pos_neg_from_nc_file(hydro_path=hydro_path,
negative_sample_num=1,
seed=1, std=True):
from aux_gps import path_glob
from sklearn.utils import resample
import xarray as xr
import numpy as np
# import pandas as pd
if std:
file = path_glob(
hydro_path, 'hydro_tides_hourly_features_with_positives_negatives_std*.nc')[-1]
else:
file = path_glob(
hydro_path, 'hydro_tides_hourly_features_with_positives_negatives_*.nc')[-1]
ds = xr.open_dataset(file)
# get the positive features and produce target:
X_pos = ds['X_pos'].rename({'positive_sample': 'sample'})
y_pos = xr.DataArray(np.ones(X_pos['sample'].shape), dims=['sample'])
y_pos['sample'] = X_pos['sample']
# choose at random y_pos size of negative class:
X_neg = ds['X_neg'].rename({'negative_sample': 'sample'})
pos_size = y_pos['sample'].size
np.random.seed(seed)
# negatives = []
for n_samples in [x for x in range(negative_sample_num)]:
# dts = np.random.choice(X_neg['sample'], size=y_pos['sample'].size,
# replace=False)
# print(np.unique(dts).shape)
# negatives.append(X_neg.sel(sample=dts))
negative = resample(X_neg, replace=False,
n_samples=pos_size * negative_sample_num,
random_state=seed)
negatives = np.split(negative, negative_sample_num, axis=0)
Xs = []
ys = []
for X_negative in negatives:
y_neg = xr.DataArray(np.zeros(X_negative['sample'].shape), dims=['sample'])
y_neg['sample'] = X_negative['sample']
# now concat all X's and y's:
X = xr.concat([X_pos, X_negative], 'sample')
y = xr.concat([y_pos, y_neg], 'sample')
X.name = 'X'
Xs.append(X)
ys.append(y)
if len(negatives) == 1:
return Xs[0], ys[0]
else:
return Xs, ys
def drop_hours_in_pwv_pressure_features(X, last_hours=7, verbose=True):
import numpy as np
Xcopy = X.copy()
pwvs_to_drop = ['pwv_{}'.format(x) for x in np.arange(24-last_hours + 1, 25)]
if set(pwvs_to_drop).issubset(set(X.feature.values)):
if verbose:
print('dropping {} from X.'.format(', '.join(pwvs_to_drop)))
Xcopy = Xcopy.drop_sel(feature=pwvs_to_drop)
pressures_to_drop = ['pressure_{}'.format(x) for x in np.arange(24-last_hours + 1, 25)]
if set(pressures_to_drop).issubset(set(X.feature.values)):
if verbose:
print('dropping {} from X.'.format(', '.join(pressures_to_drop)))
Xcopy = Xcopy.drop_sel(feature=pressures_to_drop)
return Xcopy
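# Usage sketch for drop_hours_in_pwv_pressure_features (illustrative):
# with last_hours=7 the dropped names are pwv_18...pwv_24 and pressure_18...pressure_24,
# i.e. np.arange(24 - 7 + 1, 25):
#   X_trimmed = drop_hours_in_pwv_pressure_features(X, last_hours=7, verbose=True)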
def check_if_negatives_are_within_positives(neg_da, hydro_path=hydro_path):
import xarray as xr
import pandas as pd
pos_da = xr.open_dataset(
hydro_path / 'hydro_tides_hourly_features_with_positives.nc')['X']
dt_pos = pos_da.sample.to_dataframe()
dt_neg = neg_da.sample.to_dataframe()
dt_all = dt_pos.index.union(dt_neg.index)
dff = pd.DataFrame(dt_all, index=dt_all)
dff = dff.sort_index()
samples_within = dff[(dff.diff()['sample'] <= pd.Timedelta(1, unit='D'))]
num = samples_within.size
print('samples that are within a day of each other: {}'.format(num))
print('samples are: {}'.format(samples_within))
return dff
def produce_negatives_events_from_feature_file(hydro_path=hydro_path, seed=42,
batches=1, verbose=1, std=True):
# do the same thing for pressure (as for pwv), but not for
import xarray as xr
import numpy as np
import pandas as pd
from aux_gps import save_ncfile
feats = xr.load_dataset(hydro_path / 'hydro_tides_hourly_features.nc')
feats = feats.rename({'doy': 'DOY'})
if std:
pos_filename = 'hydro_tides_hourly_features_with_positives_std.nc'
else:
pos_filename = 'hydro_tides_hourly_features_with_positives.nc'
all_tides = xr.open_dataset(
hydro_path / pos_filename)['X_pos']
# pos_tides = xr.open_dataset(hydro_path / 'hydro_tides_hourly_features_with_positives.nc')['tide_datetimes']
tides = xr.open_dataset(
hydro_path / pos_filename)['Tides']
# get the positives (tide events) for each station:
df_stns = tides.to_dataset('GNSS').to_dataframe()
# get all positives (tide events) for all stations:
df = all_tides.positive_sample.to_dataframe()['positive_sample']
df.columns = ['sample']
stns = [x for x in hydro_pw_dict.keys()]
other_feats = ['DOY', 'doy_sin', 'doy_cos']
# main stns df features (pwv)
pwv_df = feats[stns].to_dataframe()
pressure = feats['bet-dagan'].to_dataframe()['bet-dagan']
# define the initial no_choice_dt_range from the positive dt_range:
no_choice_dt_range = [pd.date_range(
start=dt, periods=48, freq='H') for dt in df]
no_choice_dt_range = pd.DatetimeIndex(
np.unique(np.hstack(no_choice_dt_range)))
dts_to_choose_from = pwv_df.index.difference(no_choice_dt_range)
# dts_to_choose_from_pressure = pwv_df.index.difference(no_choice_dt_range)
# loop over all stns and produce negative events:
np.random.seed(seed)
neg_batches = []
for i in np.arange(1, batches + 1):
if verbose >= 0:
print('preparing batch {}:'.format(i))
neg_stns = []
for stn in stns:
dts_df = df_stns[stn].dropna()
pwv = pwv_df[stn].dropna()
            # loop over all events in one stn:
negatives = []
negatives_pressure = []
# neg_samples = []
if verbose >= 1:
print('finding negatives for station {}, events={}'.format(
stn, len(dts_df)))
# print('finding negatives for station {}, dt={}'.format(stn, dt.strftime('%Y-%m-%d %H:%M')))
cnt = 0
while cnt < len(dts_df):
# get random number from each stn pwv:
# r = np.random.randint(low=0, high=len(pwv.index))
# random_dt = pwv.index[r]
random_dt = np.random.choice(dts_to_choose_from)
negative_dt_range = pd.date_range(
start=random_dt, periods=24, freq='H')
if not (no_choice_dt_range.intersection(negative_dt_range)).empty:
# print('#')
if verbose >= 2:
print('Overlap!')
continue
# get the actual pwv and check it is full (24hours):
negative = pwv.loc[pwv.index.intersection(negative_dt_range)]
neg_pressure = pressure.loc[pwv.index.intersection(
negative_dt_range)]
if len(negative.dropna()) != 24 or len(neg_pressure.dropna()) != 24:
# print('!')
if verbose >= 2:
print('NaNs!')
continue
if verbose >= 2:
print('number of dts that are already chosen: {}'.format(
len(no_choice_dt_range)))
negatives.append(negative)
negatives_pressure.append(neg_pressure)
                # now add to the no_choice_dt_range the negative dt_range we just acquired:
negative_dt_range_with_padding = pd.date_range(
start=random_dt-pd.Timedelta(24, unit='H'), end=random_dt+pd.Timedelta(23, unit='H'), freq='H')
no_choice_dt_range = pd.DatetimeIndex(
np.unique(np.hstack([no_choice_dt_range, negative_dt_range_with_padding])))
dts_to_choose_from = dts_to_choose_from.difference(
no_choice_dt_range)
if verbose >= 2:
print('number of dts to choose from: {}'.format(
len(dts_to_choose_from)))
cnt += 1
neg_da = xr.DataArray(negatives, dims=['sample', 'feature'])
neg_da['feature'] = ['{}_{}'.format(
'pwv', x) for x in np.arange(1, 25)]
neg_samples = [x.index[0] for x in negatives]
neg_da['sample'] = neg_samples
neg_pre_da = xr.DataArray(
negatives_pressure, dims=['sample', 'feature'])
neg_pre_da['feature'] = ['{}_{}'.format(
'pressure', x) for x in np.arange(1, 25)]
neg_pre_samples = [x.index[0] for x in negatives_pressure]
neg_pre_da['sample'] = neg_pre_samples
neg_da = xr.concat([neg_da, neg_pre_da], 'feature')
neg_da = neg_da.sortby('sample')
neg_stns.append(neg_da)
da_stns = xr.concat(neg_stns, 'sample')
da_stns = da_stns.sortby('sample')
        # now loop over the remaining features (which are station-agnostic)
        # and add them with the same negative datetimes of the pwv already acquired:
dts = [pd.date_range(x.item(), periods=24, freq='H')
for x in da_stns['sample']]
dts_samples = [x[0] for x in dts]
other_feat_list = []
for feat in feats[other_feats]:
# other_feat_sample_list = []
da_other = xr.DataArray(feats[feat].sel(time=dts_samples).values, dims=['sample'])
# for dt in dts_samples:
# da_other = xr.DataArray(feats[feat].sel(
# time=dt).values, dims=['feature'])
da_other['sample'] = dts_samples
other_feat_list.append(da_other)
# other_feat_da = xr.concat(other_feat_sample_list, 'feature')
da_other_feats = xr.concat(other_feat_list, 'feature')
da_other_feats['feature'] = other_feats
da_stns = xr.concat([da_stns, da_other_feats], 'feature')
neg_batches.append(da_stns)
neg_batch_da = xr.concat(neg_batches, 'sample')
# neg_batch_da['batch'] = np.arange(1, batches + 1)
neg_batch_da.name = 'X_neg'
feats['X_neg'] = neg_batch_da
feats['X_pos'] = all_tides
feats['X_pwv_stns'] = tides
# feats['tide_datetimes'] = pos_tides
feats = feats.rename({'sample': 'negative_sample'})
if std:
filename = 'hydro_tides_hourly_features_with_positives_negatives_std_{}.nc'.format(
batches)
else:
filename = 'hydro_tides_hourly_features_with_positives_negatives_{}.nc'.format(
batches)
save_ncfile(feats, hydro_path, filename)
return neg_batch_da
def produce_positives_from_feature_file(hydro_path=hydro_path, std=True):
import xarray as xr
import pandas as pd
import numpy as np
from aux_gps import save_ncfile
# load features:
if std:
file = hydro_path / 'hydro_tides_hourly_features_std.nc'
else:
file = hydro_path / 'hydro_tides_hourly_features.nc'
feats = xr.load_dataset(file)
feats = feats.rename({'doy': 'DOY'})
# load positive event for each station:
dfs = [read_station_from_tide_database(hydro_pw_dict.get(
x), rounding='1H') for x in hydro_pw_dict.keys()]
dfs = check_if_tide_events_from_stations_are_within_time_window(
dfs, days=1, rounding=None, return_hs_list=True)
da_list = []
positives_per_station = []
for i, feat in enumerate(feats):
try:
_, _, pr = produce_pwv_days_before_tide_events(feats[feat], dfs[i],
plot=False, rolling=None,
days_prior=1,
drop_thresh=0.75,
max_gap='6H',
verbose=0)
print('getting positives from station {}'.format(feat))
positives = [pd.to_datetime(
(x[-1].time + pd.Timedelta(1, unit='H')).item()) for x in pr]
da = xr.DataArray(pr, dims=['sample', 'feature'])
da['sample'] = positives
positives_per_station.append(positives)
da['feature'] = ['pwv_{}'.format(x) for x in np.arange(1, 25)]
da_list.append(da)
except IndexError:
continue
da_pwv = xr.concat(da_list, 'sample')
da_pwv = da_pwv.sortby('sample')
# now add more features:
da_list = []
for feat in ['bet-dagan']:
print('getting positives from feature {}'.format(feat))
positives = []
for dt_end in da_pwv.sample:
dt_st = pd.to_datetime(dt_end.item()) - pd.Timedelta(24, unit='H')
dt_end_end = pd.to_datetime(
dt_end.item()) - pd.Timedelta(1, unit='H')
positive = feats[feat].sel(time=slice(dt_st, dt_end_end))
positives.append(positive)
da = xr.DataArray(positives, dims=['sample', 'feature'])
da['sample'] = da_pwv.sample
if feat == 'bet-dagan':
feat_name = 'pressure'
else:
feat_name = feat
da['feature'] = ['{}_{}'.format(feat_name, x)
for x in np.arange(1, 25)]
da_list.append(da)
da_f = xr.concat(da_list, 'feature')
da_list = []
for feat in ['DOY', 'doy_sin', 'doy_cos']:
print('getting positives from feature {}'.format(feat))
positives = []
for dt in da_pwv.sample:
positive = feats[feat].sel(time=dt)
positives.append(positive)
da = xr.DataArray(positives, dims=['sample'])
da['sample'] = da_pwv.sample
# da['feature'] = feat
da_list.append(da)
da_ff = xr.concat(da_list, 'feature')
da_ff['feature'] = ['DOY', 'doy_sin', 'doy_cos']
da = xr.concat([da_pwv, da_f, da_ff], 'feature')
if std:
filename = 'hydro_tides_hourly_features_with_positives_std.nc'
else:
filename = 'hydro_tides_hourly_features_with_positives.nc'
feats['X_pos'] = da
# now add positives per stations:
pdf = pd.DataFrame(positives_per_station).T
pdf.index.name = 'tide_event'
pos_da = pdf.to_xarray().to_array('GNSS')
pos_da['GNSS'] = [x for x in hydro_pw_dict.keys()]
pos_da.attrs['info'] = 'contains the datetimes of the tide events per GNSS station.'
feats['Tides'] = pos_da
# rename sample to positive sample:
feats = feats.rename({'sample': 'positive_sample'})
save_ncfile(feats, hydro_path, filename)
return feats
def prepare_features_and_save_hourly(work_path=work_yuval, ims_path=ims_path,
savepath=hydro_path, std=True):
import xarray as xr
from aux_gps import save_ncfile
import numpy as np
# pwv = xr.load_dataset(
if std:
pwv_filename = 'GNSS_PW_thresh_0_hour_dayofyear_anoms_sd.nc'
pre_filename = 'IMS_BD_hourly_anoms_std_ps_1964-2020.nc'
else:
pwv_filename = 'GNSS_PW_thresh_0_hour_dayofyear_anoms.nc'
pre_filename = 'IMS_BD_hourly_anoms_ps_1964-2020.nc'
# work_path / 'GNSS_PW_thresh_0_hour_dayofyear_anoms.nc')
pwv = xr.load_dataset(work_path / pwv_filename)
pwv_stations = [x for x in hydro_pw_dict.keys()]
pwv = pwv[pwv_stations]
# pwv = pwv.rolling(time=12, keep_attrs=True).mean(keep_attrs=True)
pwv = pwv.resample(time='1H', keep_attrs=True).mean(keep_attrs=True)
# bd = xr.load_dataset(ims_path / 'IMS_BD_anoms_5min_ps_1964-2020.nc')
bd = xr.load_dataset(ims_path / pre_filename)
# min_time = pwv.dropna('time')['time'].min()
# bd = bd.sel(time=slice('1996', None)).resample(time='1H').mean()
bd = bd.sel(time=slice('1996', None))
pressure = bd['bet-dagan']
doy = pwv['time'].copy(data=pwv['time'].dt.dayofyear)
doy.name = 'doy'
doy_sin = np.sin(doy * np.pi / 183)
doy_sin.name = 'doy_sin'
doy_cos = np.cos(doy * np.pi / 183)
doy_cos.name = 'doy_cos'
ds = xr.merge([pwv, pressure, doy, doy_sin, doy_cos])
if std:
filename = 'hydro_tides_hourly_features_std.nc'
else:
filename = 'hydro_tides_hourly_features.nc'
save_ncfile(ds, savepath, filename)
return ds
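# Note on the doy_sin/doy_cos features built above: encoding day-of-year with a
# ~366-day period maps day 366 and day 1 to nearby points, avoiding the year-end
# jump a raw DOY feature would have. A minimal check (illustrative only; the
# helper name is a hypothetical example):
def _example_check_doy_cyclic_encoding():
    import numpy as np
    doy = np.array([1, 183, 366])
    enc = np.c_[np.sin(doy * np.pi / 183), np.cos(doy * np.pi / 183)]
    # the encoded distance between day 1 and day 366 is ~0.017, not 365:
    return np.linalg.norm(enc[0] - enc[2])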
def plot_all_decompositions(X, y, n=2):
import xarray as xr
models = [
'PCA',
'LDA',
'ISO_MAP',
'LLE',
'LLE-modified',
'LLE-hessian',
'LLE-ltsa',
'MDA',
'RTE',
'SE',
'TSNE',
'NCA']
names = [
'Principal Components',
'Linear Discriminant',
'Isomap',
'Locally Linear Embedding',
'Modified LLE',
'Hessian LLE',
'Local Tangent Space Alignment',
'MDS embedding',
'Random forest',
'Spectral embedding',
't-SNE',
'NCA embedding']
name_dict = dict(zip(models, names))
da = xr.DataArray(models, dims=['model'])
da['model'] = models
fg = xr.plot.FacetGrid(da, col='model', col_wrap=4,
sharex=False, sharey=False)
for model_str, ax in zip(da['model'].values, fg.axes.flatten()):
model = model_str.split('-')[0]
method = model_str.split('-')[-1]
if model == method:
method = None
try:
ax = scikit_decompose(X, y, model=model, n=n, method=method, ax=ax)
except ValueError:
pass
ax.set_title(name_dict[model_str])
ax.set_xlabel('')
ax.set_ylabel('')
fg.fig.suptitle('various decomposition projections (n={})'.format(n))
return
def scikit_decompose(X, y, model='PCA', n=2, method=None, ax=None):
from sklearn import (manifold, decomposition, ensemble,
discriminant_analysis, neighbors)
import matplotlib.pyplot as plt
import pandas as pd
# from mpl_toolkits.mplot3d import Axes3D
n_neighbors = 30
if model == 'PCA':
X_decomp = decomposition.TruncatedSVD(n_components=n).fit_transform(X)
elif model == 'LDA':
X2 = X.copy()
X2.values.flat[::X.shape[1] + 1] += 0.01
X_decomp = discriminant_analysis.LinearDiscriminantAnalysis(n_components=n
).fit_transform(X2, y)
elif model == 'ISO_MAP':
X_decomp = manifold.Isomap(
n_neighbors, n_components=n).fit_transform(X)
elif model == 'LLE':
# method = 'standard', 'modified', 'hessian' 'ltsa'
if method is None:
method = 'standard'
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method=method)
X_decomp = clf.fit_transform(X)
elif model == 'MDA':
clf = manifold.MDS(n_components=n, n_init=1, max_iter=100)
X_decomp = clf.fit_transform(X)
elif model == 'RTE':
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=n)
X_decomp = pca.fit_transform(X_transformed)
elif model == 'SE':
embedder = manifold.SpectralEmbedding(n_components=n, random_state=0,
eigen_solver="arpack")
X_decomp = embedder.fit_transform(X)
elif model == 'TSNE':
tsne = manifold.TSNE(n_components=n, init='pca', random_state=0)
X_decomp = tsne.fit_transform(X)
elif model == 'NCA':
nca = neighbors.NeighborhoodComponentsAnalysis(init='random',
n_components=n, random_state=0)
X_decomp = nca.fit_transform(X, y)
df = pd.DataFrame(X_decomp)
df.columns = [
'{}_{}'.format(
model,
x +
1) for x in range(
X_decomp.shape[1])]
df['flood'] = y
df['flood'] = df['flood'].astype(int)
df_1 = df[df['flood'] == 1]
df_0 = df[df['flood'] == 0]
if X_decomp.shape[1] == 1:
if ax is not None:
df_1.plot.scatter(ax=ax,
x='{}_1'.format(model),
y='{}_1'.format(model),
color='b', marker='s', alpha=0.3,
label='1',
s=50)
else:
ax = df_1.plot.scatter(
x='{}_1'.format(model),
y='{}_1'.format(model),
color='b',
label='1',
s=50)
df_0.plot.scatter(
ax=ax,
x='{}_1'.format(model),
y='{}_1'.format(model),
color='r', marker='x',
label='0',
s=50)
elif X_decomp.shape[1] == 2:
if ax is not None:
df_1.plot.scatter(ax=ax,
x='{}_1'.format(model),
y='{}_2'.format(model),
color='b', marker='s', alpha=0.3,
label='1',
s=50)
else:
ax = df_1.plot.scatter(
x='{}_1'.format(model),
y='{}_2'.format(model),
color='b',
label='1',
s=50)
df_0.plot.scatter(
ax=ax,
x='{}_1'.format(model),
y='{}_2'.format(model),
color='r',
label='0',
s=50)
elif X_decomp.shape[1] == 3:
ax = plt.figure().gca(projection='3d')
# df_1.plot.scatter(x='{}_1'.format(model), y='{}_2'.format(model), z='{}_3'.format(model), color='b', label='1', s=50, ax=threedee)
ax.scatter(df_1['{}_1'.format(model)],
df_1['{}_2'.format(model)],
df_1['{}_3'.format(model)],
color='b',
label='1',
s=50)
ax.scatter(df_0['{}_1'.format(model)],
df_0['{}_2'.format(model)],
df_0['{}_3'.format(model)],
color='r',
label='0',
s=50)
ax.set_xlabel('{}_1'.format(model))
ax.set_ylabel('{}_2'.format(model))
ax.set_zlabel('{}_3'.format(model))
return ax
def permutation_scikit(X, y, cv=False, plot=True):
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import permutation_test_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
if not cv:
clf = SVC(C=0.01, break_ties=False, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape='ovr', degree=3, gamma=0.032374575428176434,
kernel='poly', max_iter=-1, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
        # NOTE: this overrides the poly-kernel SVC defined above with a simple linear-kernel SVC:
        clf = SVC(kernel='linear')
# clf = LinearDiscriminantAnalysis()
cv = StratifiedKFold(4, shuffle=True)
# cv = KFold(4, shuffle=True)
n_classes = 2
score, permutation_scores, pvalue = permutation_test_score(
clf, X, y, scoring="f1", cv=cv, n_permutations=1000, n_jobs=-1, verbose=2)
print("Classification score %s (pvalue : %s)" % (score, pvalue))
plt.hist(permutation_scores, 20, label='Permutation scores',
edgecolor='black')
ylim = plt.ylim()
plt.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
else:
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, shuffle=True, random_state=42)
param_grid = {
'C': np.logspace(-2, 3, 50), 'gamma': np.logspace(-2, 3, 50),
'kernel': ['rbf', 'poly', 'sigmoid']}
grid = GridSearchCV(SVC(), param_grid, refit=True, verbose=2)
grid.fit(X_train, y_train)
print(grid.best_estimator_)
grid_predictions = grid.predict(X_test)
print(confusion_matrix(y_test, grid_predictions))
print(classification_report(y_test, grid_predictions))
return
def grab_y_true_and_predict_from_sklearn_model(model, X, y, cv,
kfold_name='inner_kfold'):
from sklearn.model_selection import GridSearchCV
import xarray as xr
import numpy as np
if isinstance(model, GridSearchCV):
model = model.best_estimator_
ds_list = []
for i, (train, val) in enumerate(cv.split(X, y)):
model.fit(X[train], y[train])
y_true = y[val]
y_pred = model.predict(X[val])
try:
lr_probs = model.predict_proba(X[val])
# keep probabilities for the positive outcome only
lr_probs = lr_probs[:, 1]
except AttributeError:
lr_probs = model.decision_function(X[val])
y_true_da = xr.DataArray(y_true, dims=['sample'])
y_pred_da = xr.DataArray(y_pred, dims=['sample'])
y_prob_da = xr.DataArray(lr_probs, dims=['sample'])
ds = xr.Dataset()
ds['y_true'] = y_true_da
ds['y_pred'] = y_pred_da
ds['y_prob'] = y_prob_da
ds['sample'] = np.arange(0, len(X[val]))
ds_list.append(ds)
ds = xr.concat(ds_list, kfold_name)
ds[kfold_name] = np.arange(1, cv.n_splits + 1)
return ds
def produce_ROC_curves_from_model(model, X, y, cv, kfold_name='inner_kfold'):
import numpy as np
import xarray as xr
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
# TODO: collect all predictions and y_tests from this, also predict_proba
    # and save, then calculate everything elsewhere.
if isinstance(model, GridSearchCV):
model = model.best_estimator_
tprs = []
aucs = []
pr = []
pr_aucs = []
mean_fpr = np.linspace(0, 1, 100)
for i, (train, val) in enumerate(cv.split(X, y)):
model.fit(X[train], y[train])
y_pred = model.predict(X[val])
try:
lr_probs = model.predict_proba(X[val])
# keep probabilities for the positive outcome only
lr_probs = lr_probs[:, 1]
except AttributeError:
lr_probs = model.decision_function(X[val])
fpr, tpr, _ = roc_curve(y[val], y_pred)
interp_tpr = np.interp(mean_fpr, fpr, tpr)
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
aucs.append(roc_auc_score(y[val], y_pred))
precision, recall, _ = precision_recall_curve(y[val], lr_probs)
pr.append(recall)
average_precision = average_precision_score(y[val], y_pred)
pr_aucs.append(average_precision)
# mean_tpr = np.mean(tprs, axis=0)
# mean_tpr[-1] = 1.0
# mean_auc = auc(mean_fpr, mean_tpr)
# std_auc = np.std(aucs)
# std_tpr = np.std(tprs, axis=0)
tpr_da = xr.DataArray(tprs, dims=[kfold_name, 'fpr'])
auc_da = xr.DataArray(aucs, dims=[kfold_name])
ds = xr.Dataset()
ds['TPR'] = tpr_da
ds['AUC'] = auc_da
ds['fpr'] = mean_fpr
ds[kfold_name] = np.arange(1, cv.n_splits + 1)
# variability for each tpr is ds['TPR'].std('kfold')
return ds
def cross_validation_with_holdout(X, y, model_name='SVC', features='pwv',
n_splits=3, test_ratio=0.25,
scorers=['f1', 'recall', 'tss', 'hss',
'precision', 'accuracy'],
seed=42, savepath=None, verbose=0,
param_grid='normal', n_jobs=-1,
n_repeats=None):
# from sklearn.model_selection import cross_validate
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.metrics import make_scorer
# from string import digits
import numpy as np
# import xarray as xr
scores_dict = {s: s for s in scorers}
if 'tss' in scorers:
scores_dict['tss'] = make_scorer(tss_score)
if 'hss' in scorers:
scores_dict['hss'] = make_scorer(hss_score)
X = select_doy_from_feature_list(X, model_name, features)
if param_grid == 'light':
print(np.unique(X.feature.values))
# first take out the hold-out set:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_ratio,
random_state=seed,
stratify=y)
if n_repeats is None:
# configure the cross-validation procedure
cv = StratifiedKFold(n_splits=n_splits, shuffle=True,
random_state=seed)
print('CV StratifiedKfolds of {}.'.format(n_splits))
# define the model and search space:
else:
cv = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats,
random_state=seed)
print('CV RepeatedStratifiedKFold of {} with {} repeats.'.format(n_splits, n_repeats))
ml = ML_Classifier_Switcher()
print('param grid group is set to {}.'.format(param_grid))
sk_model = ml.pick_model(model_name, pgrid=param_grid)
search_space = ml.param_grid
# define search
gr_search = GridSearchCV(estimator=sk_model, param_grid=search_space,
cv=cv, n_jobs=n_jobs,
scoring=scores_dict,
verbose=verbose,
refit=False, return_train_score=True)
gr_search.fit(X, y)
if isinstance(features, str):
features = [features]
if savepath is not None:
filename = 'GRSRCHCV_holdout_{}_{}_{}_{}_{}_{}_{}.pkl'.format(
model_name, '+'.join(features), '+'.join(scorers), n_splits,
int(test_ratio*100), param_grid, seed)
save_gridsearchcv_object(gr_search, savepath, filename)
# gr, _ = process_gridsearch_results(
# gr_search, model_name, split_dim='kfold', features=X.feature.values)
# remove_digits = str.maketrans('', '', digits)
# features = list(set([x.translate(remove_digits).split('_')[0]
# for x in X.feature.values]))
# # add more attrs, features etc:
# gr.attrs['features'] = features
return gr_search
def select_doy_from_feature_list(X, model_name='RF', features='pwv'):
    # if RF is chosen, use the raw DOY feature instead of the cyclic (sin/cos)
    # encoding; otherwise replace 'doy' with 'doy_sin' and 'doy_cos'.
if isinstance(features, list):
feats = features.copy()
else:
feats = features
if model_name == 'RF' and 'doy' in features:
if isinstance(features, list):
feats.remove('doy')
feats.append('DOY')
elif isinstance(features, str):
feats = 'DOY'
elif model_name != 'RF' and 'doy' in features:
if isinstance(features, list):
feats.remove('doy')
feats.append('doy_sin')
feats.append('doy_cos')
elif isinstance(features, str):
feats = ['doy_sin']
feats.append('doy_cos')
X = select_features_from_X(X, feats)
return X
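# Usage sketch for select_doy_from_feature_list (illustrative): the tree-based
# RF gets the raw 'DOY' feature, while other models (SVC, MLP) get the cyclic
# 'doy_sin'/'doy_cos' pair instead:
#   X_rf = select_doy_from_feature_list(X, model_name='RF', features=['pwv', 'doy'])
#   X_svc = select_doy_from_feature_list(X, model_name='SVC', features=['pwv', 'doy'])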
def single_cross_validation(X_val, y_val, model_name='SVC', features='pwv',
n_splits=4, scorers=['f1', 'recall', 'tss', 'hss',
'precision', 'accuracy'],
seed=42, savepath=None, verbose=0,
param_grid='normal', n_jobs=-1,
n_repeats=None, outer_split='1-1'):
# from sklearn.model_selection import cross_validate
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import GridSearchCV
# from sklearn.model_selection import train_test_split
from sklearn.metrics import make_scorer
# from string import digits
import numpy as np
# import xarray as xr
scores_dict = {s: s for s in scorers}
if 'tss' in scorers:
scores_dict['tss'] = make_scorer(tss_score)
if 'hss' in scorers:
scores_dict['hss'] = make_scorer(hss_score)
X = select_doy_from_feature_list(X_val, model_name, features)
y = y_val
if param_grid == 'light':
print(np.unique(X.feature.values))
if n_repeats is None:
# configure the cross-validation procedure
cv = StratifiedKFold(n_splits=n_splits, shuffle=True,
random_state=seed)
print('CV StratifiedKfolds of {}.'.format(n_splits))
# define the model and search space:
else:
cv = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats,
random_state=seed)
print('CV RepeatedStratifiedKFold of {} with {} repeats.'.format(
n_splits, n_repeats))
ml = ML_Classifier_Switcher()
print('param grid group is set to {}.'.format(param_grid))
if outer_split == '1-1':
cv_type = 'holdout'
print('holdout cv is selected.')
else:
cv_type = 'nested'
print('nested cv {} out of {}.'.format(
outer_split.split('-')[0], outer_split.split('-')[1]))
sk_model = ml.pick_model(model_name, pgrid=param_grid)
search_space = ml.param_grid
# define search
gr_search = GridSearchCV(estimator=sk_model, param_grid=search_space,
cv=cv, n_jobs=n_jobs,
scoring=scores_dict,
verbose=verbose,
refit=False, return_train_score=True)
gr_search.fit(X, y)
if isinstance(features, str):
features = [features]
if savepath is not None:
filename = 'GRSRCHCV_{}_{}_{}_{}_{}_{}_{}_{}.pkl'.format(cv_type,
model_name, '+'.join(features), '+'.join(
scorers), n_splits,
outer_split, param_grid, seed)
save_gridsearchcv_object(gr_search, savepath, filename)
return gr_search
def save_cv_params_to_file(cv_obj, path, name):
import pandas as pd
di = vars(cv_obj)
splitter_type = cv_obj.__repr__().split('(')[0]
di['splitter_type'] = splitter_type
(pd.DataFrame.from_dict(data=di, orient='index')
.to_csv(path / '{}.csv'.format(name), header=False))
print('{}.csv saved to {}.'.format(name, path))
return
def read_cv_params_and_instantiate(filepath):
import pandas as pd
from sklearn.model_selection import StratifiedKFold
df = pd.read_csv(filepath, header=None, index_col=0)
d = {}
for row in df.iterrows():
        dd = pd.to_numeric(row[1], errors='ignore')
        if dd.item() == 'True' or dd.item() == 'False':
            # astype(bool) would map both 'True' and 'False' strings to True,
            # so compare explicitly instead:
            dd = dd == 'True'
        d[dd.to_frame().columns.item()] = dd.item()
s_type = d.pop('splitter_type')
if s_type == 'StratifiedKFold':
cv = StratifiedKFold(**d)
return cv
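# Round-trip sketch for the two helpers above (illustrative; the file name is a
# hypothetical example):
#   cv = StratifiedKFold(n_splits=4, shuffle=True, random_state=42)
#   save_cv_params_to_file(cv, hydro_path, 'cv_params_example')
#   cv2 = read_cv_params_and_instantiate(hydro_path / 'cv_params_example.csv')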
def nested_cross_validation_procedure(X, y, model_name='SVC', features='pwv',
outer_splits=4, inner_splits=2,
refit_scorer='roc_auc',
scorers=['f1', 'recall', 'tss', 'hss',
'roc_auc', 'precision',
'accuracy'],
seed=42, savepath=None, verbose=0,
param_grid='normal', n_jobs=-1):
from sklearn.model_selection import cross_validate
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer
from sklearn.inspection import permutation_importance
from string import digits
import numpy as np
import xarray as xr
assert refit_scorer in scorers
scores_dict = {s: s for s in scorers}
if 'tss' in scorers:
scores_dict['tss'] = make_scorer(tss_score)
if 'hss' in scorers:
scores_dict['hss'] = make_scorer(hss_score)
X = select_doy_from_feature_list(X, model_name, features)
# if model_name == 'RF':
# doy = X['sample'].dt.dayofyear
# sel_doy = [x for x in X.feature.values if 'doy_sin' in x]
# doy_X = doy.broadcast_like(X.sel(feature=sel_doy))
# doy_X['feature'] = [
# 'doy_{}'.format(x) for x in range(
# doy_X.feature.size)]
# no_doy = [x for x in X.feature.values if 'doy' not in x]
# X = X.sel(feature=no_doy)
# X = xr.concat([X, doy_X], 'feature')
# else:
# # first slice X for features:
# if isinstance(features, str):
# f = [x for x in X.feature.values if features in x]
# X = X.sel(feature=f)
# elif isinstance(features, list):
# fs = []
# for f in features:
# fs += [x for x in X.feature.values if f in x]
# X = X.sel(feature=fs)
if param_grid == 'light':
print(np.unique(X.feature.values))
# configure the cross-validation procedure
cv_inner = StratifiedKFold(n_splits=inner_splits, shuffle=True,
random_state=seed)
print('Inner CV StratifiedKfolds of {}.'.format(inner_splits))
# define the model and search space:
ml = ML_Classifier_Switcher()
if param_grid == 'light':
        print('diagnostic mode light.')
sk_model = ml.pick_model(model_name, pgrid=param_grid)
search_space = ml.param_grid
# define search
gr_search = GridSearchCV(estimator=sk_model, param_grid=search_space,
cv=cv_inner, n_jobs=n_jobs,
scoring=scores_dict,
verbose=verbose,
refit=refit_scorer, return_train_score=True)
# gr.fit(X, y)
# configure the cross-validation procedure
cv_outer = StratifiedKFold(
n_splits=outer_splits, shuffle=True, random_state=seed)
# execute the nested cross-validation
scores_est_dict = cross_validate(gr_search, X, y,
scoring=scores_dict,
cv=cv_outer, n_jobs=n_jobs,
return_estimator=True, verbose=verbose)
# perm = []
# for i, (train, val) in enumerate(cv_outer.split(X, y)):
# gr_model = scores_est_dict['estimator'][i]
# gr_model.fit(X[train], y[train])
# r = permutation_importance(gr_model, X[val], y[val],scoring='f1',
# n_repeats=30, n_jobs=-1,
# random_state=0)
# perm.append(r)
# get the test scores:
test_keys = [x for x in scores_est_dict.keys() if 'test' in x]
ds = xr.Dataset()
for key in test_keys:
ds[key] = xr.DataArray(scores_est_dict[key], dims=['outer_kfold'])
preds_ds = []
gr_ds = []
for est in scores_est_dict['estimator']:
gr, _ = process_gridsearch_results(
est, model_name, split_dim='inner_kfold', features=X.feature.values)
        # collect the per-outer-fold gridsearch results:
gr_ds.append(gr)
preds_ds.append(
grab_y_true_and_predict_from_sklearn_model(est, X, y, cv_inner))
# tpr_ds.append(produce_ROC_curves_from_model(est, X, y, cv_inner))
dss = xr.concat(preds_ds, 'outer_kfold')
gr_dss = xr.concat(gr_ds, 'outer_kfold')
dss['outer_kfold'] = np.arange(1, cv_outer.n_splits + 1)
gr_dss['outer_kfold'] = np.arange(1, cv_outer.n_splits + 1)
    # aggregate results:
dss = xr.merge([ds, dss])
dss = xr.merge([dss, gr_dss])
dss.attrs = gr_dss.attrs
dss.attrs['outer_kfold_splits'] = outer_splits
remove_digits = str.maketrans('', '', digits)
features = list(set([x.translate(remove_digits).split('_')[0]
for x in X.feature.values]))
# add more attrs, features etc:
dss.attrs['features'] = features
# rename major data_vars with model name:
# ys = [x for x in dss.data_vars if 'y_' in x]
# new_ys = [y + '_{}'.format(model_name) for y in ys]
# dss = dss.rename(dict(zip(ys, new_ys)))
# new_test_keys = [y + '_{}'.format(model_name) for y in test_keys]
# dss = dss.rename(dict(zip(test_keys, new_test_keys)))
# if isinstance(X.attrs['pwv_id'], list):
# dss.attrs['pwv_id'] = '-'.join(X.attrs['pwv_id'])
# else:
# dss.attrs['pwv_id'] = X.attrs['pwv_id']
# if isinstance(y.attrs['hydro_station_id'], list):
# dss.attrs['hs_id'] = '-'.join([str(x) for x in y.attrs['hydro_station_id']])
# else:
# dss.attrs['hs_id'] = y.attrs['hydro_station_id']
# dss.attrs['hydro_max_flow'] = y.attrs['max_flow']
# dss.attrs['neg_pos_ratio'] = y.attrs['neg_pos_ratio']
# save results to file:
if savepath is not None:
save_cv_results(dss, savepath=savepath)
return dss
# def ML_main_procedure(X, y, estimator=None, model_name='SVC', features='pwv',
# val_size=0.18, n_splits=None, test_size=0.2, seed=42, best_score='f1',
# savepath=None, plot=True):
# """split the X,y for train and test, either do HP tuning using HP_tuning
# with val_size or use already tuned (or not) estimator.
# models to play with = MLP, RF and SVC.
# n_splits = 2, 3, 4.
# features = pwv, pressure.
# best_score = f1, roc_auc, accuracy.
# can do loop on them. RF takes the most time to tune."""
# X = select_features_from_X(X, features)
# X_train, X_test, y_train, y_test = train_test_split(X, y,
# test_size=test_size,
# shuffle=True,
# random_state=seed)
# # do HP_tuning:
# if estimator is None:
# cvr, model = HP_tuning(X_train, y_train, model_name=model_name, val_size=val_size, test_size=test_size,
# best_score=best_score, seed=seed, savepath=savepath, n_splits=n_splits)
# else:
# model = estimator
# if plot:
# ax = plot_many_ROC_curves(model, X_test, y_test, name=model_name,
# ax=None)
# return ax
# else:
# return model
def plot_hyper_parameters_heatmaps_from_nested_CV_model(dss, path=hydro_path, model_name='MLP',
features='pwv+pressure+doy', save=True):
import matplotlib.pyplot as plt
ds = dss.sel(features=features).reset_coords(drop=True)
non_hp_vars = ['mean_score', 'std_score',
'test_score', 'roc_auc_score', 'TPR']
if model_name == 'RF':
non_hp_vars.append('feature_importances')
ds = ds[[x for x in ds if x not in non_hp_vars]]
seq = 'Blues'
cat = 'Dark2'
cmap_hp_dict = {
'alpha': seq, 'activation': cat,
'hidden_layer_sizes': cat, 'learning_rate': cat,
'solver': cat, 'kernel': cat, 'C': seq,
'gamma': seq, 'degree': seq, 'coef0': seq,
'max_depth': seq, 'max_features': cat,
'min_samples_leaf': seq, 'min_samples_split': seq,
'n_estimators': seq
}
# fix stuff for SVC:
if model_name == 'SVC':
ds['degree'] = ds['degree'].where(ds['kernel']=='poly')
ds['coef0'] = ds['coef0'].where(ds['kernel']=='poly')
# da = ds.to_arrray('hyper_parameters')
# fg = xr.plot.FacetGrid(
# da,
# col='hyper_parameters',
# sharex=False,
# sharey=False, figsize=(16, 10))
fig, axes = plt.subplots(5, 1, sharex=True, figsize=(4, 10))
for i, da in enumerate(ds):
df = ds[da].reset_coords(drop=True).to_dataset('scorer').to_dataframe()
df.index.name = 'Outer Split'
try:
df = df.astype(float).round(2)
except ValueError:
pass
cmap = cmap_hp_dict.get(da, 'Set1')
plot_heatmap_for_hyper_parameters_df(df, ax=axes[i], title=da, cmap=cmap)
fig.tight_layout()
if save:
filename = 'Hyper-parameters_nested_{}.png'.format(
model_name)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return
def plot_heatmaps_for_hyper_parameters_data_splits(df1, df2, axes=None,
cmap='colorblind',
title=None, fig=None,
cbar_params=[.92, .12, .03, .75],
fontsize=12,
val_type='float'):
import pandas as pd
import seaborn as sns
import numpy as np
# from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import Normalize
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
sns.set_style('ticks')
sns.set_style('whitegrid')
sns.set(font_scale=1.2)
df1 = df1.astype(eval(val_type))
df2 = df2.astype(eval(val_type))
arr = pd.concat([df1, df2], axis=0).values.ravel()
    value_to_int = {j: i for i, j in enumerate(
        np.unique(arr))}  # map each unique value to an integer index
# try:
# sorted_v_to_i = dict(sorted(value_to_int.items()))
# except TypeError:
# sorted_v_to_i = value_to_int
# print(value_to_int)
n = len(value_to_int)
# discrete colormap (n samples from a given cmap)
cmap_list = sns.color_palette(cmap, n)
if val_type == 'float':
# print([value_to_int.keys()])
cbar_ticklabels = ['{:.2g}'.format(x) for x in value_to_int.keys()]
elif val_type == 'int':
cbar_ticklabels = [int(x) for x in value_to_int.keys()]
elif val_type == 'str':
cbar_ticklabels = [x for x in value_to_int.keys()]
if 'nan' in value_to_int.keys():
cmap_list[-1] = (0.5, 0.5, 0.5)
new_value_to_int = {}
for key, val in value_to_int.items():
try:
new_value_to_int[str(int(float(key)))] = val
except ValueError:
new_value_to_int['NR'] = val
cbar_ticklabels = [x for x in new_value_to_int.keys()]
# u1 = np.unique(df1.replace(value_to_int)).astype(int)
# cmap1 = [cmap_list[x] for x in u1]
# u2 = np.unique(df2.replace(value_to_int)).astype(int)
# cmap2 = [cmap_list[x] for x in u2]
# prepare normalizer
## Prepare bins for the normalizer
norm_bins = np.sort([*value_to_int.values()]) + 0.5
norm_bins = np.insert(norm_bins, 0, np.min(norm_bins) - 1.0)
# print(norm_bins)
## Make normalizer and formatter
norm = matplotlib.colors.BoundaryNorm(norm_bins, n, clip=True)
# normalizer = Normalize(np.array([x for x in value_to_int.values()])[0],np.array([x for x in value_to_int.values()])[-1])
# im=cm.ScalarMappable(norm=normalizer)
if axes is None:
fig, axes = plt.subplots(2, 1, sharex=True, sharey=False)
# divider = make_axes_locatable([axes[0], axes[1]])
# cbar_ax = divider.append_axes('right', size='5%', pad=0.05)
cbar_ax = fig.add_axes(cbar_params)
sns.heatmap(df1.replace(value_to_int), cmap=cmap_list, cbar=False,
ax=axes[0], linewidth=0.7, linecolor='k', square=True,
cbar_kws={"shrink": .9}, cbar_ax=cbar_ax, norm=norm)
sns.heatmap(df2.replace(value_to_int), cmap=cmap_list, cbar=False,
ax=axes[1], linewidth=0.7, linecolor='k', square=True,
cbar_kws={"shrink": .9}, cbar_ax=cbar_ax, norm=norm)
# else:
# ax = sns.heatmap(df.replace(sorted_v_to_i), cmap=cmap,
# ax=ax, linewidth=1, linecolor='k',
# square=False, cbar_kws={"shrink": .9})
if title is not None:
axes[0].set_title(title, fontsize=fontsize)
for ax in axes:
ax.set_xticklabels(ax.get_xticklabels(), ha='right', va='top', rotation=45)
ax.set_yticklabels(ax.get_yticklabels(), rotation=0)
ax.tick_params(labelsize=fontsize, direction='out', bottom=True,
left=True, length=2)
ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize)
ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize)
# colorbar = axes[0].collections[0].colorbar
# diff = norm_bins[1:] - norm_bins[:-1]
# tickz = norm_bins[:-1] + diff / 2
colorbar = fig.colorbar(cm.ScalarMappable(norm=norm, cmap=matplotlib.colors.ListedColormap(cmap_list)), ax=[axes[0], axes[1]],
shrink=1, pad=0.05, cax=cbar_ax)
# colorbar = plt.gca().images[-1].colorbar
r = colorbar.vmax - colorbar.vmin
colorbar.set_ticks([colorbar.vmin + r / n * (0.5 + i) for i in range(n)])
colorbar.ax.set_yticklabels(cbar_ticklabels, fontsize=fontsize-2)
return axes
def plot_hyper_parameters_heatmap_data_splits_per_model(dss4, dss5, fontsize=14,
save=True, model_name='SVC',
features='pwv+pressure+doy'):
import matplotlib.pyplot as plt
# import seaborn as sns
    fig, axes = plt.subplots(2, 5, sharex=True, sharey=False, figsize=(16, 5))
ds4 = dss4.sel(features=features).reset_coords(drop=True)
ds5 = dss5.sel(features=features).reset_coords(drop=True)
ds4 = ds4.reindex(scorer=scorer_order)
ds5 = ds5.reindex(scorer=scorer_order)
non_hp_vars = ['mean_score', 'std_score',
'test_score', 'roc_auc_score', 'TPR']
if model_name == 'RF':
non_hp_vars.append('feature_importances')
if model_name == 'MLP':
adj_dict=dict(
top=0.946,
bottom=0.145,
left=0.046,
right=0.937,
hspace=0.121,
wspace=0.652)
cb_st = 0.167
cb_mul = 0.193
else:
adj_dict=dict(
wspace = 0.477,
top=0.921,
bottom=0.17,
left=0.046,
right=0.937,
hspace=0.121)
cb_st = 0.18
cb_mul = 0.19
ds4 = ds4[[x for x in ds4 if x not in non_hp_vars]]
ds5 = ds5[[x for x in ds5 if x not in non_hp_vars]]
seq = 'Blues'
cat = 'Dark2'
hp_dict = {
'alpha': ['Reds', 'float'], 'activation': ['Set1_r', 'str'],
'hidden_layer_sizes': ['Paired', 'str'], 'learning_rate': ['Spectral_r', 'str'],
'solver': ['Dark2', 'str'], 'kernel': ['Dark2', 'str'], 'C': ['Blues', 'float'],
'gamma': ['Oranges', 'float'], 'degree': ['Greens', 'str'], 'coef0': ['Spectral', 'str'],
'max_depth': ['Blues', 'int'], 'max_features': ['Dark2', 'str'],
'min_samples_leaf': ['Greens', 'int'], 'min_samples_split': ['Reds', 'int'],
'n_estimators': ['Oranges', 'int']
}
# fix stuff for SVC:
if model_name == 'SVC':
ds4['degree'] = ds4['degree'].where(ds4['kernel']=='poly')
ds4['coef0'] = ds4['coef0'].where(ds4['kernel']=='poly')
ds5['degree'] = ds5['degree'].where(ds5['kernel']=='poly')
ds5['coef0'] = ds5['coef0'].where(ds5['kernel']=='poly')
for i, (da4, da5) in enumerate(zip(ds4, ds5)):
df4 = ds4[da4].reset_coords(drop=True).to_dataset('scorer').to_dataframe()
df5 = ds5[da5].reset_coords(drop=True).to_dataset('scorer').to_dataframe()
df4.index.name = 'Outer Split'
df5.index.name = 'Outer Split'
# try:
# df4 = df4.astype(float).round(2)
# df5 = df5.astype(float).round(2)
# except ValueError:
# pass
cmap = hp_dict.get(da4, 'Set1')[0]
val_type = hp_dict.get(da4, 'int')[1]
cbar_params = [cb_st + cb_mul*float(i), .175, .01, .71]
plot_heatmaps_for_hyper_parameters_data_splits(df4,
df5,
axes=[axes[0, i], axes[1, i]],
fig=fig,
title=da4,
cmap=cmap,
cbar_params=cbar_params,
fontsize=fontsize,
val_type=val_type)
if i > 0 :
axes[0, i].set_ylabel('')
axes[0, i].yaxis.set_tick_params(labelleft=False)
axes[1, i].set_ylabel('')
axes[1, i].yaxis.set_tick_params(labelleft=False)
fig.tight_layout()
fig.subplots_adjust(**adj_dict)
if save:
filename = 'Hyper-parameters_nested_{}.png'.format(
model_name)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def plot_heatmap_for_hyper_parameters_df(df, ax=None, cmap='colorblind',
title=None, fontsize=12):
import pandas as pd
import seaborn as sns
import numpy as np
sns.set_style('ticks')
sns.set_style('whitegrid')
sns.set(font_scale=1.2)
    value_to_int = {j: i for i, j in enumerate(
        sorted(pd.unique(df.values.ravel())))}  # map each unique value to an integer index
# for key in value_to_int.copy().keys():
# try:
# if np.isnan(key):
# value_to_int['NA'] = value_to_int.pop(key)
# df = df.fillna('NA')
# except TypeError:
# pass
try:
sorted_v_to_i = dict(sorted(value_to_int.items()))
except TypeError:
sorted_v_to_i = value_to_int
n = len(value_to_int)
# discrete colormap (n samples from a given cmap)
cmap = sns.color_palette(cmap, n)
if ax is None:
ax = sns.heatmap(df.replace(sorted_v_to_i), cmap=cmap,
linewidth=1, linecolor='k', square=False,
cbar_kws={"shrink": .9})
else:
ax = sns.heatmap(df.replace(sorted_v_to_i), cmap=cmap,
ax=ax, linewidth=1, linecolor='k',
square=False, cbar_kws={"shrink": .9})
if title is not None:
ax.set_title(title, fontsize=fontsize)
ax.set_xticklabels(ax.get_xticklabels(), rotation=30)
ax.set_yticklabels(ax.get_yticklabels(), rotation=0)
ax.tick_params(labelsize=fontsize)
ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize)
ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize)
colorbar = ax.collections[0].colorbar
r = colorbar.vmax - colorbar.vmin
colorbar.set_ticks([colorbar.vmin + r / n * (0.5 + i) for i in range(n)])
colorbar.set_ticklabels(list(value_to_int.keys()))
return ax
# def plot_ROC_curves_for_all_models_and_scorers(dss, save=False,
# fontsize=24, fig_split=1,
# feat=['pwv', 'pwv+pressure', 'pwv+pressure+doy']):
# import xarray as xr
# import seaborn as sns
# import matplotlib.pyplot as plt
# import pandas as pd
# cmap = sns.color_palette('tab10', len(feat))
# sns.set_style('whitegrid')
# sns.set_style('ticks')
# if fig_split == 1:
# dss = dss.sel(scorer=['precision', 'recall', 'f1'])
# elif fig_split == 2:
# dss = dss.sel(scorer=['accuracy', 'tss', 'hss'])
# fg = xr.plot.FacetGrid(
# dss,
# col='model',
# row='scorer',
# sharex=True,
# sharey=True, figsize=(20, 20))
# for i in range(fg.axes.shape[0]): # i is rows
# for j in range(fg.axes.shape[1]): # j is cols
# ax = fg.axes[i, j]
# modelname = dss['model'].isel(model=j).item()
# scorer = dss['scorer'].isel(scorer=i).item()
# chance_plot = [False for x in feat]
# chance_plot[-1] = True
# for k, f in enumerate(feat):
# # name = '{}-{}-{}'.format(modelname, scoring, feat)
# # model = dss.isel({'model': j, 'scoring': i}).sel(
# # {'features': feat})
# model = dss.isel({'model': j, 'scorer': i}
# ).sel({'features': f})
# # return model
# title = 'ROC of {} model ({})'.format(modelname.replace('SVC', 'SVM'), scorer)
# try:
# ax = plot_ROC_curve_from_dss_nested_CV(model, outer_dim='outer_split',
# plot_chance=[k],
# main_label=f,
# ax=ax,
# color=cmap[k], title=title,
# fontsize=fontsize)
# except ValueError:
# ax.grid('on')
# continue
# handles, labels = ax.get_legend_handles_labels()
# lh_ser = pd.Series(labels, index=handles).drop_duplicates()
# lh_ser = lh_ser.sort_values(ascending=False)
# hand = lh_ser.index.values
# labe = lh_ser.values
# ax.legend(handles=hand.tolist(), labels=labe.tolist(), loc="lower right",
# fontsize=fontsize-7)
# ax.grid('on')
# if j >= 1:
# ax.set_ylabel('')
# if fig_split == 1:
# ax.set_xlabel('')
# ax.tick_params(labelbottom=False)
# else:
# if i <= 1:
# ax.set_xlabel('')
# # title = '{} station: {} total events'.format(
# # station.upper(), events)
# # if max_flow > 0:
# # title = '{} station: {} total events (max flow = {} m^3/sec)'.format(
# # station.upper(), events, max_flow)
# # fg.fig.suptitle(title, fontsize=fontsize)
# fg.fig.tight_layout()
# fg.fig.subplots_adjust(top=0.937,
# bottom=0.054,
# left=0.039,
# right=0.993,
# hspace=0.173,
# wspace=0.051)
# if save:
# filename = 'ROC_curves_nested_{}_figsplit_{}.png'.format(
# dss['outer_split'].size, fig_split)
# plt.savefig(savefig_path / filename, bbox_inches='tight')
# return fg
def plot_hydro_ML_models_results_from_dss(dss, std_on='outer',
save=False, fontsize=16,
plot_type='ROC', split=1,
feat=['pwv', 'pressure+pwv', 'doy+pressure+pwv']):
import xarray as xr
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
cmap = sns.color_palette("colorblind", len(feat))
if split == 1:
dss = dss.sel(scoring=['f1', 'precision', 'recall'])
elif split == 2:
dss = dss.sel(scoring=['tss', 'hss', 'roc-auc', 'accuracy'])
fg = xr.plot.FacetGrid(
dss,
col='model',
row='scoring',
sharex=True,
sharey=True, figsize=(20, 20))
for i in range(fg.axes.shape[0]): # i is rows
for j in range(fg.axes.shape[1]): # j is cols
ax = fg.axes[i, j]
modelname = dss['model'].isel(model=j).item()
scoring = dss['scoring'].isel(scoring=i).item()
chance_plot = [False for x in feat]
chance_plot[-1] = True
for k, f in enumerate(feat):
# name = '{}-{}-{}'.format(modelname, scoring, feat)
# model = dss.isel({'model': j, 'scoring': i}).sel(
# {'features': feat})
model = dss.isel({'model': j, 'scoring': i}
).sel({'features': f})
title = '{} of {} model ({})'.format(
plot_type, modelname, scoring)
try:
plot_ROC_PR_curve_from_dss(model, outer_dim='outer_kfold',
inner_dim='inner_kfold',
plot_chance=[k],
main_label=f, plot_type=plot_type,
plot_std_legend=False, ax=ax,
color=cmap[k], title=title,
std_on=std_on, fontsize=fontsize)
except ValueError:
ax.grid('on')
continue
handles, labels = ax.get_legend_handles_labels()
hand = pd.Series(
labels, index=handles).drop_duplicates().index.values
labe = pd.Series(labels, index=handles).drop_duplicates().values
ax.legend(handles=hand.tolist(), labels=labe.tolist(), loc="lower right",
fontsize=14)
ax.grid('on')
# title = '{} station: {} total events'.format(
# station.upper(), events)
# if max_flow > 0:
# title = '{} station: {} total events (max flow = {} m^3/sec)'.format(
# station.upper(), events, max_flow)
# fg.fig.suptitle(title, fontsize=fontsize)
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.937,
bottom=0.054,
left=0.039,
right=0.993,
hspace=0.173,
wspace=0.051)
if save:
filename = 'hydro_models_on_{}_{}_std_on_{}_{}.png'.format(
dss['inner_kfold'].size, dss['outer_kfold'].size,
std_on, plot_type)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
# def plot_hydro_ML_models_result(model_da, nsplits=2, station='drag',
# test_size=20, n_splits_plot=None, save=False):
# import xarray as xr
# import seaborn as sns
# import matplotlib.pyplot as plt
# from sklearn.model_selection import train_test_split
# # TODO: add plot_roc_curve(model, X_other_station, y_other_station)
# # TODO: add pw_station, hs_id
# cmap = sns.color_palette("colorblind", 3)
# X, y = produce_X_y(station, hydro_pw_dict[station], neg_pos_ratio=1)
# events = int(y[y == 1].sum().item())
# model_da = model_da.sel(
# splits=nsplits,
# test_size=test_size).reset_coords(
# drop=True)
## just_pw = [x for x in X.feature.values if 'pressure' not in x]
## X_pw = X.sel(feature=just_pw)
# fg = xr.plot.FacetGrid(
# model_da,
# col='model',
# row='scoring',
# sharex=True,
# sharey=True, figsize=(20, 20))
# for i in range(fg.axes.shape[0]): # i is rows
# for j in range(fg.axes.shape[1]): # j is cols
# ax = fg.axes[i, j]
# modelname = model_da['model'].isel(model=j).item()
# scoring = model_da['scoring'].isel(scoring=i).item()
# chance_plot = [False, False, True]
# for k, feat in enumerate(model_da['feature'].values):
# name = '{}-{}-{}'.format(modelname, scoring, feat)
# model = model_da.isel({'model': j, 'scoring': i}).sel({'feature': feat}).item()
# title = 'ROC of {} model ({})'.format(modelname, scoring)
# if not '+' in feat:
# f = [x for x in X.feature.values if feat in x]
# X_f = X.sel(feature=f)
# else:
# X_f = X
# X_train, X_test, y_train, y_test = train_test_split(
# X_f, y, test_size=test_size/100, shuffle=True, random_state=42)
#
# plot_many_ROC_curves(model, X_f, y, name=name,
# color=cmap[k], ax=ax,
# plot_chance=chance_plot[k],
# title=title, n_splits=n_splits_plot)
# fg.fig.suptitle('{} station: {} total_events, test_events = {}, n_splits = {}'.format(station.upper(), events, int(events* test_size/100), nsplits))
# fg.fig.tight_layout()
# fg.fig.subplots_adjust(top=0.937,
# bottom=0.054,
# left=0.039,
# right=0.993,
# hspace=0.173,
# wspace=0.051)
# if save:
# plt.savefig(savefig_path / 'try.png', bbox_inches='tight')
# return fg
def order_features_list(flist):
""" order the feature list in load_ML_run_results
so i don't get duplicates"""
import pandas as pd
import numpy as np
# first get all features:
li = [x.split('+') for x in flist]
flat_list = [item for sublist in li for item in sublist]
f = list(set(flat_list))
nums = np.arange(1, len(f)+1)
    # now assign a number to each entry:
inds = []
for x in flist:
for fe, num in zip(f, nums):
x = x.replace(fe, str(10**num))
inds.append(eval(x))
ser = pd.Series(inds)
ser.index = flist
ser1 = ser.drop_duplicates()
di = dict(zip(ser1.values, ser1.index))
new_flist = []
for ind, feat in zip(inds, flist):
new_flist.append(di.get(ind))
return new_flist
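# Example of what order_features_list collapses (illustrative): permutations of
# the same feature set map to one canonical string, e.g.
#   order_features_list(['pwv+pressure', 'pressure+pwv', 'pwv'])
#   -> ['pwv+pressure', 'pwv+pressure', 'pwv']
# (this relies on the group names not being substrings of one another)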
def smart_add_dataarray_to_ds_list(dsl, da_name='feature_importances'):
"""add data array to ds_list even if it does not exist, use shape of
data array that exists in other part of ds list"""
import numpy as np
import xarray as xr
# print(da_name)
fi = [x for x in dsl if da_name in x][0]
print(da_name, fi[da_name].shape)
fi = fi[da_name].copy(data=np.zeros(shape=fi[da_name].shape))
new_dsl = []
for ds in dsl:
if da_name not in ds:
ds = xr.merge([ds, fi], combine_attrs='no_conflicts')
new_dsl.append(ds)
return new_dsl
def load_ML_run_results(path=hydro_ml_path, prefix='CVR',
change_DOY_to_doy=True):
from aux_gps import path_glob
import xarray as xr
# from aux_gps import save_ncfile
import pandas as pd
import numpy as np
print('loading hydro ML results for all models and features')
# print('loading hydro ML results for station {}'.format(pw_station))
model_files = path_glob(path, '{}_*.nc'.format(prefix))
model_files = sorted(model_files)
# model_files = [x for x in model_files if pw_station in x.as_posix()]
ds_list = [xr.load_dataset(x) for x in model_files]
if change_DOY_to_doy:
for ds in ds_list:
if 'DOY' in ds.features:
new_feats = [x.replace('DOY', 'doy') for x in ds['feature'].values]
ds['feature'] = new_feats
ds.attrs['features'] = [x.replace('DOY', 'doy') for x in ds.attrs['features']]
model_as_str = [x.as_posix().split('/')[-1].split('.')[0]
for x in model_files]
model_names = [x.split('_')[1] for x in model_as_str]
model_scores = [x.split('_')[3] for x in model_as_str]
model_features = [x.split('_')[2] for x in model_as_str]
if change_DOY_to_doy:
model_features = [x.replace('DOY', 'doy') for x in model_features]
new_model_features = order_features_list(model_features)
ind = pd.MultiIndex.from_arrays(
[model_names,
new_model_features,
model_scores],
names=(
'model',
'features',
'scoring'))
# ind1 = pd.MultiIndex.from_product([model_names, model_scores, model_features], names=[
# 'model', 'scoring', 'feature'])
# ds_list = [x[data_vars] for x in ds_list]
    # complete non-existent fields like best and fi for all ds:
data_vars = [x for x in ds_list[0] if x.startswith('test')]
# data_vars += ['AUC', 'TPR']
data_vars += [x for x in ds_list[0] if x.startswith('y_')]
bests = [[x for x in y if x.startswith('best')] for y in ds_list]
data_vars += list(set([y for x in bests for y in x]))
if 'RF' in model_names:
data_vars += ['feature_importances']
new_ds_list = []
for dvar in data_vars:
ds_list = smart_add_dataarray_to_ds_list(ds_list, dvar)
# # check if all data vars are in each ds and merge them:
new_ds_list = [xr.merge([y[x] for x in data_vars if x in y],
combine_attrs='no_conflicts') for y in ds_list]
# concat all
dss = xr.concat(new_ds_list, dim='dim_0')
dss['dim_0'] = ind
dss = dss.unstack('dim_0')
# dss.attrs['pwv_id'] = pw_station
# fix roc_auc to roc-auc in dss datavars
dss = dss.rename_vars({'test_roc_auc': 'test_roc-auc'})
# dss['test_roc_auc'].name = 'test_roc-auc'
print('calculating ROC, PR metrics.')
dss = calculate_metrics_from_ML_dss(dss)
print('Done!')
return dss
def plot_nested_CV_test_scores(dss, feats=None, fontsize=16,
save=True, wv_label='pwv'):
import seaborn as sns
import matplotlib.pyplot as plt
from aux_gps import convert_da_to_long_form_df
import numpy as np
import xarray as xr
def change_width(ax, new_value) :
for patch in ax.patches :
current_width = patch.get_width()
diff = current_width - new_value
# we change the bar width
patch.set_width(new_value)
# we recenter the bar
patch.set_x(patch.get_x() + diff * .5)
def show_values_on_bars(axs, fs=12, fw='bold', exclude_bar_num=None):
import numpy as np
def _show_on_single_plot(ax, exclude_bar_num=3):
for i, p in enumerate(ax.patches):
                if exclude_bar_num is None or i != exclude_bar_num:
_x = p.get_x() + p.get_width() / 2
_y = p.get_y() + p.get_height()
value = '{:.2f}'.format(p.get_height())
ax.text(_x, _y, value, ha="right",
fontsize=fs, fontweight=fw, zorder=20)
if isinstance(axs, np.ndarray):
for idx, ax in np.ndenumerate(axs):
_show_on_single_plot(ax, exclude_bar_num)
else:
_show_on_single_plot(axs, exclude_bar_num)
splits = dss['outer_split'].size
try:
assert 'best' in dss.attrs['comment']
best = True
except AssertionError:
best = False
except KeyError:
best = False
if 'neg_sample' in dss.dims:
neg = dss['neg_sample'].size
else:
neg = 1
if 'model' not in dss.dims:
dss = dss.expand_dims('model')
dss['model'] = [dss.attrs['model']]
dss = dss.sortby('model', ascending=False)
dss = dss.reindex(scorer=scorer_order)
if feats is None:
feats = ['pwv', 'pwv+pressure', 'pwv+pressure+doy']
dst = dss.sel(features=feats) # .reset_coords(drop=True)
# df = dst['test_score'].to_dataframe()
# df['scorer'] = df.index.get_level_values(3)
# df['model'] = df.index.get_level_values(0)
# df['features'] = df.index.get_level_values(1)
# df['outer_splits'] = df.index.get_level_values(2)
# df['model'] = df['model'].str.replace('SVC', 'SVM')
# df = df.melt(value_vars='test_score', id_vars=[
# 'features', 'model', 'scorer', 'outer_splits'], var_name='test_score',
# value_name='score')
da = dst['test_score']
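    # when 5 feature groups are requested, pad with an all-zero 'empty' group to
    # create a visual gap between single features and combinations; its bar
    # annotation and legend entry are removed further down: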
if len(feats) == 5:
da_empty = da.isel(features=0).copy(
data=np.zeros(da.isel(features=0).shape))
da_empty['features'] = 'empty'
da = xr.concat([da, da_empty], 'features')
da = da.reindex(features=['doy', 'pressure', 'pwv',
'empty', 'pwv+pressure', 'pwv+pressure+doy'])
da.name = 'feature groups'
df = convert_da_to_long_form_df(da, value_name='score',
var_name='feature groups')
sns.set(font_scale=1.5)
sns.set_style('whitegrid')
sns.set_style('ticks')
cmap = sns.color_palette('tab10', n_colors=len(feats))
if len(feats) == 5:
cmap = ['tab:purple', 'tab:brown', 'tab:blue', 'tab:blue',
'tab:orange', 'tab:green']
fg = sns.FacetGrid(data=df, row='model', col='scorer', height=4, aspect=0.9)
# fg.map_dataframe(sns.stripplot, x="test_score", y="score", hue="features",
# data=df, dodge=True, alpha=1, zorder=1, palette=cmap)
# fg.map_dataframe(sns.pointplot, x="test_score", y="score", hue="features",
# data=df, dodge=True, join=False, palette=cmap,
# markers="o", scale=.75, ci=None)
fg.map_dataframe(sns.barplot, x='feature groups', y="score", hue='features',
ci='sd', capsize=None, errwidth=2, errcolor='k',
palette=cmap, dodge=True)
# g = sns.catplot(x='test_score', y="score", hue='features',
# col="scorer", row='model', ci='sd',
# data=df, kind="bar", capsize=0.25,
# height=4, aspect=1.5, errwidth=1.5)
#fg.set_xticklabels(rotation=45)
# fg.set_yticklabels([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=fontsize)
fg.set_ylabels('score')
[x.grid(True) for x in fg.axes.flatten()]
handles, labels = fg.axes[0, 0].get_legend_handles_labels()
if len(feats) == 5:
del handles[3]
del labels[3]
show_values_on_bars(fg.axes, fs=fontsize-4, exclude_bar_num=3)
for i in range(fg.axes.shape[0]): # i is rows
model = dss['model'].isel(model=i).item()
if model == 'SVC':
model = 'SVM'
for j in range(fg.axes.shape[1]): # j is cols
ax = fg.axes[i, j]
scorer = dss['scorer'].isel(scorer=j).item()
title = '{} | scorer={}'.format(model, scorer)
ax.set_title(title, fontsize=fontsize)
ax.set_xlabel('')
ax.set_ylim(0, 1)
change_width(ax, 0.110)
fg.set_xlabels(' ')
if wv_label is not None:
labels = [x.replace('pwv', wv_label) for x in labels]
fg.fig.legend(handles=handles, labels=labels, prop={'size': fontsize}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=len(feats), fontsize=fontsize, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
# true_scores = dst.sel(scorer=scorer, model=model)['true_score']
# dss['permutation_score'].plot.hist(ax=ax, bins=25, color=color)
# ymax = ax.get_ylim()[-1] - 0.2
# ax.vlines(x=true_scores.values, ymin=0, ymax=ymax, linestyle='--', color=cmap)
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.92)
if save:
if best:
filename = 'ML_scores_models_nested_CV_best_hp_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg)
else:
filename = 'ML_scores_models_nested_CV_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_holdout_test_scores(dss, feats='pwv+pressure+doy'):
import seaborn as sns
import matplotlib.pyplot as plt
def show_values_on_bars(axs, fs=12, fw='bold'):
import numpy as np
def _show_on_single_plot(ax):
for p in ax.patches:
_x = p.get_x() + p.get_width() / 2
_y = p.get_y() + p.get_height()
value = '{:.2f}'.format(p.get_height())
ax.text(_x, _y, value, ha="center", fontsize=fs, fontweight=fw)
if isinstance(axs, np.ndarray):
for idx, ax in np.ndenumerate(axs):
_show_on_single_plot(ax)
else:
_show_on_single_plot(axs)
if feats is None:
feats = ['pwv', 'pwv+pressure', 'pwv+pressure+doy']
dst = dss.sel(features=feats) # .reset_coords(drop=True)
df = dst['holdout_test_scores'].to_dataframe()
df['scorer'] = df.index.droplevel(1).droplevel(0)
df['model'] = df.index.droplevel(2).droplevel(1)
df['features'] = df.index.droplevel(2).droplevel(0)
df['model'] = df['model'].str.replace('SVC', 'SVM')
df = df.melt(value_vars='holdout_test_scores', id_vars=[
'features', 'model', 'scorer'], var_name='test_score')
sns.set(font_scale=1.5)
sns.set_style('whitegrid')
sns.set_style('ticks')
g = sns.catplot(x="model", y="value", hue='features',
col="scorer", ci='sd', row=None,
col_wrap=3,
data=df, kind="bar", capsize=0.15,
height=4, aspect=1.5, errwidth=0.8)
g.set_xticklabels(rotation=45)
[x.grid(True) for x in g.axes.flatten()]
show_values_on_bars(g.axes)
filename = 'ML_scores_models_holdout_{}.png'.format('_'.join(feats))
plt.savefig(savefig_path / filename, bbox_inches='tight')
return df
def prepare_test_df_to_barplot_from_dss(dss, feats='doy+pwv+pressure',
plot=True, splitfigs=True):
import seaborn as sns
import matplotlib.pyplot as plt
dvars = [x for x in dss if 'test_' in x]
scores = [x.split('_')[-1] for x in dvars]
dst = dss[dvars]
# dst['scoring'] = [x+'_inner' for x in dst['scoring'].values]
# for i, ds in enumerate(dst):
# dst[ds] = dst[ds].sel(scoring=scores[i]).reset_coords(drop=True)
if feats is None:
feats = ['pwv', 'pressure+pwv', 'doy+pressure+pwv']
dst = dst.sel(features=feats) # .reset_coords(drop=True)
dst = dst.rename_vars(dict(zip(dvars, scores)))
# dst = dst.drop('scoring')
df = dst.to_dataframe()
# dfu = df
df['inner score'] = df.index.droplevel(2).droplevel(1).droplevel(0)
df['features'] = df.index.droplevel(2).droplevel(2).droplevel(1)
df['model'] = df.index.droplevel(2).droplevel(0).droplevel(1)
df = df.melt(value_vars=scores, id_vars=[
'features', 'model', 'inner score'], var_name='outer score')
# return dfu
# dfu.columns = dfu.columns.droplevel(1)
# dfu = dfu.T
# dfu['score'] = dfu.index
# dfu = dfu.reset_index()
# df = dfu.melt(value_vars=['MLP', 'RF', 'SVC'], id_vars=['score'])
df1 = df[(df['inner score']=='f1') | (df['inner score']=='precision') | (df['inner score']=='recall')]
df2 = df[(df['inner score']=='hss') | (df['inner score']=='tss') | (df['inner score']=='roc-auc') | (df['inner score']=='accuracy')]
if plot:
        sns.set(font_scale=1.5)
sns.set_style('whitegrid')
sns.set_style('ticks')
if splitfigs:
g = sns.catplot(x="outer score", y="value", hue='features',
col="inner score", ci='sd',row='model',
data=df1, kind="bar", capsize=0.15,
height=4, aspect=1.5,errwidth=0.8)
g.set_xticklabels(rotation=45)
filename = 'ML_scores_models_{}_1.png'.format('_'.join(feats))
plt.savefig(savefig_path / filename, bbox_inches='tight')
g = sns.catplot(x="outer score", y="value", hue='features',
col="inner score", ci='sd',row='model',
data=df2, kind="bar", capsize=0.15,
height=4, aspect=1.5,errwidth=0.8)
g.set_xticklabels(rotation=45)
filename = 'ML_scores_models_{}_2.png'.format('_'.join(feats))
plt.savefig(savefig_path / filename, bbox_inches='tight')
else:
g = sns.catplot(x="outer score", y="value", hue='features',
col="inner score", ci='sd',row='model',
data=df, kind="bar", capsize=0.15,
height=4, aspect=1.5,errwidth=0.8)
g.set_xticklabels(rotation=45)
filename = 'ML_scores_models_{}.png'.format('_'.join(feats))
plt.savefig(savefig_path / filename, bbox_inches='tight')
return df
def calculate_metrics_from_ML_dss(dss):
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import auc
from sklearn.metrics import precision_recall_curve
import xarray as xr
import numpy as np
import pandas as pd
mean_fpr = np.linspace(0, 1, 100)
# fpr = dss['y_true'].copy(deep=False).values
# tpr = dss['y_true'].copy(deep=False).values
# y_true = dss['y_true'].values
# y_prob = dss['y_prob'].values
ok = [x for x in dss['outer_kfold'].values]
ik = [x for x in dss['inner_kfold'].values]
m = [x for x in dss['model'].values]
sc = [x for x in dss['scoring'].values]
f = [x for x in dss['features'].values]
# r = [x for x in dss['neg_pos_ratio'].values]
ind = pd.MultiIndex.from_product(
[ok, ik, m, sc, f],
names=[
'outer_kfold',
'inner_kfold',
'model',
'scoring',
'features']) # , 'station'])
okn = [x for x in range(dss['outer_kfold'].size)]
ikn = [x for x in range(dss['inner_kfold'].size)]
mn = [x for x in range(dss['model'].size)]
scn = [x for x in range(dss['scoring'].size)]
fn = [x for x in range(dss['features'].size)]
ds_list = []
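    # per-fold ROC and PR curves have varying lengths, so in addition to the raw
    # fpr/tpr and precision/recall arrays, each curve is interpolated onto a
    # common 100-point grid ('TPR' vs FPR, 'PRN' vs RCLL) to fit a fixed shape: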
for i in okn:
for j in ikn:
for k in mn:
for n in scn:
for m in fn:
ds = xr.Dataset()
y_true = dss['y_true'].isel(
outer_kfold=i, inner_kfold=j, model=k, scoring=n, features=m).reset_coords(drop=True).squeeze()
y_prob = dss['y_prob'].isel(
outer_kfold=i, inner_kfold=j, model=k, scoring=n, features=m).reset_coords(drop=True).squeeze()
y_true = y_true.dropna('sample')
y_prob = y_prob.dropna('sample')
if y_prob.size == 0:
# in case of NaNs in the results:
fpr_da = xr.DataArray(
np.nan*np.ones((1)), dims=['sample'])
fpr_da['sample'] = [
x for x in range(fpr_da.size)]
tpr_da = xr.DataArray(
np.nan*np.ones((1)), dims=['sample'])
tpr_da['sample'] = [
x for x in range(tpr_da.size)]
prn_da = xr.DataArray(
np.nan*np.ones((1)), dims=['sample'])
prn_da['sample'] = [
x for x in range(prn_da.size)]
rcll_da = xr.DataArray(
np.nan*np.ones((1)), dims=['sample'])
rcll_da['sample'] = [
x for x in range(rcll_da.size)]
tpr_fpr = xr.DataArray(
np.nan*np.ones((100)), dims=['FPR'])
tpr_fpr['FPR'] = mean_fpr
prn_rcll = xr.DataArray(
np.nan*np.ones((100)), dims=['RCLL'])
prn_rcll['RCLL'] = mean_fpr
pr_auc_da = xr.DataArray(np.nan)
roc_auc_da = xr.DataArray(np.nan)
no_skill_da = xr.DataArray(np.nan)
else:
no_skill = len(
y_true[y_true == 1]) / len(y_true)
no_skill_da = xr.DataArray(no_skill)
fpr, tpr, _ = roc_curve(y_true, y_prob)
interp_tpr = np.interp(mean_fpr, fpr, tpr)
interp_tpr[0] = 0.0
roc_auc = roc_auc_score(y_true, y_prob)
prn, rcll, _ = precision_recall_curve(
y_true, y_prob)
interp_prn = np.interp(
mean_fpr, rcll[::-1], prn[::-1])
interp_prn[0] = 1.0
pr_auc_score = auc(rcll, prn)
roc_auc_da = xr.DataArray(roc_auc)
pr_auc_da = xr.DataArray(pr_auc_score)
prn_da = xr.DataArray(prn, dims=['sample'])
prn_da['sample'] = [x for x in range(len(prn))]
rcll_da = xr.DataArray(rcll, dims=['sample'])
rcll_da['sample'] = [
x for x in range(len(rcll))]
fpr_da = xr.DataArray(fpr, dims=['sample'])
fpr_da['sample'] = [x for x in range(len(fpr))]
tpr_da = xr.DataArray(tpr, dims=['sample'])
tpr_da['sample'] = [x for x in range(len(tpr))]
tpr_fpr = xr.DataArray(
interp_tpr, dims=['FPR'])
tpr_fpr['FPR'] = mean_fpr
prn_rcll = xr.DataArray(
interp_prn, dims=['RCLL'])
prn_rcll['RCLL'] = mean_fpr
ds['fpr'] = fpr_da
ds['tpr'] = tpr_da
ds['roc-auc'] = roc_auc_da
ds['pr-auc'] = pr_auc_da
ds['prn'] = prn_da
ds['rcll'] = rcll_da
ds['TPR'] = tpr_fpr
ds['PRN'] = prn_rcll
ds['no_skill'] = no_skill_da
ds_list.append(ds)
ds = xr.concat(ds_list, 'dim_0')
ds['dim_0'] = ind
ds = ds.unstack()
ds.attrs = dss.attrs
ds['fpr'].attrs['long_name'] = 'False positive rate'
ds['tpr'].attrs['long_name'] = 'True positive rate'
ds['prn'].attrs['long_name'] = 'Precision'
ds['rcll'].attrs['long_name'] = 'Recall'
ds['roc-auc'].attrs['long_name'] = 'ROC or FPR-TPR Area under curve'
    ds['pr-auc'].attrs['long_name'] = 'Precision-Recall Area under curve'
ds['PRN'].attrs['long_name'] = 'Precision-Recall'
ds['TPR'].attrs['long_name'] = 'TPR-FPR (ROC)'
dss = xr.merge([dss, ds], combine_attrs='no_conflicts')
return dss
#
# def load_ML_models(path=hydro_ml_path, station='drag', prefix='CVM', suffix='.pkl'):
# from aux_gps import path_glob
# import joblib
# import matplotlib.pyplot as plt
# import seaborn as sns
# import xarray as xr
# import pandas as pd
# model_files = path_glob(path, '{}_*{}'.format(prefix, suffix))
# model_files = sorted(model_files)
# model_files = [x for x in model_files if station in x.as_posix()]
# m_list = [joblib.load(x) for x in model_files]
# model_files = [x.as_posix().split('/')[-1].split('.')[0] for x in model_files]
# # fix roc-auc:
# model_files = [x.replace('roc_auc', 'roc-auc') for x in model_files]
# print('loading {} station only.'.format(station))
# model_names = [x.split('_')[3] for x in model_files]
## model_pw_stations = [x.split('_')[1] for x in model_files]
## model_hydro_stations = [x.split('_')[2] for x in model_files]
# model_nsplits = [x.split('_')[6] for x in model_files]
# model_scores = [x.split('_')[5] for x in model_files]
# model_features = [x.split('_')[4] for x in model_files]
# model_test_sizes = []
# for file in model_files:
# try:
# model_test_sizes.append(int(file.split('_')[7]))
# except IndexError:
# model_test_sizes.append(20)
## model_pwv_hs_id = list(zip(model_pw_stations, model_hydro_stations))
## model_pwv_hs_id = ['_'.join(x) for x in model_pwv_hs_id]
# filename = 'CVR_{}_{}_{}_{}_{}.nc'.format(
#     name, features, refitted_scorer, ikfolds, okfolds)
# # transform model_dict to dataarray:
# tups = [tuple(x) for x in zip(model_names, model_scores, model_nsplits, model_features, model_test_sizes)] #, model_pwv_hs_id)]
# ind = pd.MultiIndex.from_tuples((tups), names=['model', 'scoring', 'splits', 'feature', 'test_size']) #, 'station'])
# da = xr.DataArray(m_list, dims='dim_0')
# da['dim_0'] = ind
# da = da.unstack('dim_0')
# da['splits'] = da['splits'].astype(int)
# da['test_size'].attrs['units'] = '%'
# return da
def plot_heatmaps_for_all_models_and_scorings(dss, var='roc-auc'): # , save=True):
import xarray as xr
import seaborn as sns
import matplotlib.pyplot as plt
# assert station == dss.attrs['pwv_id']
cmaps = {'roc-auc': sns.color_palette("Blues", as_cmap=True),
'pr-auc': sns.color_palette("Greens", as_cmap=True)}
fg = xr.plot.FacetGrid(
dss,
col='model',
row='scoring',
sharex=True,
sharey=True, figsize=(10, 20))
dss = dss.mean('inner_kfold', keep_attrs=True)
vmin, vmax = dss[var].min(), 1
norm = plt.Normalize(vmin=vmin, vmax=vmax)
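    # use one shared color normalization for all heatmap panels so the shading
    # is comparable across models and scorings (a single colorbar is added below):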
for i in range(fg.axes.shape[0]): # i is rows
for j in range(fg.axes.shape[1]): # j is cols
ax = fg.axes[i, j]
modelname = dss['model'].isel(model=j).item()
scoring = dss['scoring'].isel(scoring=i).item()
model = dss[var].isel(
{'model': j, 'scoring': i}).reset_coords(drop=True)
df = model.to_dataframe()
title = '{} model ({})'.format(modelname, scoring)
df = df.unstack()
mean = df.mean()
mean.name = 'mean'
            # DataFrame.append was removed in pandas 2.0; add the mean row in place instead
            df.loc['mean'] = mean
            df = df.T.droplevel(0)
ax = sns.heatmap(df, annot=True, cmap=cmaps[var], cbar=False,
ax=ax, norm=norm)
ax.set_title(title)
ax.vlines([4], 0, 10, color='r', linewidth=2)
if j > 0:
ax.set_ylabel('')
if i < 2:
ax.set_xlabel('')
cax = fg.fig.add_axes([0.1, 0.025, .8, .015])
fg.fig.colorbar(ax.get_children()[0], cax=cax, orientation="horizontal")
fg.fig.suptitle('{}'.format(
dss.attrs[var].upper()), fontweight='bold')
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.937,
bottom=0.099,
left=0.169,
right=0.993,
hspace=0.173,
wspace=0.051)
# if save:
# filename = 'hydro_models_heatmaps_on_{}_{}_{}.png'.format(
# station, dss['outer_kfold'].size, var)
# plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_ROC_from_dss(dss, feats=None, fontsize=16, save=True, wv_label='pwv',
best=False):
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from aux_gps import convert_da_to_long_form_df
sns.set_style('whitegrid')
sns.set_style('ticks')
sns.set(font_scale=1.0)
cmap = sns.color_palette('tab10', n_colors=3)
splits = dss['outer_split'].size
if 'neg_sample' in dss.dims:
neg = dss['neg_sample'].size
else:
neg = 1
dss = dss.reindex(scorer=scorer_order)
if feats is None:
feats = ['pwv', 'pwv+pressure', 'pwv+pressure+doy']
if 'model' not in dss.dims:
dss = dss.expand_dims('model')
dss['model'] = [dss.attrs['model']]
dss = dss.sortby('model', ascending=False)
dst = dss.sel(features=feats) # .reset_coords(drop=True)
# df = dst['TPR'].to_dataframe()
# if 'neg_sample' in dss.dims:
# fpr_lnum = 5
# model_lnum = 0
# scorer_lnum = 4
# features_lnum = 1
# else:
# fpr_lnum = 4
# model_lnum = 0
# scorer_lnum = 3
# features_lnum = 1
# df['FPR'] = df.index.get_level_values(fpr_lnum)
# df['model'] = df.index.get_level_values(model_lnum)
# df['scorer'] = df.index.get_level_values(scorer_lnum)
# df['features'] = df.index.get_level_values(features_lnum)
df = convert_da_to_long_form_df(dst['TPR'], var_name='score')
# df = df.melt(value_vars='TPR', id_vars=[
# 'features', 'model', 'scorer', 'FPR'], var_name='score')
if best is not None:
if best == 'compare_negs':
df1 = df.copy()[df['neg_sample'] == 1]
df2 = df.copy()
df2.drop('neg_sample', axis=1, inplace=True)
df1.drop('neg_sample', axis=1, inplace=True)
df1['neg_group'] = 1
df2['neg_group'] = 25
df = pd.concat([df1, df2])
col = 'neg_group'
titles = ['Neg=1', 'Neg=25']
else:
col=None
else:
col = 'scorer'
df['model'] = df['model'].str.replace('SVC', 'SVM')
fg = sns.FacetGrid(df, col=col, row='model', aspect=1)
fg.map_dataframe(sns.lineplot, x='FPR', y='value',
hue='features', ci='sd', palette=cmap, n_boot=None,
estimator='mean')
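    # for each feature set, the lineplot draws the mean TPR over the outer splits
    # (and negative-sample draws, if present) with a +/- 1 sd band (ci='sd'):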
for i in range(fg.axes.shape[0]): # i is rows
model = dss['model'].isel(model=i).item()
auc_model = dst.sel(model=model)
if model == 'SVC':
model = 'SVM'
for j in range(fg.axes.shape[1]): # j is cols
scorer = dss['scorer'].isel(scorer=j).item()
auc_scorer_df = auc_model['roc_auc_score'].sel(scorer=scorer).reset_coords(drop=True).to_dataframe()
auc_scorer_mean = [auc_scorer_df.loc[x].mean() for x in feats]
auc_scorer_std = [auc_scorer_df.loc[x].std() for x in feats]
auc_mean = [x.item() for x in auc_scorer_mean]
auc_std = [x.item() for x in auc_scorer_std]
if j == 0 and best is not None:
scorer = dss['scorer'].isel(scorer=j).item()
auc_scorer_df = auc_model['roc_auc_score'].sel(scorer=scorer).isel(neg_sample=0).reset_coords(drop=True).to_dataframe()
auc_scorer_mean = [auc_scorer_df.loc[x].mean() for x in feats]
auc_scorer_std = [auc_scorer_df.loc[x].std() for x in feats]
auc_mean = [x.item() for x in auc_scorer_mean]
auc_std = [x.item() for x in auc_scorer_std]
ax = fg.axes[i, j]
ax.plot([0, 1], [0, 1], color='tab:red', linestyle='--', lw=2,
label='chance')
if best is not None:
if best == 'compare_negs':
title = '{} | {}'.format(model, titles[j])
else:
title = '{}'.format(model)
else:
title = '{} | scorer={}'.format(model, scorer)
ax.set_title(title, fontsize=fontsize)
handles, labels = ax.get_legend_handles_labels()
hands = handles[0:3]
# labes = labels[0:3]
new_labes = []
for auc, auc_sd in zip(auc_mean, auc_std):
l = r'{:.2}$\pm${:.1}'.format(auc, auc_sd)
new_labes.append(l)
ax.legend(handles=hands, labels=new_labes, loc='lower right',
title='AUCs', prop={'size': fontsize-4})
ax.set_xticks([0, 0.2, 0.4, 0.6, 0.8, 1])
ax.grid(True)
# return handles, labels
fg.set_ylabels('True Positive Rate', fontsize=fontsize)
fg.set_xlabels('False Positive Rate', fontsize=fontsize)
if wv_label is not None:
labels = [x.replace('pwv', wv_label) for x in labels]
if best is not None:
if best == 'compare_negs':
fg.fig.legend(handles=handles, labels=labels, prop={'size': fontsize},
edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=2, fontsize=fontsize, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.865,
bottom=0.079,
left=0.144,
right=0.933,
hspace=0.176,
wspace=0.2)
else:
fg.fig.legend(handles=handles, labels=labels, prop={'size': fontsize},
edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=1, fontsize=fontsize, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.825,
bottom=0.079,
left=0.184,
right=0.933,
hspace=0.176,
wspace=0.2)
else:
fg.fig.legend(handles=handles, labels=labels, prop={'size': fontsize}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=5, fontsize=fontsize, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
# true_scores = dst.sel(scorer=scorer, model=model)['true_score']
# dss['permutation_score'].plot.hist(ax=ax, bins=25, color=color)
# ymax = ax.get_ylim()[-1] - 0.2
# ax.vlines(x=true_scores.values, ymin=0, ymax=ymax, linestyle='--', color=cmap)
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.915)
if save:
if best is not None:
filename = 'ROC_plots_models_nested_CV_best_hp_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg)
else:
filename = 'ROC_plots_models_nested_CV_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_permutation_importances_from_dss(dss, feat_dim='features',
outer_dim='outer_split',
features='pwv+pressure+doy',
fix_xticklabels=True,split=1,
axes=None, save=True):
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from natsort import natsorted
sns.set_palette('Dark2', 6)
sns.set_style('whitegrid')
sns.set_style('ticks')
model = dss.attrs['model']
# use dss.sel(model='RF') first as input
dss['feature'] = dss['feature'].str.replace('DOY', 'doy')
dss = dss.sel({feat_dim: features})
# tests_ds = dss['test_score']
# tests_ds = tests_ds.sel(scorer=scorer)
# max_score_split = int(tests_ds.idxmax(outer_dim).item())
# use mean outer split:
# dss = dss.mean(outer_dim)
dss = dss.sel({outer_dim: split})
feats = features.split('+')
fn = len(feats)
if fn == 1:
gr_spec = None
fix_xticklabels = False
elif fn == 2:
gr_spec = [1, 1]
elif fn == 3:
gr_spec = [2, 5, 5]
if axes is None:
fig, axes = plt.subplots(1, fn, sharey=True, figsize=(17, 5), gridspec_kw={'width_ratios': gr_spec})
try:
axes.flatten()
except AttributeError:
axes = [axes]
for i, f in enumerate(sorted(feats)):
fe = [x for x in dss['feature'].values if f in x]
dsf = dss['PI_mean'].sel(
feature=fe).reset_coords(
drop=True)
sorted_feat = natsorted([x for x in dsf.feature.values])
dsf = dsf.reindex(feature=sorted_feat)
print([x for x in dsf.feature.values])
# dsf = dss['PI_mean'].sel(
# feature=fe).reset_coords(
# drop=True)
dsf = dsf.to_dataset('scorer').to_dataframe(
).reset_index(drop=True)
title = '{}'.format(f.upper())
dsf.plot.bar(ax=axes[i], title=title, rot=0, legend=False, zorder=20,
width=.8)
dsf_sum = dsf.sum().tolist()
handles, labels = axes[i].get_legend_handles_labels()
labels = [
'{} ({:.1f})'.format(
x, y) for x, y in zip(
labels, dsf_sum)]
axes[i].legend(handles=handles, labels=labels, prop={'size': 10}, loc='upper left')
axes[i].set_ylabel('Scores')
axes[i].grid(axis='y', zorder=1)
if fix_xticklabels:
n = sum(['pwv' in x for x in dss.feature.values])
axes[0].xaxis.set_ticklabels('')
hrs = np.arange(-24, -24+n)
axes[1].set_xticklabels(hrs, rotation=30, ha="center", fontsize=12)
axes[2].set_xticklabels(hrs, rotation=30, ha="center", fontsize=12)
axes[1].set_xlabel('Hours prior to flood')
axes[2].set_xlabel('Hours prior to flood')
fig.tight_layout()
fig.suptitle('permutation importance scores for {} model split #{}'.format(model, split))
fig.subplots_adjust(top=0.904)
if save:
filename = 'permutation_importances_{}_split_{}_all_scorers_{}.png'.format(model, split, features)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return
def plot_feature_importances_from_dss(
dss,
feat_dim='features', outer_dim='outer_split',
features='pwv+pressure+doy', fix_xticklabels=True,
axes=None, save=True, ylim=[0, 12], fontsize=16):
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from natsort import natsorted
sns.set_palette('Dark2', 6)
# sns.set_style('whitegrid')
# sns.set_style('ticks')
sns.set_theme(style='ticks', font_scale=1.5)
# use dss.sel(model='RF') first as input
dss['feature'] = dss['feature'].str.replace('DOY', 'doy')
dss = dss.sel({feat_dim: features})
# tests_ds = dss['test_score']
# tests_ds = tests_ds.sel(scorer=scorer)
# max_score_split = int(tests_ds.idxmax(outer_dim).item())
# use mean outer split:
dss = dss.mean(outer_dim)
feats = features.split('+')
fn = len(feats)
if fn == 1:
gr_spec = None
fix_xticklabels = False
elif fn == 2:
gr_spec = [1, 1]
elif fn == 3:
gr_spec = [5, 5, 2]
if axes is None:
fig, axes = plt.subplots(1, fn, sharey=True, figsize=(17, 5), gridspec_kw={'width_ratios': gr_spec})
try:
axes.flatten()
except AttributeError:
axes = [axes]
for i, f in enumerate(feats):
fe = [x for x in dss['feature'].values if f in x]
dsf = dss['feature_importances'].sel(
feature=fe).reset_coords(
drop=True)
# dsf = dss['PI_mean'].sel(
# feature=fe).reset_coords(
# drop=True)
sorted_feat = natsorted([x for x in dsf.feature.values])
# sorted_feat = [x for x in dsf.feature.values]
print(sorted_feat)
dsf = dsf.reindex(feature=sorted_feat)
dsf = dsf.to_dataset('scorer').to_dataframe(
).reset_index(drop=True) * 100
title = '{}'.format(f.upper())
dsf.plot.bar(ax=axes[i], title=title, rot=0, legend=False, zorder=20,
width=.8)
axes[i].set_title(title, fontsize=fontsize)
dsf_sum = dsf.sum().tolist()
handles, labels = axes[i].get_legend_handles_labels()
labels = [
'{} ({:.1f} %)'.format(
x, y) for x, y in zip(
labels, dsf_sum)]
axes[i].legend(handles=handles, labels=labels, prop={'size': 12}, loc='upper center')
axes[i].set_ylabel('Feature importances [%]')
axes[i].grid(axis='y', zorder=1)
if ylim is not None:
[ax.set_ylim(*ylim) for ax in axes]
if fix_xticklabels:
n = sum(['pwv' in x for x in dss.feature.values])
axes[2].xaxis.set_ticklabels('')
hrs = np.arange(-1, -25, -1)
axes[0].set_xticklabels(hrs, rotation=30, ha="center", fontsize=14)
axes[1].set_xticklabels(hrs, rotation=30, ha="center", fontsize=14)
axes[2].tick_params(labelsize=fontsize)
axes[0].set_xlabel('Hours prior to flood')
axes[1].set_xlabel('Hours prior to flood')
fig.tight_layout()
if save:
filename = 'RF_feature_importances_all_scorers_{}.png'.format(features)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return
def plot_feature_importances(
dss,
feat_dim='features',
features='pwv+pressure+doy',
scoring='f1', fix_xticklabels=True,
axes=None, save=True):
# use dss.sel(model='RF') first as input
import matplotlib.pyplot as plt
import numpy as np
dss = dss.sel({feat_dim: features})
tests_ds = dss[[x for x in dss if 'test' in x]]
tests_ds = tests_ds.sel(scoring=scoring)
score_ds = tests_ds['test_{}'.format(scoring)]
max_score = score_ds.idxmax('outer_kfold').values
feats = features.split('+')
fn = len(feats)
if axes is None:
fig, axes = plt.subplots(1, fn, sharey=True, figsize=(17, 5), gridspec_kw={'width_ratios': [1, 4, 4]})
try:
axes.flatten()
except AttributeError:
axes = [axes]
for i, f in enumerate(feats):
fe = [x for x in dss['feature'].values if f in x]
dsf = dss['feature_importances'].sel(
feature=fe,
outer_kfold=max_score).reset_coords(
drop=True)
dsf = dsf.to_dataset('scoring').to_dataframe(
).reset_index(drop=True) * 100
title = '{} ({})'.format(f.upper(), scoring)
dsf.plot.bar(ax=axes[i], title=title, rot=0, legend=False, zorder=20,
width=.8)
dsf_sum = dsf.sum().tolist()
handles, labels = axes[i].get_legend_handles_labels()
labels = [
'{} ({:.1f} %)'.format(
x, y) for x, y in zip(
labels, dsf_sum)]
axes[i].legend(handles=handles, labels=labels, prop={'size': 8})
axes[i].set_ylabel('Feature importance [%]')
axes[i].grid(axis='y', zorder=1)
if fix_xticklabels:
axes[0].xaxis.set_ticklabels('')
        hrs = np.arange(-24, 0)
        axes[1].set_xticklabels(hrs, rotation=30, ha="center", fontsize=12)
        axes[2].set_xticklabels(hrs, rotation=30, ha="center", fontsize=12)
axes[1].set_xlabel('Hours prior to flood')
axes[2].set_xlabel('Hours prior to flood')
if save:
fig.tight_layout()
filename = 'RF_feature_importances_{}.png'.format(scoring)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return
def plot_feature_importances_for_all_scorings(dss,
features='doy+pwv+pressure',
model='RF', splitfigs=True):
import matplotlib.pyplot as plt
# station = dss.attrs['pwv_id'].upper()
dss = dss.sel(model=model).reset_coords(drop=True)
fns = len(features.split('+'))
scores = dss['scoring'].values
scores1 = ['f1', 'precision', 'recall']
    scores2 = ['hss', 'tss', 'accuracy', 'roc-auc']
if splitfigs:
fig, axes = plt.subplots(len(scores1), fns, sharey=True, figsize=(15, 20))
for i, score in enumerate(scores1):
plot_feature_importances(
dss, features=features, scoring=score, axes=axes[i, :])
fig.suptitle(
'feature importances of {} model'.format(model))
fig.tight_layout()
fig.subplots_adjust(top=0.935,
bottom=0.034,
left=0.039,
right=0.989,
hspace=0.19,
wspace=0.027)
filename = 'RF_feature_importances_1.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
fig, axes = plt.subplots(len(scores2), fns, sharey=True, figsize=(15, 20))
for i, score in enumerate(scores2):
plot_feature_importances(
dss, features=features, scoring=score, axes=axes[i, :])
fig.suptitle(
'feature importances of {} model'.format(model))
fig.tight_layout()
fig.subplots_adjust(top=0.935,
bottom=0.034,
left=0.039,
right=0.989,
hspace=0.19,
wspace=0.027)
filename = 'RF_feature_importances_2.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
else:
fig, axes = plt.subplots(len(scores), fns, sharey=True, figsize=(15, 20))
for i, score in enumerate(scores):
plot_feature_importances(
dss, features=features, scoring=score, axes=axes[i, :])
fig.suptitle(
'feature importances of {} model'.format(model))
fig.tight_layout()
fig.subplots_adjust(top=0.935,
bottom=0.034,
left=0.039,
right=0.989,
hspace=0.19,
wspace=0.027)
filename = 'RF_feature_importances.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
return dss
def plot_ROC_curve_from_dss_nested_CV(dss, outer_dim='outer_split',
plot_chance=True, color='tab:blue',
fontsize=14, plot_legend=True,
title=None,
ax=None, main_label=None):
import matplotlib.pyplot as plt
import numpy as np
if ax is None:
fig, ax = plt.subplots()
if title is None:
title = "Receiver operating characteristic"
mean_fpr = dss['FPR'].values
mean_tpr = dss['TPR'].mean(outer_dim).values
mean_auc = dss['roc_auc_score'].mean().item()
if np.isnan(mean_auc):
        raise ValueError('mean roc_auc_score is NaN')
std_auc = dss['roc_auc_score'].std().item()
field = 'TPR'
xlabel = 'False Positive Rate'
ylabel = 'True Positive Rate'
if main_label is None:
main_label = r'Mean ROC (AUC={:.2f}$\pm${:.2f})'.format(mean_auc, std_auc)
textstr = '\n'.join(['{}'.format(
main_label), r'(AUC={:.2f}$\pm${:.2f})'.format(mean_auc, std_auc)])
main_label = textstr
ax.plot(mean_fpr, mean_tpr, color=color,
lw=3, alpha=.8, label=main_label)
std_tpr = dss[field].std(outer_dim).values
n = dss[outer_dim].size
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
# plot Chance line:
if plot_chance:
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8, zorder=206)
stdlabel = r'$\pm$ 1 Std. dev.'
stdstr = '\n'.join(['{}'.format(stdlabel), r'({} outer splits)'.format(n)])
ax.fill_between(
mean_fpr,
tprs_lower,
tprs_upper,
color='grey',
alpha=.2, label=stdstr)
ax.grid()
ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05])
# ax.set_title(title, fontsize=fontsize)
ax.tick_params(axis='y', labelsize=fontsize)
ax.tick_params(axis='x', labelsize=fontsize)
ax.set_xlabel(xlabel, fontsize=fontsize)
ax.set_ylabel(ylabel, fontsize=fontsize)
ax.set_title(title, fontsize=fontsize)
return ax
def plot_ROC_PR_curve_from_dss(
dss,
outer_dim='outer_kfold',
inner_dim='inner_kfold',
plot_chance=True,
ax=None,
color='b',
title=None,
std_on='inner',
main_label=None,
fontsize=14,
plot_type='ROC',
plot_std_legend=True):
"""plot classifier metrics, plot_type=ROC or PR"""
import matplotlib.pyplot as plt
import numpy as np
if ax is None:
fig, ax = plt.subplots()
if title is None:
title = "Receiver operating characteristic"
if plot_type == 'ROC':
mean_fpr = dss['FPR'].values
mean_tpr = dss['TPR'].mean(outer_dim).mean(inner_dim).values
mean_auc = dss['roc-auc'].mean().item()
if np.isnan(mean_auc):
            raise ValueError('mean roc-auc is NaN')
std_auc = dss['roc-auc'].std().item()
field = 'TPR'
xlabel = 'False Positive Rate'
ylabel = 'True Positive Rate'
elif plot_type == 'PR':
mean_fpr = dss['RCLL'].values
mean_tpr = dss['PRN'].mean(outer_dim).mean(inner_dim).values
mean_auc = dss['pr-auc'].mean().item()
if np.isnan(mean_auc):
            raise ValueError('mean pr-auc is NaN')
std_auc = dss['pr-auc'].std().item()
no_skill = dss['no_skill'].mean(outer_dim).mean(inner_dim).item()
field = 'PRN'
xlabel = 'Recall'
ylabel = 'Precision'
# plot mean ROC:
if main_label is None:
main_label = r'Mean {} (AUC={:.2f}$\pm${:.2f})'.format(
plot_type, mean_auc, std_auc)
else:
textstr = '\n'.join(['Mean ROC {}'.format(
main_label), r'(AUC={:.2f}$\pm${:.2f})'.format(mean_auc, std_auc)])
main_label = textstr
ax.plot(mean_fpr, mean_tpr, color=color,
lw=2, alpha=.8, label=main_label)
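    # std_on selects which CV dimension the +/- 1 std band is computed over:
    # the inner folds, the outer folds, or both stacked together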
if std_on == 'inner':
std_tpr = dss[field].mean(outer_dim).std(inner_dim).values
n = dss[inner_dim].size
elif std_on == 'outer':
std_tpr = dss[field].mean(inner_dim).std(outer_dim).values
n = dss[outer_dim].size
elif std_on == 'all':
std_tpr = dss[field].stack(
dumm=[inner_dim, outer_dim]).std('dumm').values
n = dss[outer_dim].size * dss[inner_dim].size
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
# plot Chance line:
if plot_chance:
if plot_type == 'ROC':
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8)
elif plot_type == 'PR':
ax.plot([0, 1], [no_skill, no_skill], linestyle='--', color='r',
lw=2, label='No Skill', alpha=.8)
# plot ROC STD range:
ax.fill_between(
mean_fpr,
tprs_lower,
tprs_upper,
color='grey',
alpha=.2, label=r'$\pm$ 1 std. dev. ({} {} splits)'.format(n, std_on))
ax.grid()
ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05])
ax.set_title(title, fontsize=fontsize)
ax.tick_params(axis='y', labelsize=fontsize)
ax.tick_params(axis='x', labelsize=fontsize)
ax.set_xlabel(xlabel, fontsize=fontsize)
ax.set_ylabel(ylabel, fontsize=fontsize)
# handles, labels = ax.get_legend_handles_labels()
# if not plot_std_legend:
# if len(handles) == 7:
# handles = handles[:-2]
# labels = labels[:-2]
# else:
# handles = handles[:-1]
# labels = labels[:-1]
# ax.legend(handles=handles, labels=labels, loc="lower right",
# fontsize=fontsize)
return ax
def load_cv_splits_from_pkl(savepath):
import joblib
from aux_gps import path_glob
file = path_glob(savepath, 'CV_inds_*.pkl')[0]
n_splits = int(file.as_posix().split('/')[-1].split('_')[2])
shuffle = file.as_posix().split('/')[-1].split('.')[0].split('=')[-1]
cv_dict = joblib.load(file)
spl = len([x for x in cv_dict.keys()])
assert spl == n_splits
print('loaded {} with {} splits.'.format(file, n_splits))
return cv_dict
def save_cv_splits_to_dict(X, y, cv, train_key='train', test_key='test',
savepath=None):
import joblib
cv_dict = {}
for i, (train, test) in enumerate(cv.split(X, y)):
cv_dict[i+1] = {train_key: train, test_key: test}
    # check for completeness (the union of all train indices should equal the
    # union of all test indices):
    all_train = [x[train_key] for x in cv_dict.values()]
    flat_train = set([item for sublist in all_train for item in sublist])
    all_test = [x[test_key] for x in cv_dict.values()]
    flat_test = set([item for sublist in all_test for item in sublist])
assert flat_test == flat_train
if savepath is not None:
filename = 'CV_inds_{}_splits_shuffle={}.pkl'.format(cv.n_splits, cv.shuffle)
joblib.dump(cv_dict, savepath / filename)
print('saved {} to {}.'.format(filename, savepath))
return cv_dict
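# Example usage (sketch; X, y and the target directory are assumptions):
# from sklearn.model_selection import StratifiedKFold
# cv = StratifiedKFold(n_splits=4, shuffle=True, random_state=42)
# cv_dict = save_cv_splits_to_dict(X, y, cv, savepath=hydro_ml_path)
# cv_dict = load_cv_splits_from_pkl(hydro_ml_path)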
def plot_many_ROC_curves(model, X, y, name='', color='b', ax=None,
plot_chance=True, title=None, n_splits=None):
from sklearn.metrics import plot_roc_curve
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
import numpy as np
from sklearn.model_selection import StratifiedKFold
if ax is None:
fig, ax = plt.subplots()
if title is None:
title = "Receiver operating characteristic"
# just plot the ROC curve for X, y, no nsplits and stats:
if n_splits is None:
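        # note: sklearn.metrics.plot_roc_curve was deprecated in scikit-learn 1.0
        # and removed in 1.2 (RocCurveDisplay.from_estimator is its replacement)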
viz = plot_roc_curve(model, X, y, color=color, ax=ax, name=name)
else:
cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
for i, (train, val) in enumerate(cv.split(X, y)):
model.fit(X[train], y[train])
# y_score = model.fit(X[train], y[train]).predict_proba(X[val])[:, 1]
y_pred = model.predict(X[val])
fpr, tpr, _ = roc_curve(y[val], y_pred)
# viz = plot_roc_curve(model, X[val], y[val],
# name='ROC fold {}'.format(i),
# alpha=0.3, lw=1, ax=ax)
# fpr = viz.fpr
# tpr = viz.tpr
interp_tpr = np.interp(mean_fpr, fpr, tpr)
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
aucs.append(roc_auc_score(y[val], y_pred))
# scores.append(f1_score(y[val], y_pred))
# scores = np.array(scores)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax.plot(mean_fpr, mean_tpr, color=color,
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (
mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
if plot_chance:
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8)
ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],
title=title)
ax.legend(loc="lower right")
return ax
def HP_tuning(X, y, model_name='SVC', val_size=0.18, n_splits=None,
test_size=None,
best_score='f1', seed=42, savepath=None):
    """Do HP tuning with the ML_Classifier_Switcher object and return a Dataset
    of results. Note that X, y are assumed to already be the train/validation
    part, i.e. after the held-out test split."""
    from sklearn.model_selection import GridSearchCV
    from sklearn.model_selection import StratifiedKFold
# first get the features from X:
features = list(set(['_'.join(x.split('_')[0:2])
for x in X['feature'].values]))
ml = ML_Classifier_Switcher()
sk_model = ml.pick_model(model_name)
param_grid = ml.param_grid
if n_splits is None and val_size is not None:
n_splits = int((1 // val_size) - 1)
elif val_size is not None and n_splits is not None:
        raise ValueError('Both val_size and n_splits are defined, choose only one of them.')
print('StratifiedKfolds of {}.'.format(n_splits))
cv = StratifiedKFold(n_splits=n_splits, shuffle=True)
gr = GridSearchCV(estimator=sk_model, param_grid=param_grid, cv=cv,
n_jobs=-1, scoring=['f1', 'roc_auc', 'accuracy'], verbose=1,
refit=best_score, return_train_score=True)
gr.fit(X, y)
if best_score is not None:
ds, best_model = process_gridsearch_results(gr, model_name,
features=features, pwv_id=X.attrs['pwv_id'], hs_id=y.attrs['hydro_station_id'], test_size=test_size)
else:
ds = process_gridsearch_results(gr, model_name, features=features,
pwv_id=X.attrs['pwv_id'], hs_id=y.attrs['hydro_station_id'], test_size=test_size)
best_model = None
if savepath is not None:
save_cv_results(ds, best_model=best_model, savepath=savepath)
return ds, best_model
def save_gridsearchcv_object(GridSearchCV, savepath, filename):
import joblib
print('{} was saved to {}'.format(filename, savepath))
joblib.dump(GridSearchCV, savepath / filename)
return
def run_RF_feature_importance_on_all_features(path=hydro_path, gr_path=hydro_ml_path/'holdout'):
import xarray as xr
from aux_gps import get_all_possible_combinations_from_list
feats = get_all_possible_combinations_from_list(
['pwv', 'pressure', 'doy'], reduce_single_list=True, combine_by_sep='+')
feat_list = []
for feat in feats:
da = holdout_test(model_name='RF', return_RF_FI=True, features=feat)
feat_list.append(da)
daa = xr.concat(feat_list, 'features')
daa['features'] = feats
return daa
def load_nested_CV_test_results_from_all_models(path=hydro_ml_path, best=False,
neg=1, splits=4,
permutation=False):
from aux_gps import path_glob
import xarray as xr
if best:
if splits is not None:
file_str = 'nested_CV_test_results_*_all_features_with_hyper_params_best_hp_neg_{}_{}a.nc'.format(neg, splits)
if permutation:
file_str = 'nested_CV_test_results_*_all_features_permutation_tests_best_hp_neg_{}_{}a.nc'.format(neg, splits)
else:
if splits is not None:
file_str = 'nested_CV_test_results_*_all_features_with_hyper_params_neg_{}_{}a.nc'.format(neg, splits)
if permutation:
file_str = 'nested_CV_test_results_*_all_features_permutation_tests_neg_{}_{}a.nc'.format(neg, splits)
files = path_glob(path, file_str)
print(files)
models = [x.as_posix().split('/')[-1].split('_')[4] for x in files]
print('loading CV test results only for {} models'.format(', '.join(models)))
dsl = [xr.load_dataset(x) for x in files]
if not permutation:
dsl = [x[['mean_score', 'std_score', 'test_score', 'roc_auc_score', 'TPR']] for x in dsl]
dss = xr.concat(dsl, 'model')
dss['model'] = models
return dss
# def plot_all_permutation_test_results(dss, feats=None):
# import xarray as xr
# fg = xr.plot.FacetGrid(
# dss,
# col='scorer',
# row='model',
# sharex=True,
# sharey=True, figsize=(20, 20))
# for i in range(fg.axes.shape[0]): # i is rows
# model = dss['model'].isel(model=i).item()
# for j in range(fg.axes.shape[1]): # j is cols
# ax = fg.axes[i, j]
# scorer = dss['scorer'].isel(scorer=j).item()
# ax = plot_single_permutation_test_result(dss, feats=feats,
# scorer=scorer,
# model=model,
# ax=ax)
# fg.fig.tight_layout()
# return fg
def plot_permutation_test_results_from_dss(dss, feats=None, fontsize=14,
save=True, wv_label='pwv'):
# ax=None, scorer='f1', model='MLP'):
import matplotlib.pyplot as plt
import seaborn as sns
from PW_from_gps_figures import get_legend_labels_handles_title_seaborn_histplot
from aux_gps import convert_da_to_long_form_df
sns.set_style('whitegrid')
sns.set_style('ticks')
try:
splits = dss['outer_split'].size
except KeyError:
splits = 5
    try:
        assert 'best' in dss.attrs['comment']
        best = True
    except AssertionError:
        best = False
    except KeyError:
        best = False
if 'neg_sample' in dss.dims:
neg = dss['neg_sample'].size
else:
neg = 1
if 'model' not in dss.dims:
dss = dss.expand_dims('model')
dss['model'] = [dss.attrs['model']]
dss = dss.reindex(scorer=scorer_order)
# dss = dss.mean('outer_split')
cmap = sns.color_palette('tab10', n_colors=3)
if feats is None:
feats = ['pwv', 'pwv+pressure', 'pwv+pressure+doy']
dss = dss.sortby('model', ascending=False)
dst = dss.sel(features=feats) # .reset_coords(drop=True)
# df = dst[['permutation_score', 'true_score', 'pvalue']].to_dataframe()
# df['permutations'] = df.index.get_level_values(2)
# df['scorer'] = df.index.get_level_values(3)
# df['features'] = df.index.get_level_values(0)
# df['model'] = df.index.get_level_values(1)
# df['model'] = df['model'].str.replace('SVC', 'SVM')
# df = df.melt(value_vars=['permutation_score', 'true_score', 'pvalue'], id_vars=[
# 'features', 'model', 'scorer'], var_name='scores')
df = convert_da_to_long_form_df(dst[['permutation_score', 'true_score', 'pvalue']], var_name='scores')
df_p = df[df['scores'] == 'permutation_score']
df_pval = df[df['scores'] == 'pvalue']
# if ax is None:
# fig, ax = plt.subplots(figsize=(6, 8))
fg = sns.FacetGrid(df_p, col='scorer', row='model', legend_out=True,
sharex=False)
fg.map_dataframe(sns.histplot, x="value", hue="features",
legend=True, palette=cmap,
stat='density', kde=True,
element='bars', bins=10)
# pvals = dst.sel(scorer=scorer, model=model)[
# 'pvalue'].reset_coords(drop=True)
# pvals = pvals.values
# handles, labels, title = get_legend_labels_handles_title_seaborn_histplot(ax)
# new_labels = []
# for pval, label in zip(pvals, labels):
# label += ' (p={:.1})'.format(pval)
# new_labels.append(label)
# ax.legend(handles, new_labels, title=title)
df_t = df[df['scores'] == 'true_score']
for i in range(fg.axes.shape[0]): # i is rows
model = dss['model'].isel(model=i).item()
df_model = df_t[df_t['model'] == model]
df_pval_model = df_pval[df_pval['model'] == model]
for j in range(fg.axes.shape[1]): # j is cols
scorer = dss['scorer'].isel(scorer=j).item()
df1 = df_model[df_model['scorer'] == scorer]
df2 = df_pval_model[df_pval_model['scorer'] == scorer]
ax = fg.axes[i, j]
ymax = ax.get_ylim()[-1] - 0.2
plabels = []
for k, feat in enumerate(feats):
val = df1[df1['features']==feat]['value'].unique().item()
pval = df2[df2['features']==feat]['value'].unique().item()
plabels.append('pvalue: {:.2g}'.format(pval))
# print(i, val, feat, scorer, model)
ax.axvline(x=val, ymin=0, ymax=ymax, linestyle='--', color=cmap[k],
label=feat)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles=handles, labels=plabels,
prop={'size': fontsize-4}, loc='upper left')
if 'hss' in scorer or 'tss' in scorer:
ax.set_xlim(-0.35, 1)
else:
ax.set_xlim(0.15, 1)
# ax.set_xticks([0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.1])
# handles, labels, title = get_legend_labels_handles_title_seaborn_histplot(ax)
if model == 'SVC':
model = 'SVM'
title = '{} | scorer={}'.format(model, scorer)
ax.set_title(title, fontsize=fontsize)
# ax.set_xlim(-0.3, 1)
fg.set_ylabels('Density', fontsize=fontsize)
fg.set_xlabels('Score', fontsize=fontsize)
if wv_label is not None:
labels = [x.replace('pwv', wv_label) for x in labels]
fg.fig.legend(handles=handles, labels=labels, prop={'size': fontsize}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=5, fontsize=fontsize, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
# true_scores = dst.sel(scorer=scorer, model=model)['true_score']
# dss['permutation_score'].plot.hist(ax=ax, bins=25, color=color)
# ymax = ax.get_ylim()[-1] - 0.2
# ax.vlines(x=true_scores.values, ymin=0, ymax=ymax, linestyle='--', color=cmap)
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.92)
if save:
if best:
filename = 'permutation_test_models_nested_CV_best_hp_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg)
else:
filename = 'permutation_test_models_nested_CV_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def run_CV_nested_tests_on_all_features(path=hydro_path, gr_path=hydro_ml_path/'nested4',
verbose=False, model_name='SVC', params=None,
savepath=None, drop_hours=None, PI=30, Ptest=None,
suffix=None, sample_from_negatives=1):
"""returns the nested CV test results for all scorers, features and models,
if model is chosen, i.e., model='MLP', returns just this model results
and its hyper-parameters per each outer split"""
import xarray as xr
from aux_gps import get_all_possible_combinations_from_list
from aux_gps import save_ncfile
feats = get_all_possible_combinations_from_list(
['pwv', 'pressure', 'doy'], reduce_single_list=True, combine_by_sep='+')
feat_list = []
for feat in feats:
print('Running CV on feature {}'.format(feat))
ds = CV_test_after_GridSearchCV(path=path, gr_path=gr_path,
model_name=model_name, params=params,
features=feat, PI=PI, Ptest=Ptest,
verbose=verbose, drop_hours=drop_hours,
sample_from_negatives=sample_from_negatives)
feat_list.append(ds)
dsf = xr.concat(feat_list, 'features')
dsf['features'] = feats
dss = dsf
dss.attrs['model'] = model_name
if Ptest is not None:
filename = 'nested_CV_test_results_{}_all_features_permutation_tests'.format(model_name)
else:
filename = 'nested_CV_test_results_{}_all_features_with_hyper_params'.format(model_name)
if params is not None:
dss.attrs['comment'] = 'using best hyper parameters for all features and outer splits'
filename += '_best_hp'
filename += '_neg_{}'.format(sample_from_negatives)
if suffix is not None:
filename += '_{}'.format(suffix)
filename += '.nc'
if savepath is not None:
save_ncfile(dss, savepath, filename)
return dss
def run_holdout_test_on_all_models_and_features(path=hydro_path, gr_path=hydro_ml_path/'holdout'):
import xarray as xr
from aux_gps import get_all_possible_combinations_from_list
feats = get_all_possible_combinations_from_list(
['pwv', 'pressure', 'doy'], reduce_single_list=True, combine_by_sep='+')
models = ['MLP', 'SVC', 'RF']
model_list = []
model_list2 = []
for model in models:
feat_list = []
feat_list2 = []
for feat in feats:
best, roc = holdout_test(path=path, gr_path=gr_path,
model_name=model, features=feat)
best.index.name = 'scorer'
ds = best[['mean_score', 'std_score', 'holdout_test_scores']].to_xarray()
roc.index.name = 'FPR'
roc_da = roc.to_xarray().to_array('scorer')
feat_list.append(ds)
feat_list2.append(roc_da)
dsf = xr.concat(feat_list, 'features')
dsf2 = xr.concat(feat_list2, 'features')
dsf['features'] = feats
dsf2['features'] = feats
model_list.append(dsf)
model_list2.append(dsf2)
dss = xr.concat(model_list, 'model')
rocs = xr.concat(model_list2, 'model')
dss['model'] = models
rocs['model'] = models
dss['roc'] = rocs
return dss
def prepare_X_y_for_holdout_test(features='pwv+doy', model_name='SVC',
path=hydro_path, drop_hours=None,
negative_samples=1):
    # combine the positive and negative samples from the nc files:
X, y = combine_pos_neg_from_nc_file(path, negative_sample_num=negative_samples)
# re arange X features according to model:
feats = features.split('+')
if model_name == 'RF' and 'doy' in feats:
if isinstance(feats, list):
feats.remove('doy')
feats.append('DOY')
elif isinstance(feats, str):
feats = 'DOY'
elif model_name != 'RF' and 'doy' in feats:
if isinstance(feats, list):
feats.remove('doy')
feats.append('doy_sin')
feats.append('doy_cos')
elif isinstance(feats, str):
feats = ['doy_sin']
feats.append('doy_cos')
if isinstance(X, list):
Xs = []
for X1 in X:
Xs.append(select_features_from_X(X1, feats))
X = Xs
else:
X = select_features_from_X(X, feats)
if drop_hours is not None:
if isinstance(X, list):
Xs = []
for X1 in X:
Xs.append(drop_hours_in_pwv_pressure_features(X1, drop_hours,
verbose=True))
X = Xs
else:
X = drop_hours_in_pwv_pressure_features(X, drop_hours, verbose=True)
return X, y
def CV_test_after_GridSearchCV(path=hydro_path, gr_path=hydro_ml_path/'nested4',
model_name='SVC', features='pwv', params=None,
verbose=False, drop_hours=None, PI=None,
Ptest=None, sample_from_negatives=1):
"""do cross_validate with all scorers on all gridsearchcv folds,
reads the nested outer splits CV file in gr_path"""
import xarray as xr
import numpy as np
# cv = read_cv_params_and_instantiate(gr_path/'CV_outer.csv')
cv_dict = load_cv_splits_from_pkl(gr_path)
if verbose:
print(cv_dict)
param_df_dict = load_one_gridsearchcv_object(path=gr_path,
cv_type='nested',
features=features,
model_name=model_name,
verbose=verbose)
Xs, ys = prepare_X_y_for_holdout_test(features, model_name, path,
drop_hours=drop_hours,
negative_samples=sample_from_negatives)
bests = []
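    # loop over the negative-sample draws; when sample_from_negatives > 1 the
    # per-draw results are concatenated along a 'neg_sample' dimension at the end: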
for i, negative_sample in enumerate(np.arange(1, sample_from_negatives + 1)):
print('running with negative sample #{} out of {}'.format(
negative_sample, sample_from_negatives))
if isinstance(Xs, list):
X = Xs[i]
y = ys[i]
else:
X = Xs
y = ys
if Ptest is not None:
print('Permutation Test is in progress!')
ds = run_permutation_classifier_test(X, y, 5, param_df_dict, Ptest=Ptest,
params=params,
model_name=model_name, verbose=verbose)
return ds
if params is not None:
if verbose:
print('running with custom hyper parameters: ', params)
outer_bests = []
outer_rocs = []
fis = []
pi_means = []
pi_stds = []
n_splits = len([x for x in cv_dict.keys()])
for split, tt in cv_dict.items():
X_train = X[tt['train']]
y_train = y[tt['train']]
X_test = X[tt['test']]
y_test = y[tt['test']]
outer_split = '{}-{}'.format(split, n_splits)
# for i, (train_index, test_index) in enumerate(cv.split(X, y)):
# X_train = X[train_index]
# y_train = y[train_index]
# X_test = X[test_index]
# y_test = y[test_index]
# outer_split = '{}-{}'.format(i+1, cv.n_splits)
best_params_df = param_df_dict.get(outer_split)
if params is not None:
for key, value in params.items():
if isinstance(value, tuple):
for ind in best_params_df.index:
best_params_df.at[ind, key] = value
else:
best_params_df[key] = value
if model_name == 'RF':
if PI is not None:
bdf, roc, fi, pi_mean, pi_std = run_test_on_CV_split(X_train, y_train, X_test, y_test,
best_params_df, PI=PI, Ptest=Ptest,
model_name=model_name, verbose=verbose)
else:
bdf, roc, fi = run_test_on_CV_split(X_train, y_train, X_test, y_test,
best_params_df, PI=PI, Ptest=Ptest,
model_name=model_name, verbose=verbose)
fis.append(fi)
else:
if PI is not None:
bdf, roc, pi_mean, pi_std = run_test_on_CV_split(X_train, y_train, X_test, y_test,
best_params_df, PI=PI,
model_name=model_name, verbose=verbose)
else:
bdf, roc = run_test_on_CV_split(X_train, y_train, X_test, y_test,
best_params_df, PI=PI,
model_name=model_name, verbose=verbose)
if PI is not None:
pi_means.append(pi_mean)
pi_stds.append(pi_std)
bdf.index.name = 'scorer'
roc.index.name = 'FPR'
if 'hidden_layer_sizes' in bdf.columns:
bdf['hidden_layer_sizes'] = bdf['hidden_layer_sizes'].astype(str)
bdf_da = bdf.to_xarray()
roc_da = roc.to_xarray().to_array('scorer')
roc_da.name = 'TPR'
outer_bests.append(bdf_da)
outer_rocs.append(roc_da)
best_da = xr.concat(outer_bests, 'outer_split')
roc_da = xr.concat(outer_rocs, 'outer_split')
best = xr.merge([best_da, roc_da])
best['outer_split'] = np.arange(1, n_splits + 1)
if model_name == 'RF':
fi_da = xr.concat(fis, 'outer_split')
best['feature_importances'] = fi_da
if PI is not None:
pi_mean_da = xr.concat(pi_means, 'outer_split')
pi_std_da = xr.concat(pi_stds, 'outer_split')
best['PI_mean'] = pi_mean_da
best['PI_std'] = pi_std_da
bests.append(best)
if len(bests) == 1:
return bests[0]
else:
best_ds = xr.concat(bests, 'neg_sample')
best_ds['neg_sample'] = np.arange(1, sample_from_negatives + 1)
return best_ds
def run_permutation_classifier_test(X, y, cv, best_params_df, Ptest=100,
model_name='SVC', verbose=False, params=None):
from sklearn.model_selection import permutation_test_score
import xarray as xr
import numpy as np
def run_one_permutation_test(X=X, y=y, cv=cv, bp_df=best_params_df,
model_name=model_name, n_perm=Ptest,
verbose=verbose):
true_scores = []
pvals = []
perm_scores = []
for scorer in bp_df.index:
sk_model = ml.pick_model(model_name)
# get best params (drop two last cols since they are not params):
            b_params = bp_df.T[scorer][:-2].to_dict()
            if verbose:
                print('{} scorer, params:{}'.format(scorer, b_params))
            # apply the best hyper-parameters before running the permutation test:
            sk_model.set_params(**b_params)
true, perm_scrs, pval = permutation_test_score(sk_model, X, y,
cv=cv,
n_permutations=Ptest,
scoring=scorers(scorer),
random_state=0,
n_jobs=-1)
true_scores.append(true)
pvals.append(pval)
perm_scores.append(perm_scrs)
true_da = xr.DataArray(true_scores, dims=['scorer'])
true_da['scorer'] = [x for x in bp_df.index.values]
true_da.name = 'true_score'
pval_da = xr.DataArray(pvals, dims=['scorer'])
pval_da['scorer'] = [x for x in bp_df.index.values]
pval_da.name = 'pvalue'
perm_da = xr.DataArray(perm_scores, dims=['scorer', 'permutations'])
perm_da['scorer'] = [x for x in bp_df.index.values]
perm_da['permutations'] = np.arange(1, Ptest+1)
perm_da.name = 'permutation_score'
ds = xr.merge([true_da, pval_da, perm_da])
return ds
ml = ML_Classifier_Switcher()
if params is not None:
best_p_df = best_params_df['1-{}'.format(len(best_params_df))]
for key, value in params.items():
if isinstance(value, tuple):
for ind in best_p_df.index:
best_p_df.at[ind, key] = value
else:
best_p_df[key] = value
dss = run_one_permutation_test(bp_df=best_p_df)
else:
if verbose:
print('Picking {} model with best params'.format(model_name))
splits = []
for i, df in enumerate(best_params_df.values()):
if verbose:
print('running on split #{}'.format(i+1))
            ds = run_one_permutation_test(bp_df=df)
splits.append(ds)
dss = xr.concat(splits, dim='outer_split')
dss['outer_split'] = np.arange(1, len(best_params_df)+ 1)
return dss
def run_test_on_CV_split(X_train, y_train, X_test, y_test, param_df,
model_name='SVC', verbose=False, PI=None,
Ptest=None):
import numpy as np
import xarray as xr
import pandas as pd
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.inspection import permutation_importance
best_df = param_df.copy()
ml = ML_Classifier_Switcher()
if verbose:
print('Picking {} model with best params'.format(model_name))
# print('Features are: {}'.format(features))
test_scores = []
fi_list = []
mean_fpr = np.linspace(0, 1, 100)
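    # common FPR grid: each scorer's ROC curve is interpolated onto these 100 points
    # so the curves can be stacked and averaged across scorers/outer splits later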
tprs = []
roc_aucs = []
pi_mean_list = []
pi_std_list = []
for scorer in best_df.index:
sk_model = ml.pick_model(model_name)
# get best params (drop two last cols since they are not params):
params = best_df.T[scorer][:-2].to_dict()
if verbose:
print('{} scorer, params:{}'.format(scorer, params))
sk_model.set_params(**params)
sk_model.fit(X_train, y_train)
if hasattr(sk_model, 'feature_importances_'):
# print(X_train['feature'])
# input('press any key')
FI = xr.DataArray(sk_model.feature_importances_, dims=['feature'])
FI['feature'] = X_train['feature']
fi_list.append(FI)
y_pred = sk_model.predict(X_test)
fpr, tpr, _ = roc_curve(y_test, y_pred)
interp_tpr = np.interp(mean_fpr, fpr, tpr)
interp_tpr[0] = 0.0
roc_auc = roc_auc_score(y_test, y_pred)
roc_aucs.append(roc_auc)
tprs.append(interp_tpr)
score = scorer_function(scorer, y_test, y_pred)
test_scores.append(score)
if PI is not None:
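            # permutation importance on the held-out split: mean/std drop in the chosen
            # scorer when each feature is shuffled, repeated PI times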
pi = permutation_importance(sk_model, X_test, y_test,
n_repeats=PI,
scoring=scorers(scorer),
random_state=0, n_jobs=-1)
pi_mean = xr.DataArray(pi['importances_mean'], dims='feature')
pi_std = xr.DataArray(pi['importances_std'], dims='feature')
pi_mean.name = 'PI_mean'
pi_std.name = 'PI_std'
pi_mean['feature'] = X_train['feature']
pi_std['feature'] = X_train['feature']
pi_mean_list.append(pi_mean)
pi_std_list.append(pi_std)
if PI is not None:
pi_mean_da = xr.concat(pi_mean_list, 'scorer')
pi_std_da = xr.concat(pi_std_list, 'scorer')
pi_mean_da['scorer'] = [x for x in best_df.index.values]
pi_std_da['scorer'] = [x for x in best_df.index.values]
roc_df = pd.DataFrame(tprs).T
roc_df.columns = [x for x in best_df.index]
roc_df.index = mean_fpr
best_df['test_score'] = test_scores
best_df['roc_auc_score'] = roc_aucs
if hasattr(sk_model, 'feature_importances_'):
fi = xr.concat(fi_list, 'scorer')
fi['scorer'] = [x for x in best_df.index.values]
if PI is not None:
return best_df, roc_df, fi, pi_mean_da, pi_std_da
else:
return best_df, roc_df, fi
elif PI is not None:
return best_df, roc_df, pi_mean_da, pi_std_da
else:
return best_df, roc_df
def holdout_test(path=hydro_path, gr_path=hydro_ml_path/'holdout',
model_name='SVC', features='pwv', return_RF_FI=False,
verbose=False):
"""do a holdout test with best model from gridsearchcv
with all scorers"""
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
import xarray as xr
import pandas as pd
import numpy as np
# process gridsearchcv results:
best_df, test_ratio, seed = load_one_gridsearchcv_object(path=gr_path,
cv_type='holdout',
features=features,
model_name=model_name,
verbose=False)
print('Using random seed of {} and {}% test ratio'.format(seed, test_ratio))
ts = int(test_ratio) / 100
X, y = prepare_X_y_for_holdout_test(features, model_name, path)
# split using test_size and seed:
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=ts,
random_state=int(seed),
stratify=y)
if verbose:
print('y train pos/neg:{}, {}'.format((y_train==1).sum().item(),(y_train==0).sum().item()))
print('y test pos/neg:{}, {}'.format((y_test==1).sum().item(),(y_test==0).sum().item()))
# pick model and set the params to best from gridsearchcv:
ml = ML_Classifier_Switcher()
print('Picking {} model with best params'.format(model_name))
print('Features are: {}'.format(features))
test_scores = []
fi_list = []
mean_fpr = np.linspace(0, 1, 100)
tprs = []
roc_aucs = []
for scorer in best_df.index:
sk_model = ml.pick_model(model_name)
# get best params (drop two last cols since they are not params):
params = best_df.T[scorer][:-2].to_dict()
if verbose:
print('{} scorer, params:{}'.format(scorer, params))
sk_model.set_params(**params)
sk_model.fit(X_train, y_train)
if hasattr(sk_model, 'feature_importances_'):
FI = xr.DataArray(sk_model.feature_importances_, dims=['feature'])
FI['feature'] = X_train['feature']
fi_list.append(FI)
y_pred = sk_model.predict(X_test)
fpr, tpr, _ = roc_curve(y_test, y_pred)
interp_tpr = np.interp(mean_fpr, fpr, tpr)
interp_tpr[0] = 0.0
roc_auc = roc_auc_score(y_test, y_pred)
roc_aucs.append(roc_auc)
tprs.append(interp_tpr)
score = scorer_function(scorer, y_test, y_pred)
test_scores.append(score)
roc_df = pd.DataFrame(tprs).T
roc_df.columns = [x for x in best_df.index]
roc_df.index = mean_fpr
best_df['holdout_test_scores'] = test_scores
best_df['roc_auc_score'] = roc_aucs
if fi_list and return_RF_FI:
da = xr.concat(fi_list, 'scorer')
da['scorer'] = best_df.index.values
da.name = 'RF_feature_importances'
return da
return best_df, roc_df
def load_one_gridsearchcv_object(path=hydro_ml_path, cv_type='holdout', features='pwv',
model_name='SVC', verbose=True):
"""load one gridsearchcv obj with model_name and features and run read_one_gridsearchcv_object"""
from aux_gps import path_glob
import joblib
# first filter for model name:
if verbose:
print('loading GridsearchCVs results for {} model with {} cv type'.format(model_name, cv_type))
model_files = path_glob(path, 'GRSRCHCV_{}_*.pkl'.format(cv_type))
model_files = [x for x in model_files if model_name in x.as_posix()]
# now select features:
if verbose:
print('loading GridsearchCVs results with {} features'.format(features))
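    # NOTE: the 4th underscore-separated field of the file name is assumed to encode
    # the feature set (e.g. 'pwv' or 'pwv+pressure')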
model_features = [x.as_posix().split('/')[-1].split('_')[3] for x in model_files]
feat_ind = get_feature_set_from_list(model_features, features)
# also get the test ratio and seed number:
if len(feat_ind) > 1:
if verbose:
print('found {} GR objects.'.format(len(feat_ind)))
files = sorted([model_files[x] for x in feat_ind])
outer_splits = [x.as_posix().split('/')[-1].split('.')[0].split('_')[-3] for x in files]
grs = [joblib.load(x) for x in files]
best_dfs = [read_one_gridsearchcv_object(x) for x in grs]
di = dict(zip(outer_splits, best_dfs))
return di
else:
        file = model_files[feat_ind[0]]
seed = file.as_posix().split('/')[-1].split('.')[0].split('_')[-1]
outer_splits = file.as_posix().split('/')[-1].split('.')[0].split('_')[-3]
# load and produce best_df:
gr = joblib.load(file)
best_df = read_one_gridsearchcv_object(gr)
return best_df, outer_splits, seed
def get_feature_set_from_list(model_features_list, features, sep='+'):
"""select features from model_features_list,
return the index in the model_features_list and the entry itself"""
# first find if features is a single or multiple features:
if isinstance(features, str) and sep not in features:
try:
ind = [i for i, e in enumerate(model_features_list) if e == features]
# ind = model_features_list.index(features)
except ValueError:
raise ValueError('{} is not in {}'.format(features, ', '.join(model_features_list)))
elif isinstance(features, str) and sep in features:
features_split = features.split(sep)
mf = [x.split(sep) for x in model_features_list]
bool_list = [set(features_split) == (set(x)) for x in mf]
ind = [i for i, x in enumerate(bool_list) if x]
# print(len(ind))
# ind = ind[0]
# feat = model_features_list[ind]
# feat = model_features_list[ind]
return ind
def read_one_gridsearchcv_object(gr):
"""read one gridsearchcv multimetric object and
get the best params, best mean/std scores"""
import pandas as pd
# first get all the scorers used:
scorers = [x for x in gr.scorer_.keys()]
# now loop over the scorers:
best_params = []
best_mean_scores = []
best_std_scores = []
for scorer in scorers:
df_mean = pd.concat([pd.DataFrame(gr.cv_results_["params"]), pd.DataFrame(
gr.cv_results_["mean_test_{}".format(scorer)], columns=[scorer])], axis=1)
df_std = pd.concat([pd.DataFrame(gr.cv_results_["params"]), pd.DataFrame(
gr.cv_results_["std_test_{}".format(scorer)], columns=[scorer])], axis=1)
# best index = highest score:
best_ind = df_mean[scorer].idxmax()
best_mean_scores.append(df_mean.iloc[best_ind][scorer])
best_std_scores.append(df_std.iloc[best_ind][scorer])
best_params.append(df_mean.iloc[best_ind].to_frame().T.iloc[:, :-1])
best_df = pd.concat(best_params)
best_df['mean_score'] = best_mean_scores
best_df['std_score'] = best_std_scores
best_df.index = scorers
return best_df
# # param grid dict:
# params = gr.param_grid
# # scorer names:
# scoring = [x for x in gr.scoring.keys()]
# # df:
# df = pd.DataFrame().from_dict(gr.cv_results_)
# # produce multiindex from param_grid dict:
# param_names = [x for x in params.keys()]
# # unpack param_grid vals to list of lists:
# pro = [[y for y in x] for x in params.values()]
# ind = pd.MultiIndex.from_product((pro), names=param_names)
# df.index = ind
# best_params = []
# best_mean_scores = []
# best_std_scores = []
# for scorer in scoring:
# best_params.append(df[df['rank_test_{}'.format(scorer)]==1]['mean_test_{}'.format(scorer)].index[0])
# best_mean_scores.append(df[df['rank_test_{}'.format(scorer)]==1]['mean_test_{}'.format(scorer)].iloc[0])
# best_std_scores.append(df[df['rank_test_{}'.format(scorer)]==1]['std_test_{}'.format(scorer)].iloc[0])
# best_df = pd.DataFrame(best_params, index=scoring, columns=param_names)
# best_df['mean_score'] = best_mean_scores
# best_df['std_score'] = best_std_scores
# return best_df, best_df_1
def process_gridsearch_results(GridSearchCV, model_name,
split_dim='inner_kfold', features=None,
pwv_id=None, hs_id=None, test_size=None):
import xarray as xr
import pandas as pd
import numpy as np
    # finish getting best results from all scorers together
    """takes a fitted GridSearchCV object with cv_results_ and converts it into an xarray Dataset"""
params = GridSearchCV.param_grid
scoring = GridSearchCV.scoring
results = GridSearchCV.cv_results_
# for scorer in scoring:
# for sample in ['train', 'test']:
# sample_score_mean = results['mean_{}_{}'.format(sample, scorer)]
# sample_score_std = results['std_{}_{}'.format(sample, scorer)]
# best_index = np.nonzero(results['rank_test_{}'.format(scorer)] == 1)[0][0]
# best_score = results['mean_test_{}'.format(scorer)][best_index]
names = [x for x in params.keys()]
# unpack param_grid vals to list of lists:
pro = [[y for y in x] for x in params.values()]
ind = pd.MultiIndex.from_product((pro), names=names)
# result_names = [x for x in GridSearchCV.cv_results_.keys() if 'split'
# not in x and 'time' not in x and 'param' not in x and
# 'rank' not in x]
result_names = [
x for x in results.keys() if 'param' not in x]
ds = xr.Dataset()
for da_name in result_names:
da = xr.DataArray(results[da_name])
ds[da_name] = da
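    # re-index the flat cv_results_ arrays by the hyper-parameter grid and unstack,
    # so that every hyper-parameter becomes its own dimension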
ds = ds.assign(dim_0=ind).unstack('dim_0')
for dim in ds.dims:
if ds[dim].dtype == 'O':
try:
ds[dim] = ds[dim].astype(str)
except ValueError:
ds = ds.assign_coords({dim: [str(x) for x in ds[dim].values]})
if ('True' in ds[dim]) and ('False' in ds[dim]):
ds[dim] = ds[dim] == 'True'
# get all splits data and concat them along number of splits:
all_splits = [x for x in ds.data_vars if 'split' in x]
train_splits = [x for x in all_splits if 'train' in x]
test_splits = [x for x in all_splits if 'test' in x]
# loop over scorers:
trains = []
tests = []
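    # for every scorer, stack the per-split train/test scores along the inner-kfold dimension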
for scorer in scoring:
train_splits_scorer = [x for x in train_splits if scorer in x]
trains.append(xr.concat([ds[x]
for x in train_splits_scorer], split_dim))
test_splits_scorer = [x for x in test_splits if scorer in x]
tests.append(xr.concat([ds[x] for x in test_splits_scorer], split_dim))
splits_scorer = np.arange(1, len(train_splits_scorer) + 1)
train_splits = xr.concat(trains, 'scoring')
test_splits = xr.concat(tests, 'scoring')
# splits = [x for x in range(len(train_splits))]
# train_splits = xr.concat([ds[x] for x in train_splits], 'split')
# test_splits = xr.concat([ds[x] for x in test_splits], 'split')
# replace splits data vars with newly dataarrays:
ds = ds[[x for x in ds.data_vars if x not in all_splits]]
ds['split_train_score'] = train_splits
ds['split_test_score'] = test_splits
ds[split_dim] = splits_scorer
if isinstance(scoring, list):
ds['scoring'] = scoring
elif isinstance(scoring, dict):
ds['scoring'] = [x for x in scoring.keys()]
ds.attrs['name'] = 'CV_results'
ds.attrs['param_names'] = names
ds.attrs['model_name'] = model_name
ds.attrs['{}_splits'.format(split_dim)] = ds[split_dim].size
if GridSearchCV.refit:
if hasattr(GridSearchCV.best_estimator_, 'feature_importances_'):
f_import = xr.DataArray(
GridSearchCV.best_estimator_.feature_importances_,
dims=['feature'])
f_import['feature'] = features
ds['feature_importances'] = f_import
ds['best_score'] = GridSearchCV.best_score_
# ds['best_model'] = GridSearchCV.best_estimator_
ds.attrs['refitted_scorer'] = GridSearchCV.refit
for name in names:
if isinstance(GridSearchCV.best_params_[name], tuple):
GridSearchCV.best_params_[name] = ','.join(
map(str, GridSearchCV.best_params_[name]))
ds['best_{}'.format(name)] = GridSearchCV.best_params_[name]
return ds, GridSearchCV.best_estimator_
else:
return ds, None
def save_cv_results(cvr, savepath=hydro_path):
from aux_gps import save_ncfile
features = '+'.join(cvr.attrs['features'])
# pwv_id = cvr.attrs['pwv_id']
# hs_id = cvr.attrs['hs_id']
# neg_pos_ratio = cvr.attrs['neg_pos_ratio']
ikfolds = cvr.attrs['inner_kfold_splits']
okfolds = cvr.attrs['outer_kfold_splits']
name = cvr.attrs['model_name']
refitted_scorer = cvr.attrs['refitted_scorer'].replace('_', '-')
# filename = 'CVR_{}_{}_{}_{}_{}_{}_{}_{}.nc'.format(pwv_id, hs_id,
# name, features, refitted_scorer, ikfolds, okfolds, neg_pos_ratio)
filename = 'CVR_{}_{}_{}_{}_{}.nc'.format(
name, features, refitted_scorer, ikfolds, okfolds)
save_ncfile(cvr, savepath, filename)
return
def scikit_fit_predict(X, y, seed=42, with_pressure=True, n_splits=7,
plot=True):
# step1: CV for train/val (80% from 80-20 test). display results with
# model and scores(AUC, f1), use StratifiedKFold
# step 2: use validated model with test (20%) and build ROC curve
# step 3: add features (pressure) but check for correlation
# check permutations with scikit learn
from sklearn.model_selection import train_test_split
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.metrics import f1_score
from sklearn.metrics import plot_roc_curve
from sklearn.svm import SVC
from numpy import interp
from sklearn.metrics import auc
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
from sklearn.model_selection import LeaveOneOut
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
if not with_pressure:
just_pw = [x for x in X.feature.values if 'pressure' not in x]
X = X.sel(feature=just_pw)
X_tt, X_test, y_tt, y_test = train_test_split(
X, y, test_size=0.2, shuffle=True, random_state=seed)
cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
# cv = LeaveOneOut()
classifier = SVC(kernel='rbf', probability=False,
random_state=seed)
# classifier = LinearDiscriminantAnalysis()
# clf = QuadraticDiscriminantAnalysis()
scores = []
fig, ax = plt.subplots()
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
for i, (train, val) in enumerate(cv.split(X_tt, y_tt)):
# for i in range(100):
# X_train, X_val, y_train, y_val = train_test_split(
# X_tt, y_tt, shuffle=True, test_size=0.5, random_state=i)
# clf.fit(X_train, y_train)
classifier.fit(X_tt[train], y_tt[train])
# viz = plot_roc_curve(clf, X_val, y_val,
# name='ROC run {}'.format(i),
# alpha=0.3, lw=1, ax=ax)
viz = plot_roc_curve(classifier, X_tt[val], y_tt[val],
name='ROC fold {}'.format(i),
alpha=0.3, lw=1, ax=ax)
interp_tpr = interp(mean_fpr, viz.fpr, viz.tpr)
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
# aucs.append(viz.roc_auc)
# y_pred = clf.predict(X_val)
y_pred = classifier.predict(X_tt[val])
aucs.append(roc_auc_score(y_tt[val], y_pred))
# scores.append(clf.score(X_val, y_val))
scores.append(f1_score(y_tt[val], y_pred))
scores = np.array(scores)
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax.plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],
title="Receiver operating characteristic example")
ax.legend(loc="lower right")
ax.set_title(
'ROC curve for KFold={}, with pressure anomalies.'.format(n_splits))
if not with_pressure:
ax.set_title(
'ROC curve for KFold={}, without pressure anomalies.'.format(n_splits))
y_test_predict = classifier.predict(X_test)
print('final test predict score:')
print(f1_score(y_test, y_test_predict))
if plot:
plt.figure()
plt.hist(scores, bins=15, edgecolor='k')
return scores
# clf.fit(X,y)
def produce_X_y_from_list(pw_stations=['drag', 'dsea', 'elat'],
hs_ids=[48125, 48199, 60170],
pressure_station='bet-dagan', max_flow=0,
window=25, neg_pos_ratio=1, path=work_yuval,
ims_path=ims_path, hydro_path=hydro_path,
concat_Xy=False):
if isinstance(hs_ids, int):
hs_ids = [hs_ids for x in range(len(pw_stations))]
kwargs = locals()
[kwargs.pop(x) for x in ['pw_stations', 'hs_ids', 'concat_Xy']]
Xs = []
ys = []
for pw_station, hs_id in list(zip(pw_stations, hs_ids)):
X, y = produce_X_y(pw_station, hs_id, **kwargs)
Xs.append(X)
ys.append(y)
if concat_Xy:
print('concatenating pwv stations {}, with hydro_ids {}.'.format(
pw_stations, hs_ids))
X, y = concat_X_y(Xs, ys)
return X, y
else:
return Xs, ys
def concat_X_y(Xs, ys):
import xarray as xr
import pandas as pd
X_attrs = [x.attrs for x in Xs]
X_com_attrs = dict(zip(pd.DataFrame(X_attrs).T.index.values,
pd.DataFrame(X_attrs).T.values.tolist()))
y_attrs = [x.attrs for x in ys]
y_com_attrs = dict(zip(pd.DataFrame(y_attrs).T.index.values,
pd.DataFrame(y_attrs).T.values.tolist()))
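    # strip the per-station suffix from the feature names so samples from different
    # stations share the same feature coordinate before concatenation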
for X in Xs:
feat = [x.replace('_' + X.attrs['pwv_id'], '')
for x in X.feature.values]
X['feature'] = feat
X = xr.concat(Xs, 'sample')
X.attrs = X_com_attrs
y = xr.concat(ys, 'sample')
y.attrs = y_com_attrs
return X, y
def produce_X_y(pw_station='drag', hs_id=48125, pressure_station='bet-dagan',
window=25, seed=42,
max_flow=0, neg_pos_ratio=1, path=work_yuval,
ims_path=ims_path, hydro_path=hydro_path):
import xarray as xr
from aux_gps import anomalize_xr
from PW_stations import produce_geo_gnss_solved_stations
import numpy as np
# call preprocess_hydro_station
hdf, y_meta = preprocess_hydro_station(
hs_id, hydro_path, max_flow=max_flow)
# load PWV and other features and combine them to fdf:
pw = xr.open_dataset(path / 'GNSS_PW_anom_hourly_50_hour_dayofyear.nc')
fdf = pw[pw_station].to_dataframe(name='pwv_{}'.format(pw_station))
# add Day of year to fdf:
doy = fdf.index.dayofyear
# scale doy to cyclic with amp ~1:
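    # (sin/cos pair with a period of 2*183 = 366 days keeps the end and start of the year close)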
fdf['doy_sin'] = np.sin(doy * np.pi / 183)
fdf['doy_cos'] = np.cos(doy * np.pi / 183)
if pressure_station is not None:
p = xr.load_dataset(
ims_path /
'IMS_BD_hourly_ps_1964-2020.nc')[pressure_station]
p_attrs = p.attrs
p_attrs = {'pressure_{}'.format(
key): val for key, val in p_attrs.items()}
p = p.sel(time=slice('1996', None))
p = anomalize_xr(p, freq='MS')
fdf['pressure_{}'.format(pressure_station)] = p.to_dataframe()
# check the the last date of hdf is bigger than the first date of fdf,
# i.e., there is at least one overlapping event in the data:
if hdf.index[-1] < fdf.index[0]:
raise KeyError('Data not overlapping, hdf for {} stops at {} and fdf starts at {}'.format(
hs_id, hdf.index[-1], fdf.index[0]))
# finally, call add_features_and_produce_X_y
X, y = add_features_and_produce_X_y(hdf, fdf, window_size=window,
seed=seed,
neg_pos_ratio=neg_pos_ratio)
# add meta data:
gps = produce_geo_gnss_solved_stations(plot=False)
pwv_attrs = gps.loc[pw_station, :][['lat', 'lon', 'alt', 'name']].to_dict()
pwv_attrs = {'pwv_{}'.format(key): val for key, val in pwv_attrs.items()}
X.attrs = pwv_attrs
if pressure_station is not None:
X.attrs.update(p_attrs)
y.attrs = y_meta
y.attrs['hydro_station_id'] = hs_id
y.attrs['neg_pos_ratio'] = neg_pos_ratio
# calculate distance to hydro station:
lat1 = X.attrs['pwv_lat']
lon1 = X.attrs['pwv_lon']
lat2 = y.attrs['lat']
lon2 = y.attrs['lon']
y.attrs['max_flow'] = max_flow
distance = calculate_distance_between_two_latlons_israel(
lat1, lon1, lat2, lon2)
X.attrs['distance_to_hydro_station_in_km'] = distance / 1000.0
y.attrs['distance_to_pwv_station_in_km'] = distance / 1000.0
X.attrs['pwv_id'] = pw_station
return X, y
# def produce_X_y(station='drag', hs_id=48125, lag=25, anoms=True,
# neg_pos_ratio=2, add_pressure=False,
# path=work_yuval, hydro_path=hydro_path, with_ends=False,
# seed=42,
# verbose=True, return_xarray=False, pressure_anoms=None):
# import pandas as pd
# import numpy as np
# import xarray as xr
#
# def produce_da_from_list(event_list, feature='pwv'):
# X_da = xr.DataArray(event_list, dims=['sample', 'feature'])
# X_da['feature'] = ['{}_{}'.format(feature, x) for x in np.arange(0, 24, 1)]
# X_df = pd.concat(event_list)
# X_da['sample'] = [x for x in X_df.index[::24]]
# return X_da
#
# df = preprocess_hydro_pw(
# pw_station=station,
# hs_id=hs_id,
# path=path,
# hydro_path=hydro_path,
# with_tide_ends=with_ends, anoms=anoms,
# pressure_anoms=pressure_anoms,
# add_pressure=add_pressure)
# if pressure_anoms is not None:
# station = pressure_anoms.name
# # first produce all the positives:
# # get the tides datetimes:
# y_pos = df[df['tides'] == 1]['tides']
# # get the datetimes of 24 hours before tide event (not inclusive):
# y_lag_pos = y_pos.index - pd.Timedelta(lag, unit='H')
# masks = [(df.index > start) & (df.index < end)
# for start, end in zip(y_lag_pos, y_pos.index)]
# # also drop event if less than 24 hour before available:
# pw_pos_list = []
# pressure_pos_list = []
# ind = []
# bad_ind = []
# for i, tide in enumerate(masks):
# if len(df['tides'][tide]) == (lag - 1):
# pw_pos_list.append(df[station][tide])
# pressure_pos_list.append(df['pressure'][tide])
# ind.append(i)
# else:
# bad_ind.append(i)
# # get the indices of the dropped events:
# # ind = [x[0] for x in pw_pos_list]
# if bad_ind:
# if verbose:
# print('{} are without full 24 hours before record.'.format(
# ','.join([x for x in df.iloc[bad_ind].index.strftime('%Y-%m-%d:%H:00:00')])))
# # drop the events in y so len(y) == in each x from tides_list:
# y_pos_arr = y_pos.iloc[ind].values
# # now get the negative y's with neg_pos_ratio (set to 1 if the same pos=neg):
# y_neg_arr = np.zeros(y_pos_arr.shape[0] * neg_pos_ratio)
# cnt = 0
# pw_neg_list = []
# pressure_neg_list = []
# np.random.seed(seed)
# while cnt < len(y_neg_arr):
# # get a random date from df:
# r = np.random.randint(low=0, high=len(df))
# # slice -24 to 24 range with t=0 being the random date:
# # update: extend the range to -72 hours to 72 hours:
# lag_factor = 72 / lag
# slice_range = int(lag * lag_factor)
# sliced = df.iloc[r - slice_range:r + slice_range]
# # if tides inside this date range, continue:
# if y_pos.iloc[ind].index in sliced.index:
# if verbose:
# print('found positive tide in randomly sliced 48 window')
# continue
# # now if no 24 items exist, also continue:
# negative = df.iloc[r - lag:r - 1][station]
# if len(negative) != (lag-1):
# if verbose:
# print('didnt find full {} hours sliced negative'.format(lag-1))
# continue
# # else, append to pw_neg_list and increase cnt
# pw_neg_list.append(negative)
# pressure_neg_list.append(df.iloc[r - lag:r - 1]['pressure'])
# cnt += 1
# # lastly, assemble for X, y using np.columnstack:
# y = np.concatenate([y_pos_arr, y_neg_arr])
# X = np.stack([[x.values for x in pw_pos_list] +
# [x.values for x in pw_neg_list]])
# X = X.squeeze()
# pw_pos_da = produce_da_from_list(pw_pos_list, feature='pwv')
# pw_neg_da = produce_da_from_list(pw_neg_list, feature='pwv')
# pr_pos_da = produce_da_from_list(pressure_pos_list, feature='pressure')
# pr_neg_da = produce_da_from_list(pressure_neg_list, feature='pressure')
# if return_xarray:
# y = xr.DataArray(y, dims='sample')
# X_pwv = xr.concat([pw_pos_da, pw_neg_da], 'sample')
# X_pressure = xr.concat([pr_pos_da, pr_neg_da], 'sample')
# X = xr.concat([X_pwv, X_pressure], 'feature')
# X.name = 'X'
# y['sample'] = X['sample']
# y.name = 'y'
# X.attrs['PWV_station'] = station
# X.attrs['hydro_station_id'] = hs_id
# y.attrs = X.attrs
# return X, y
# else:
# return X, y
def plot_Xpos_Xneg_mean_std(X_pos_da, X_neg_da):
import matplotlib.pyplot as plt
from PW_from_gps_figures import plot_field_with_fill_between
fig, ax = plt.subplots(figsize=(8, 6))
posln = plot_field_with_fill_between(X_pos_da, ax=ax, mean_dim='event',
dim='time', color='b', marker='s')
negln = plot_field_with_fill_between(X_neg_da, ax=ax, mean_dim='event',
dim='time', color='r', marker='o')
ax.legend(posln+negln, ['Positive tide events', 'Negative tide events'])
ax.grid()
return fig
def preprocess_hydro_station(hs_id=48125, hydro_path=hydro_path, max_flow=0,
with_tide_ends=False):
"""load hydro station tide events with max_flow and round it up to
hourly sample rate, with_tide_ends, puts the value 2 at the datetime of
tide end. regardless 1 is the datetime for tide event."""
import xarray as xr
import pandas as pd
import numpy as np
# first load tides data:
all_tides = xr.open_dataset(hydro_path / 'hydro_tides.nc')
# get all tides for specific station without nans:
sta_slice = [x for x in all_tides.data_vars if str(hs_id) in x]
sta_slice = [
x for x in sta_slice if 'max_flow' in x or 'tide_end' in x or 'tide_max' in x]
if not sta_slice:
raise KeyError('hydro station {} not found in database'.format(hs_id))
tides = all_tides[sta_slice].dropna('tide_start')
max_flow_tide = tides['TS_{}_max_flow'.format(hs_id)]
max_flow_attrs = max_flow_tide.attrs
tide_starts = tides['tide_start'].where(
~tides.isnull()).where(max_flow_tide > max_flow).dropna('tide_start')['tide_start']
tide_ends = tides['TS_{}_tide_end'.format(hs_id)].where(
~tides.isnull()).where(max_flow_tide > max_flow).dropna('tide_start')['TS_{}_tide_end'.format(hs_id)]
max_flows = max_flow_tide.where(
max_flow_tide > max_flow).dropna('tide_start')
# round all tide_starts to hourly:
ts = tide_starts.dt.round('1H')
max_flows = max_flows.sel(tide_start=ts, method='nearest')
max_flows['tide_start'] = ts
ts_end = tide_ends.dt.round('1H')
time_dt = pd.date_range(
start=ts.min().values,
end=ts_end.max().values,
freq='1H')
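    # build an hourly skeleton spanning the whole tide record, then mark tide starts
    # with 1 and store the associated max_flow next to them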
df = pd.DataFrame(data=np.zeros(time_dt.shape), index=time_dt)
df.loc[ts.values, 0] = 1
df.loc[ts.values, 1] = max_flows.loc[ts.values]
df.columns = ['tides', 'max_flow']
df = df.fillna(0)
if with_tide_ends:
df.loc[ts_end.values, :] = 2
return df, max_flow_attrs
def add_features_and_produce_X_y(hdf, fdf, window_size=25, seed=42,
neg_pos_ratio=1, plot=False):
"""hdf is the hydro events df and fdf is the features df in 'H' freq.
This function checks the fdf for window-sized data and hour before
each positive event.
returns the combined df (hdf+fdf) the positive events labels and features.
"""
import pandas as pd
import numpy as np
import xarray as xr
    # first prepend window_size hours of zeros to hdf so the earliest event can still have a full look-back window:
st = hdf.index[0] - pd.Timedelta(window_size, unit='H')
en = hdf.index[0]
dts = pd.date_range(st, en - pd.Timedelta(1, unit='H'), freq='H')
mdf = pd.DataFrame(
np.zeros(window_size),
index=dts,
columns=['tides'])
hdf = pd.concat([hdf, mdf], axis=0)
# check for hourly sample rate and concat:
if not pd.infer_freq(fdf.index) == 'H':
        raise ValueError('please resample fdf to hourly frequency first')
feature = [x for x in fdf.columns]
df = pd.concat([hdf, fdf], axis=1)
# get the tides(positive events) datetimes:
y_pos = df[df['tides'] == 1]['tides']
    # get the datetimes window_size hours before each tide event (not inclusive):
y_lag_pos = y_pos.index - pd.Timedelta(window_size, unit='H')
masks = [(df.index > start) & (df.index < end)
for start, end in zip(y_lag_pos, y_pos.index)]
# first check how many full periods of data the feature has:
avail = [window_size - 1 - df[feature][masks[x]].isnull().sum()
for x in range(len(masks))]
adf = pd.DataFrame(avail, index=y_pos.index, columns=feature)
if plot:
adf.plot(kind='bar')
# produce the positive events datetimes for which all the features have
# window sized data and hour before the event:
good_dts = adf[adf.loc[:, feature] == window_size - 1].dropna().index
# y array of positives (1's):
y_pos_arr = y_pos.loc[good_dts].values
# now produce the feature list itself:
good_inds_for_masks = [adf.index.get_loc(x) for x in good_dts]
good_masks = [masks[x] for x in good_inds_for_masks]
feature_pos_list = [df[feature][x].values for x in good_masks]
dts_pos_list = [df[feature][x].index[-1] +
pd.Timedelta(1, unit='H') for x in good_masks]
# TODO: add diagnostic mode for how and where are missing features
# now get the negative y's with neg_pos_ratio
# (set to 1 if the same pos=neg):
y_neg_arr = np.zeros(y_pos_arr.shape[0] * neg_pos_ratio)
cnt = 0
feature_neg_list = []
dts_neg_list = []
np.random.seed(seed)
while cnt < len(y_neg_arr):
# get a random date from df:
r = np.random.randint(low=0, high=len(df))
        # slice a +/- slice_range window around the random date (t=0),
        # i.e. roughly -72 to +72 hours, to make sure no tide event falls nearby:
window_factor = 72 / window_size
slice_range = int(window_size * window_factor)
sliced = df.iloc[r - slice_range:r + slice_range]
# if tides inside this date range, continue:
# try:
if not (y_pos.loc[good_dts].index.intersection(sliced.index)).empty:
# print('#')
continue
# except TypeError:
# return y_pos, good_dts, sliced
        # if a full window of window_size - 1 records is not available, also continue:
negative = df.iloc[r - window_size:r - 1][feature].dropna().values
if len(negative) != (window_size - 1):
# print('!')
continue
# get the negative datetimes (last record)
neg_dts = df.iloc[r - window_size:r -
1][feature].dropna().index[-1] + pd.Timedelta(1, unit='H')
# else, append to pw_neg_list and increase cnt
feature_neg_list.append(negative)
dts_neg_list.append(neg_dts)
cnt += 1
# print(cnt)
# lastly, assemble for X, y using np.columnstack:
y = np.concatenate([y_pos_arr, y_neg_arr])
# TODO: add exception where no features exist, i.e., there is no
# pw near flood events at all...
Xpos_da = xr.DataArray(feature_pos_list, dims=['sample', 'window', 'feat'])
Xpos_da['window'] = np.arange(0, window_size - 1)
Xpos_da['feat'] = adf.columns
Xpos_da['sample'] = dts_pos_list
Xneg_da = xr.DataArray(feature_neg_list, dims=['sample', 'window', 'feat'])
Xneg_da['window'] = np.arange(0, window_size - 1)
Xneg_da['feat'] = adf.columns
Xneg_da['sample'] = dts_neg_list
X = xr.concat([Xpos_da, Xneg_da], 'sample')
# if feature_pos_list[0].shape[1] > 0 and feature_neg_list[0].shape[1] > 0:
# xpos = [x.ravel() for x in feature_pos_list]
# xneg = [x.ravel() for x in feature_neg_list]
# X = np.column_stack([[x for x in xpos] +
# [x for x in xneg]])
y_dts = np.stack([[x for x in dts_pos_list]+[x for x in dts_neg_list]])
y_dts = y_dts.squeeze()
X_da = X.stack(feature=['feat', 'window'])
feature = ['_'.join([str(x), str(y)]) for x, y in X_da.feature.values]
X_da['feature'] = feature
y_da = xr.DataArray(y, dims=['sample'])
y_da['sample'] = y_dts
# feats = []
# for f in feature:
# feats.append(['{}_{}'.format(f, x) for x in np.arange(0, window_size
# - 1, 1)])
# X_da['feature'] = [item for sublist in feats for item in sublist]
return X_da, y_da
# def preprocess_hydro_pw(pw_station='drag', hs_id=48125, path=work_yuval,
# ims_path=ims_path,
# anoms=True, hydro_path=hydro_path, max_flow=0,
# with_tide_ends=False, pressure_anoms=None,
# add_pressure=False):
# import xarray as xr
# import pandas as pd
# import numpy as np
# from aux_gps import anomalize_xr
# # df.columns = ['tides']
# # now load pw:
# if anoms:
# pw = xr.load_dataset(path / 'GNSS_PW_anom_hourly_50_hour_dayofyear.nc')[pw_station]
# else:
# pw = xr.load_dataset(path / 'GNSS_PW_hourly_thresh_50.nc')[pw_station]
# if pressure_anoms is not None:
# pw = pressure_anoms
# pw_df = pw.dropna('time').to_dataframe()
# # now align the both dataframes:
# pw_df['tides'] = df['tides']
# pw_df['max_flow'] = df['max_flow']
# if add_pressure:
# pressure = xr.load_dataset(ims_path / 'IMS_BP_israeli_hourly.nc')['JERUSALEM-CENTRE']
# pressure = anomalize_xr(pressure, freq='MS')
# pr_df = pressure.dropna('time').to_dataframe()
# pw_df['pressure'] = pr_df
# pw_df = pw_df.fillna(0)
# return pw_df
def loop_over_gnss_hydro_and_aggregate(sel_hydro, pw_anom=False,
pressure_anoms=None,
max_flow_thresh=None,
hydro_path=hydro_path,
work_yuval=work_yuval, ndays=5,
ndays_forward=1,
plot=True, plot_all=False):
import xarray as xr
import matplotlib.pyplot as plt
from aux_gps import path_glob
filename = 'PW_tide_sites_{}_{}.nc'.format(ndays, ndays_forward)
if pw_anom:
filename = 'PW_tide_sites_anom_{}_{}.nc'.format(ndays, ndays_forward)
gnss_stations = []
if (hydro_path / filename).is_file():
print('loading {}...'.format(filename))
ds = xr.load_dataset(hydro_path / filename)
else:
if pw_anom:
file = path_glob(work_yuval, 'GNSS_PW_anom_*.nc')[-1]
gnss_pw = xr.open_dataset(file)
else:
gnss_pw = xr.open_dataset(
work_yuval / 'GNSS_PW_thresh_50_homogenized.nc')
just_pw = [x for x in gnss_pw.data_vars if '_error' not in x]
gnss_pw = gnss_pw[just_pw]
da_list = []
for i, gnss_sta in enumerate(just_pw):
            print('processing station {}'.format(gnss_sta))
sliced = sel_hydro[~sel_hydro[gnss_sta].isnull()]
hydro_ids = [x for x in sliced.id.values]
if not hydro_ids:
print(
'skipping {} station since no close hydro stations...'.format(gnss_sta))
continue
else:
try:
if pressure_anoms is not None:
pname = pressure_anoms.name
dass = aggregate_get_ndays_pw_hydro(
pressure_anoms,
hydro_ids,
max_flow_thresh=max_flow_thresh,
ndays=ndays, ndays_forward=ndays_forward,
plot=plot_all)
gnss_stations.append(gnss_sta)
dass.name = '{}_{}'.format(pname, i)
else:
dass = aggregate_get_ndays_pw_hydro(
gnss_pw[gnss_sta],
hydro_ids,
max_flow_thresh=max_flow_thresh,
ndays=ndays, ndays_forward=ndays_forward,
plot=plot_all)
da_list.append(dass)
except ValueError as e:
print('skipping {} because {}'.format(gnss_sta, e))
continue
ds = xr.merge(da_list)
ds.to_netcdf(hydro_path / filename, 'w')
if plot:
names = [x for x in ds.data_vars]
fig, ax = plt.subplots()
for name in names:
ds.mean('station').mean('tide_start')[name].plot.line(
marker='.', linewidth=0., ax=ax)
if pressure_anoms is not None:
names = [x.split('_')[0] for x in ds.data_vars]
names = [x + ' ({})'.format(y)
for x, y in zip(names, gnss_stations)]
ax.set_xlabel('Days before tide event')
ax.grid()
hstations = [ds[x].attrs['hydro_stations'] for x in ds.data_vars]
events = [ds[x].attrs['total_events'] for x in ds.data_vars]
fmt = list(zip(names, hstations, events))
ax.legend(['{} with {} stations ({} total events)'.format(x, y, z)
for x, y, z in fmt])
if pw_anom:
title = 'Mean PWV anomalies for tide stations near all GNSS stations'
ylabel = 'PWV anomalies [mm]'
else:
title = 'Mean PWV for tide stations near all GNSS stations'
ylabel = 'PWV [mm]'
if max_flow_thresh is not None:
title += ' (max_flow > {} m^3/sec)'.format(max_flow_thresh)
if pressure_anoms is not None:
ylabel = 'Surface pressure anomalies [hPa]'
title = 'Mean surface pressure anomaly in {} for all tide stations near GNSS stations'.format(
pname)
ax.set_title(title)
ax.set_ylabel(ylabel)
return ds
def aggregate_get_ndays_pw_hydro(pw_da, hs_ids, max_flow_thresh=None,
hydro_path=hydro_path, ndays=5,
ndays_forward=1, plot=True):
import xarray as xr
import matplotlib.pyplot as plt
das = []
max_flows_list = []
pw_ndays_list = []
if not isinstance(hs_ids, list):
hs_ids = [int(hs_ids)]
else:
hs_ids = [int(x) for x in hs_ids]
used_ids = []
events = []
for sid in hs_ids:
        print('processing hydro station {}'.format(sid))
try:
max_flows, pw_ndays, da = get_n_days_pw_hydro_all(pw_da, sid,
max_flow_thresh=max_flow_thresh,
hydro_path=hydro_path,
ndays=ndays, ndays_forward=ndays_forward,
return_max_flows=True,
plot=False)
das.append(da)
pw_ndays_list.append(pw_ndays)
max_flows_list.append(max_flows)
used_ids.append(sid)
events.append(max_flows.size)
except KeyError as e:
print('{}, skipping...'.format(e))
continue
except ValueError as e:
print('{}, skipping...'.format(e))
continue
pw_ndays = xr.concat(pw_ndays_list, 'time')
dass = xr.concat(das, 'station')
dass['station'] = used_ids
dass.name = pw_da.name
dass.attrs['hydro_stations'] = len(used_ids)
dass.attrs['total_events'] = sum(events)
if plot:
fig, ax = plt.subplots(figsize=(20, 4))
color = 'tab:blue'
pw_ndays.plot.line(marker='.', linewidth=0., color=color, ax=ax)
ax.tick_params(axis='y', labelcolor=color)
ax.set_ylabel('PW [mm]', color=color)
ax2 = ax.twinx()
color = 'tab:red'
for mf in max_flows_list:
mf.plot.line(marker='X', linewidth=0., color=color, ax=ax2)
ax2.tick_params(axis='y', labelcolor=color)
ax.grid()
ax2.set_title(
'PW in station {} {} days before tide events ({} total)'.format(
pw_da.name, ndays, sum(events)))
ax2.set_ylabel('max_flow [m^3/sec]', color=color)
fig.tight_layout()
fig, ax = plt.subplots()
for sid in used_ids:
dass.sel(
station=sid).mean('tide_start').plot.line(
marker='.', linewidth=0., ax=ax)
ax.set_xlabel('Days before tide event')
ax.set_ylabel('PW [mm]')
ax.grid()
fmt = list(zip(used_ids, events))
ax.legend(['station #{} ({} events)'.format(x, y) for x, y in fmt])
ax.set_title(
'Mean PW for tide stations near {} station'.format(pw_da.name))
if max_flow_thresh is not None:
ax.set_title(
'Mean PW for tide stations (above {} m^3/sec) near {} station'.format(
max_flow_thresh, pw_da.name))
return dass
def produce_pwv_days_before_tide_events(pw_da, hs_df, days_prior=1, drop_thresh=0.5,
days_after=1, plot=False, verbose=0,
max_gap='12H', rolling=12):
"""
    takes the PWV series and the hydro tide dates of one station,
    rounds the tide dates to the PWV sample rate,
    keeps only tide dates later than the first available PWV date,
    drops events with no PWV data within days_prior before the tide date,
    drops events with less than drop_thresh of a day of PWV data before the event,
    interpolates the remaining missing PWV points using a spline,
    and returns a dataframe of PWV from days_prior before to days_after after
    each tide event, plus the per-event after and prior PWV series.
Parameters
----------
    pw_da : xarray.DataArray
        PWV series of the GNSS station.
    hs_df : pandas.DataFrame
        hydro tide dataframe (tide-event index) for one station.
    days_prior : int, optional
        number of days before each tide event to keep. The default is 1.
    drop_thresh : float, optional
        minimum fraction of a day of PWV data required before an event. The default is 0.5.
    days_after : int, optional
        number of days after each tide event to keep. The default is 1.
    plot : bool, optional
        whether to plot the mean event-centred PWV curve. The default is False.
    verbose : int, optional
        verbosity level (0 or 1). The default is 0.
    max_gap : str, optional
        largest gap that is spline-interpolated. The default is '12H'.
    rolling : int, optional
        window length (in samples) of the centred rolling mean. The default is 12.
    Returns
    -------
    df : pandas.DataFrame
        per-event PWV series aligned on fractional days relative to the event.
    pwv_after_list : list of xarray.DataArray
        PWV slices from each event up to days_after after it.
    pwv_prior_list : list of xarray.DataArray
        PWV slices from days_prior before each event up to the event.
"""
import pandas as pd
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
if rolling is not None:
pw_da = pw_da.rolling(time=rolling, center=True).mean(keep_attrs=True)
if drop_thresh is None:
drop_thresh = 0
# first infer time freq of pw_da:
freq = xr.infer_freq(pw_da['time'])
if freq == '5T':
pts_per_day = 288
timedelta = pd.Timedelta(5, unit='min')
if freq == '1H' or freq == 'H':
pts_per_day = 24
timedelta = pd.Timedelta(1, unit='H')
# get the minimum dt of the pwv station:
min_dt = pw_da.dropna('time').time.min().values
# round the hs_df to 5 mins, and find the closest min_dt:
hs_df.index = hs_df.index.round(freq)
hs_df = hs_df[~hs_df.index.duplicated(keep='first')]
hs_df = hs_df.sort_index()
min_ind = hs_df.index.get_loc(min_dt, method='nearest')
    # slice the tides data accordingly:
hs_df = hs_df.iloc[min_ind:].dropna()
# loop over each tide start and grab the datetimes
pwv_prior_list = []
pwv_after_list = []
# se_list = []
tot_events = hs_df.index.size
event_cnt = 0
dropped_thresh = 0
dropped_no_data = 0
for ts in hs_df.index:
dt_prior = ts - pd.Timedelta(days_prior, unit='d')
dt_after = ts + pd.Timedelta(days_after, unit='d')
after_da = pw_da.sel(time=slice(ts, dt_after))
prior_da = pw_da.sel(time=slice(dt_prior, ts - timedelta))
if prior_da.dropna('time').size == 0:
if verbose == 1:
print('{} found no prior data for PWV {} days prior'.format(
ts.strftime('%Y-%m-%d %H:%M'), days_prior))
dropped_no_data += 1
continue
elif prior_da.dropna('time').size < pts_per_day*drop_thresh:
if verbose == 1:
print('{} found less than {} a day prior data for PWV {} days prior'.format(
ts.strftime('%Y-%m-%d %H:%M'), drop_thresh, days_prior))
dropped_thresh += 1
continue
if max_gap is not None:
prior_da = prior_da.interpolate_na(
'time', method='spline', max_gap=max_gap, keep_attrs=True)
event_cnt += 1
# if rolling is not None:
# after_da = after_da.rolling(time=rolling, center=True, keep_attrs=True).mean(keep_attrs=True)
# prior_da = prior_da.rolling(time=rolling, center=True, keep_attrs=True).mean(keep_attrs=True)
# after_da.name = pw_da.name + '_{}'.format(i)
pwv_after_list.append(after_da)
pwv_prior_list.append(prior_da)
# se = da.reset_index('time', drop=True).to_dataframe()[da.name]
# se_list.append(se)
se_list = []
for i, (prior, after) in enumerate(zip(pwv_prior_list, pwv_after_list)):
# return prior, after
# df_p = prior.to_dataframe()
# df_a = after.to_dataframe()
# return df_p, df_a
da = xr.concat([prior, after], 'time')
# print(da)
se = da.reset_index('time', drop=True).to_dataframe()
se.columns = [da.name + '_{}'.format(i)]
# print(se)
# [da.name + '_{}'.format(i)]
se_list.append(se)
df = pd.concat(se_list, axis=1)
df = df.iloc[:-1]
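    # index the combined frame in fractional days relative to the tide event (t=0 at the event)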
df.index = np.arange(-days_prior, days_after, 1/pts_per_day)
if verbose >= 0:
print('total events with pwv:{} , dropped due to no data: {}, dropped due to thresh:{}, left events: {}'.format(
tot_events, dropped_no_data, dropped_thresh, event_cnt))
if plot:
ax = df.T.mean().plot()
ax.grid()
ax.axvline(color='k', linestyle='--')
ax.set_xlabel('Days before tide event')
ax.set_ylabel('PWV anomalies [mm]')
ax.set_title('GNSS station: {} with {} events'.format(
pw_da.name.upper(), event_cnt))
better = df.copy()
better.index = pd.to_timedelta(better.index, unit='d')
better = better.resample('15S').interpolate(
method='cubic').T.mean().resample('5T').mean()
better = better.reset_index(drop=True)
better.index = np.linspace(-days_prior, days_after, better.index.size)
better.plot(ax=ax)
# fig, ax = plt.subplots(figsize=(20, 7))
# [pwv.plot.line(ax=ax) for pwv in pwv_list]
return df, pwv_after_list, pwv_prior_list
def get_n_days_pw_hydro_all(pw_da, hs_id, max_flow_thresh=None,
hydro_path=hydro_path, ndays=5, ndays_forward=1,
return_max_flows=False, plot=True):
"""calculate the mean of the PW ndays before all tide events in specific
hydro station. can use max_flow_thresh to get only event with al least
this max_flow i.e., big tide events"""
# important, DO NOT dropna pw_da!
import xarray as xr
import matplotlib.pyplot as plt
import pandas as pd
def get_n_days_pw_hydro_one_event(pw_da, tide_start, ndays=ndays, ndays_forward=0):
freq = pd.infer_freq(pw_da.time.values)
# for now, work with 5 mins data:
if freq == '5T':
points = int(ndays) * 24 * 12
points_forward = int(ndays_forward) * 24 * 12
elif freq == '10T':
points = int(ndays) * 24 * 6
points_forward = int(ndays_forward) * 24 * 6
elif freq == 'H':
points = int(ndays) * 24
points_forward = int(ndays_forward) * 24
lag = | pd.timedelta_range(end=0, periods=points, freq=freq) | pandas.timedelta_range |
# -*- coding: utf-8 -*-
# imports
import string
import logging, os, sys
import math
import re
import pandas as pd
from collections import Counter
from db.models import session, engine
from db.controller import Storage
from nltk.tokenize import sent_tokenize
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import datetime
from langdetect import detect, detect_langs
d = datetime.datetime.now()
dn = d.strftime("%Y-%m-%d")
FOLDER_PATH_SPAMFILTER = "d:\\CommuniGate Files\\SpamFilter\\"
FOLDER_PATH_LOG = "d:\\CommuniGate Files\\SpamFilter\\SpamFilterLog\\"
PATH_LOG = os.path.join(FOLDER_PATH_LOG, '{}.log'.format(dn))
logging.basicConfig(format='%(asctime)s.%(msecs)d %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.DEBUG,
filename=PATH_LOG)
storage = Storage(session)
spam = []
not_spam = []
csv_file = os.path.join(FOLDER_PATH_SPAMFILTER, 'spam_proposals_training.csv')
#csv_file = 'spam_proposals_training.csv'
#nltk.download()
spam_words = []
not_spam_words = []
main_table = pd.DataFrame({
'word': [],
'spam': [],
'no_spam': [],
'probability_of_spam': [],
'probability_not_spam': []
})
garbagelist = [u'спасибо', u'пожалуйста', u'добрый', u'день', u'вечер',u'заявка', u'прошу', u'доброе', u'утро']
def splitstring(str):
words = []
    # split the string on the characters listed in []
for i in re.split('[;,.,\n,\s,:,-,+,(,),=,/,«,»,@,\d,!,?,"]', str):
        # keep only "words" of 2 or more characters
if len(i) > 1:
            # skip filler/garbage words
if i in garbagelist:
pass
else:
words.append(i)
return words
def tokenize_ru(file_text):
try:
# firstly let's apply nltk tokenization
tokens = splitstring(file_text.lower())
# let's delete punctuation symbols
tokens = [i for i in tokens if (i not in string.punctuation)]
# deleting stop_words
stop_words = stopwords.words('russian')
stop_words.extend(['что', 'это', 'так', 'вот', 'быть', 'как', 'в', '—', '–', 'к', 'на', '...'])
tokens = [i for i in tokens if (i not in stop_words)]
# cleaning words
tokens = [i.replace("«", "").replace("»", "") for i in tokens]
tokens = [i for i in tokens if not (len(i) == 1)]
return tokens
except Exception as e:
logging.critical('Ошибка в spam_analysis_proposals.py функции tokenize_me: {}'.format(e))
# Function that computes the probability of word Xi occurring in a document of class Qk
def formula_1(N_ik, M, N_k):
#print("({} + {}) / ({} + {})".format(1, N_ik, M, N_k))
try:
return (1+N_ik)/(M+N_k)
except ZeroDivisionError as e:
logging.critical(
'Ошибка в spam_analysis_proposals.py функции formula_1, деления на ноль, вероятно таблица пуста: {}'.format(e))
except Exception as e:
logging.critical('Ошибка в spam_analysis_proposals.py функции formula_1: {}'.format(e))
def training():
try:
        # Training sample of spam emails:
for i in storage.select_mail(spam_or_no_spam=True):
spam.append(i.text)
        # Training sample of non-spam emails:
for i in storage.select_mail(spam_or_no_spam=False):
not_spam.append(i.text)
        # --------------- For spam ------------------
for line in spam:
spam_words.extend(tokenize_ru(line))
        # Build a table of unique words and their counts
unique_words = Counter(spam_words)
        # --------------- For non-spam ------------------
for line in not_spam:
            not_spam_words.extend(tokenize_ru(line))
main_table['word'] = tuple(unique_words.keys())
main_table['spam'] = tuple(unique_words.values())
main_table['no_spam'] = [0 for x in range(len(tuple(unique_words.values())))]
for i in range(len(not_spam_words)):
            # Create a boolean flag
need_word = True
for j in range(len(main_table.index)):
# Если "не спам" слово существует, то к счетчику уникальных слов +1
if not_spam_words[i] == main_table.loc[j, 'word']:
main_table.loc[j, "no_spam"] = main_table.loc[j, "no_spam"] + 1
need_word = False
            # If the word has not been seen yet, append it to the end of the data frame and initialise its counters
if need_word:
                main_table.loc[len(main_table.index)] = [not_spam_words[i], 0, 1, float('nan'), float('nan')]
main_table.to_csv(csv_file)
except Exception as e:
logging.critical('Ошибка в spam_analysis_proposals.py функции training: {}'.format(e))
def analysis(main_table, test_letter):
try:
        # Count the number of words in the training sample
quantity = len(main_table.index)
test_letter_list = []
        # --------------- For the test letter ------------------
sentences = [tokenize_ru(sent) for sent in sent_tokenize(test_letter, 'russian')]
for i in sentences:
test_letter_list.append(' '.join(i))
for i in range(len(test_letter_list)):
            # Reuse the same boolean flag instead of creating a new one
need_word = True
for j in range(len(main_table.index)):
                # If a word from the test letter already exists in our sample, compute the probability for each class
if test_letter_list[i] == main_table.loc[j, 'word']:
main_table.loc[j, 'probability_of_spam'] = formula_1(main_table.loc[j, 'spam'], quantity, sum(main_table['spam']))
main_table.loc[j, 'probability_not_spam'] = formula_1(main_table.loc[j, 'no_spam'], quantity, sum(main_table['no_spam']))
need_word = False
            # If the word is not there, append it to the end of the data frame and compute its spam / non-spam probabilities
if need_word:
main_table.loc[len(main_table.index)] = [test_letter_list[i], 0, 0,
formula_1(0, quantity, sum(main_table['spam'])),
formula_1(0, quantity, sum(main_table['no_spam']))]
        # Variable accumulating the score of the "Spam" class
probability_spam = 1
        # Variable accumulating the score of the "Not spam" class
probability_not_spam = 1
        # Variable accumulating the score of the "Spam" class (log variant)
probability_spam_log = 1
        # Variable accumulating the score of the "Not spam" class (log variant)
probability_not_spam_log = 1
for i in range(len(main_table.index)):
if not main_table.loc[i, 'probability_of_spam'] is None and not pd.isnull(
main_table.loc[i, 'probability_of_spam']):
                # Step 1.1: update the score that the letter is spam
probability_spam = probability_spam * main_table.loc[i, 'probability_of_spam']
if not main_table.loc[i, 'probability_not_spam'] is None and not pd.isnull(
main_table.loc[i, 'probability_not_spam']):
                # Step 1.2: update the score that the letter is not spam
probability_not_spam = probability_not_spam * main_table.loc[i, 'probability_not_spam']
#probability_spam = probability_spam * (2/4)
#probability_not_spam = probability_not_spam * (2/4)
        # Step 2.1: compute the final score that the letter is spam
probability_spam = (main_table['spam'].sum() / (main_table['spam'].sum() + main_table['no_spam'].sum())) * probability_spam
        # Step 2.2: compute the final score that the letter is not spam
probability_not_spam = (main_table['no_spam'].sum() / (main_table['spam'].sum() + main_table['no_spam'].sum())) * probability_not_spam
logging.debug("Оценка для категории «Спам»: {} Оценка для категории «Не спам»: {}".format(probability_spam, probability_not_spam))
logging.debug("Оценка для категории «Спам»: {} Оценка для категории «Не спам»: {}".format(math.log(probability_spam), math.log(probability_not_spam)))
spam_count = (probability_spam / probability_not_spam) * (math.log(probability_spam) / math.log(probability_not_spam))
        # The class with the higher score wins
if probability_spam > probability_not_spam:
return True, spam_count
else:
return False, spam_count
except Exception as e:
logging.critical('Ошибка в spam_analysis_proposals.py функции analysis: {}'.format(e))
return 'ERROR'
def spam_analysis_main(test_letter):
try:
if detect(test_letter) == 'ru':
if not os.path.isfile(csv_file):
training()
df = pd.read_csv(csv_file)
main_table = df.copy()
return analysis(main_table, test_letter)
else:
return True, 100
except Exception as e:
logging.critical('Ошибка в spam_analysis_proposals.py функции spam_analysis_main: {}'.format(e))
if __name__ == '__main__':
#test_letter = "В магазине гора яблок. Купи семь килограмм и шоколадку"
#test_letter = 'Путевки по низкой цене'
test_letter = 'Завтра состоится собрание'
if not os.path.isfile(csv_file):
print('тренировка')
training()
df = | pd.read_csv(csv_file) | pandas.read_csv |
import pandas as pd
import evaluation
import pytest
def test_labels() -> None:
labels = pd.DataFrame.from_dict({'label': ['high', 'medium', 'low'], 'url': ['a', 'b', 'c']})
predictions = pd.DataFrame.from_dict({'prediction': ['high', 'low', 'low'], 'url': ['a', 'b', 'c']})
result = evaluation.calc_error_metrics(labels, predictions)
assert 2/3 == pytest.approx(result[1])
def test_convert_label() -> None:
labels = | pd.DataFrame.from_dict({'label': ['high', 'low', 'low'], 'url': ['a', 'b', 'c']}) | pandas.DataFrame.from_dict |
"""Backtester"""
from copy import deepcopy
import unittest
import pandas as pd
import pytest
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.preprocessing import StandardScaler
from soam.constants import (
ANOMALY_PLOT,
DS_COL,
FIG_SIZE,
MONTHLY_TIME_GRANULARITY,
PLOT_CONFIG,
Y_COL,
)
from soam.models.prophet import SkProphet
from soam.plotting.forecast_plotter import ForecastPlotterTask
from soam.workflow import (
Backtester,
BaseDataFrameTransformer,
Forecaster,
Transformer,
compute_metrics,
)
from soam.workflow.backtester import METRICS_KEYWORD, PLOT_KEYWORD, RANGES_KEYWORD
from tests.helpers import sample_data_df # pylint: disable=unused-import
def test_compute_metrics():
"""Function to compute performance metrics."""
metrics = {
"mae": mean_absolute_error,
"mse": mean_squared_error,
}
y_true = [3, -0.5, 2, 7]
y_pred = [2.5, 0.0, 2, 8]
expected_output = {'mae': 0.5, 'mse': 0.375}
output = compute_metrics(y_true, y_pred, metrics)
unittest.TestCase().assertDictEqual(expected_output, output)
class SimpleProcessor(BaseDataFrameTransformer):
"""Create a Simple Processor object."""
def __init__(self, **fit_params): # pylint:disable=super-init-not-called
self.preproc = StandardScaler(**fit_params)
def fit(self, df_X):
self.preproc.fit(df_X[Y_COL].values.reshape(-1, 1))
return self
def transform(self, df_X, inplace=True):
if not inplace:
df_X = df_X.copy()
df_X[Y_COL] = self.preproc.transform(df_X[Y_COL].values.reshape(-1, 1)) + 10
return df_X
def assert_backtest_fold_result_common_checks(rv, ranges=None, plots=None):
"""Backtest fold result common checks assertion."""
assert tuple(rv) == (RANGES_KEYWORD, METRICS_KEYWORD, PLOT_KEYWORD)
assert rv[RANGES_KEYWORD] == ranges
assert rv[PLOT_KEYWORD].name == plots
def assert_backtest_fold_result(rv, ranges=None, metrics=None, plots=None):
"""Backtest fold result assertion."""
assert_backtest_fold_result_common_checks(rv, ranges=ranges, plots=plots)
for metric_name, values in metrics.items():
assert metric_name in rv[METRICS_KEYWORD]
if isinstance(values, dict):
for measure_name, value in values.items():
assert value, pytest.approx(rv[METRICS_KEYWORD][measure_name], 0.01)
else:
assert values, pytest.approx(rv[METRICS_KEYWORD][metric_name], 0.01)
def assert_backtest_all_folds_result(rvs, expected_values):
"""Backtest all fold result assertion."""
assert len(rvs) == len(expected_values)
for rv, evs in zip(rvs, expected_values):
assert_backtest_fold_result(rv, **evs)
def assert_backtest_fold_result_aggregated(rv, ranges=None, metrics=None, plots=None):
"""Backtest fold result aggregated assertion."""
assert_backtest_fold_result_common_checks(rv, ranges=ranges, plots=plots)
output_metrics = pd.DataFrame(rv[METRICS_KEYWORD])
expected_metrics = pd.DataFrame(metrics)
pd.testing.assert_frame_equal(output_metrics, expected_metrics, rtol=1e-1)
def assert_backtest_all_folds_result_aggregated(rvs, expected_values):
"""Backtest all fold result aggregated assertion."""
assert len(rvs) == len(expected_values)
for rv, evs in zip(rvs, expected_values):
assert_backtest_fold_result_aggregated(rv, **evs)
def test_integration_backtester_single_fold(
tmp_path, sample_data_df
): # pylint: disable=redefined-outer-name
"""Backtest single fold integration test."""
test_window = 10
train_data = sample_data_df
forecaster = Forecaster(model=SkProphet(), output_length=test_window)
preprocessor = Transformer(SimpleProcessor())
plot_config = deepcopy(PLOT_CONFIG)
plot_config[ANOMALY_PLOT][MONTHLY_TIME_GRANULARITY][FIG_SIZE] = (8, 3)
forecast_plotter = ForecastPlotterTask(
path=tmp_path,
metric_name='test',
time_granularity=MONTHLY_TIME_GRANULARITY,
plot_config=plot_config,
)
metrics = {
"mae": mean_absolute_error,
"mse": mean_squared_error,
}
backtester = Backtester(
forecaster=forecaster,
preprocessor=preprocessor,
forecast_plotter=forecast_plotter,
test_window=test_window,
train_window=30,
metrics=metrics,
)
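    # with a 30-sample train window and a 10-sample test window this monthly series is
    # expected to produce a single backtest fold (one entry in expected_values below)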
rvs = backtester.run(train_data)
expected_values = [
{
RANGES_KEYWORD: (
pd.Timestamp('2013-02-01 00:00:00'),
pd.Timestamp('2015-07-01 00:00:00'),
pd.Timestamp('2016-05-01 00:00:00'),
),
METRICS_KEYWORD: {'mae': 0.19286372252777645, 'mse': 0.07077117049346579},
'plots': '0_forecast_2013020100_2015080100_.png',
},
]
assert_backtest_all_folds_result(rvs, expected_values)
def test_integration_backtester_multi_fold(
tmp_path, sample_data_df # pylint: disable=redefined-outer-name
):
"""Backtest multi fold integration test."""
test_window = 30
train_data = pd.concat([sample_data_df] * 3)
train_data[DS_COL] = pd.date_range(
train_data[DS_COL].min(), periods=len(train_data), freq='MS'
)
model = SkProphet()
forecaster = Forecaster(model=model, output_length=test_window)
preprocessor = Transformer(SimpleProcessor())
plot_config = deepcopy(PLOT_CONFIG)
plot_config[ANOMALY_PLOT][MONTHLY_TIME_GRANULARITY][FIG_SIZE] = (8, 3)
forecast_plotter = ForecastPlotterTask(
path=tmp_path,
metric_name='test',
time_granularity=MONTHLY_TIME_GRANULARITY,
plot_config=plot_config,
)
metrics = {
"mae": mean_absolute_error,
"mse": mean_squared_error,
}
backtester = Backtester(
forecaster=forecaster,
preprocessor=preprocessor,
forecast_plotter=forecast_plotter,
test_window=test_window,
train_window=30,
metrics=metrics,
)
rvs = backtester.run(train_data)
expected_values = [
{
RANGES_KEYWORD: (
pd.Timestamp('2013-02-01 00:00:00'),
pd.Timestamp('2015-07-01 00:00:00'),
pd.Timestamp('2018-01-01 00:00:00'),
),
METRICS_KEYWORD: {'mae': 1.140921182444867, 'mse': 2.4605768804352675},
'plots': '0_forecast_2013020100_2015080100_.png',
},
{
RANGES_KEYWORD: (
pd.Timestamp('2015-08-01 00:00:00'),
pd.Timestamp('2018-01-01 00:00:00'),
pd.Timestamp('2020-07-01 00:00:00'),
),
METRICS_KEYWORD: {'mae': 1.600049020613293, 'mse': 4.383723067139095},
'plots': '0_forecast_2015080100_2018020100_.png',
},
{
RANGES_KEYWORD: (
pd.Timestamp('2018-02-01 00:00:00'),
pd.Timestamp('2020-07-01 00:00:00'),
pd.Timestamp('2023-01-01 00:00:00'),
),
METRICS_KEYWORD: {'mae': 3.1358162976127217, 'mse': 12.666965373730687},
'plots': '0_forecast_2018020100_2020080100_.png',
},
]
assert_backtest_all_folds_result(rvs, expected_values)
# TODO: It may be a good visual aggregation to include all metrics in one plot. This
# TODO: is not possible with the current implementation.
def test_integration_backtester_multi_fold_default_aggregation(
tmp_path, sample_data_df # pylint: disable=redefined-outer-name
):
"""Backtest multi fold default aggregation integration test."""
test_window = 30
train_data = pd.concat([sample_data_df] * 3)
train_data[DS_COL] = pd.date_range(
train_data[DS_COL].min(), periods=len(train_data), freq='MS'
)
model = SkProphet()
forecaster = Forecaster(model=model, output_length=test_window)
preprocessor = Transformer(SimpleProcessor())
plot_config = deepcopy(PLOT_CONFIG)
plot_config[ANOMALY_PLOT][MONTHLY_TIME_GRANULARITY][FIG_SIZE] = (8, 3)
forecast_plotter = ForecastPlotterTask(
path=tmp_path,
metric_name='test',
time_granularity=MONTHLY_TIME_GRANULARITY,
plot_config=plot_config,
)
metrics = {
"mae": mean_absolute_error,
"mse": mean_squared_error,
}
backtester = Backtester(
forecaster=forecaster,
preprocessor=preprocessor,
forecast_plotter=forecast_plotter,
test_window=test_window,
train_window=30,
metrics=metrics,
aggregation="default",
)
rvs = backtester.run(train_data)
expected_values = [
{
RANGES_KEYWORD: (
pd.Timestamp('2013-02-01 00:00:00'),
pd.Timestamp('2023-01-01 00:00:00'),
),
METRICS_KEYWORD: {
'mae': {
'avg': 2.0269522786354313,
'max': 3.135813436023453,
'min': 1.344995687583762,
},
'mse': {
'avg': 6.761216280050696,
'max': 12.666927167728852,
'min': 3.233004063171241,
},
},
'plots': '0_forecast_2018020100_2020080100_.png',
}
]
assert_backtest_all_folds_result_aggregated(rvs, expected_values)
def test_integration_backtester_multi_fold_custom_aggregations(
tmp_path, sample_data_df # pylint: disable=redefined-outer-name
):
"""Backtest multi fold custom aggregation integration test."""
test_window = 30
train_data = pd.concat([sample_data_df] * 3)
train_data[DS_COL] = pd.date_range(
train_data[DS_COL].min(), periods=len(train_data), freq='MS'
)
model = SkProphet()
forecaster = Forecaster(model=model, output_length=test_window)
preprocessor = Transformer(SimpleProcessor())
plot_config = deepcopy(PLOT_CONFIG)
plot_config[ANOMALY_PLOT][MONTHLY_TIME_GRANULARITY][FIG_SIZE] = (8, 3)
forecast_plotter = ForecastPlotterTask(
path=tmp_path,
metric_name='test',
time_granularity=MONTHLY_TIME_GRANULARITY,
plot_config=plot_config,
)
metrics = {
"mae": mean_absolute_error,
"mse": mean_squared_error,
}
aggregation = {
METRICS_KEYWORD: {
"weighted_begining": lambda metrics_list: (
sum(
[
3 * val if idx == 0 else val
for idx, val in enumerate(metrics_list)
]
)
/ (len(metrics_list) + 2)
),
"weighted_ending": lambda metrics_list: (
sum(
[
3 * val if idx == len(metrics_list) - 1 else val
for idx, val in enumerate(metrics_list)
]
)
/ (len(metrics_list) + 2)
),
},
PLOT_KEYWORD: 1,
}
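    # A quick worked example of the custom aggregations above (illustrative only):
    # for a per-fold metric list [m1, m2, m3],
    #   weighted_begining = (3*m1 + m2 + m3) / (len + 2)
    #   weighted_ending   = (m1 + m2 + 3*m3) / (len + 2)
    # e.g. [1.0, 2.0, 3.0] -> weighted_begining = (3 + 2 + 3) / 5 = 1.6
    #                         weighted_ending   = (1 + 2 + 9) / 5 = 2.4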
backtester = Backtester(
forecaster=forecaster,
preprocessor=preprocessor,
forecast_plotter=forecast_plotter,
test_window=test_window,
train_window=30,
metrics=metrics,
aggregation=aggregation,
)
rvs = backtester.run(train_data)
expected_values = [
{
RANGES_KEYWORD: (
pd.Timestamp('2013-02-01 00:00:00'),
pd.Timestamp('2023-01-01 00:00:00'),
),
METRICS_KEYWORD: {
'mae': {
'weighted_begining': 1.631725773112123,
'weighted_ending': 2.4296838191792647,
},
'mse': {
'weighted_begining': 4.886483816435117,
'weighted_ending': 8.969039213753284,
},
},
'plots': '0_forecast_2015080100_2018020100_.png',
}
]
assert_backtest_all_folds_result_aggregated(rvs, expected_values)
def test_integration_backtester_multi_fold_custom_metric_aggregation_default_plot(
tmp_path, sample_data_df # pylint: disable=redefined-outer-name
):
"""Backtest multi fold custom metric aggregation default plot integration test."""
test_window = 30
train_data = pd.concat([sample_data_df] * 3)
train_data[DS_COL] = pd.date_range(
train_data[DS_COL].min(), periods=len(train_data), freq='MS'
)
model = SkProphet()
forecaster = Forecaster(model=model, output_length=test_window)
preprocessor = Transformer(SimpleProcessor())
plot_config = deepcopy(PLOT_CONFIG)
plot_config[ANOMALY_PLOT][MONTHLY_TIME_GRANULARITY][FIG_SIZE] = (8, 3)
forecast_plotter = ForecastPlotterTask(
path=tmp_path,
metric_name='test',
time_granularity=MONTHLY_TIME_GRANULARITY,
plot_config=plot_config,
)
metrics = {
"mae": mean_absolute_error,
"mse": mean_squared_error,
}
aggregation = {
METRICS_KEYWORD: {
"weighted_begining": lambda metrics_list: (
sum(
[
3 * val if idx == 0 else val
for idx, val in enumerate(metrics_list)
]
)
/ (len(metrics_list) + 2)
),
"weighted_ending": lambda metrics_list: (
sum(
[
3 * val if idx == len(metrics_list) - 1 else val
for idx, val in enumerate(metrics_list)
]
)
/ (len(metrics_list) + 2)
),
}
}
backtester = Backtester(
forecaster=forecaster,
preprocessor=preprocessor,
forecast_plotter=forecast_plotter,
test_window=test_window,
train_window=30,
metrics=metrics,
aggregation=aggregation,
)
rvs = backtester.run(train_data)
expected_values = [
{
RANGES_KEYWORD: (
pd.Timestamp('2013-02-01 00:00:00'),
                pd.Timestamp('2023-01-01 00:00:00'),
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from functools import reduce
import pickle
import os
import pymssql
from virgo import market
startDate_default = '20060101'
endDate_default = (datetime.now() + timedelta(days=-1)).strftime('%Y%m%d')
# endDate_default = datetime.now().strftime('%Y%m%d')
indexTickerUnivSR_default = np.array(['000300.SH', '000016.SH', '000905.SH'])
indexTickerNameUnivSR_default = np.array(['沪深300', '上证50', '中证500'])
# Global val
conn243 = pymssql.connect(server='192.168.1.243', user="yuman.hu", password="<PASSWORD>")
conn247 = pymssql.connect(server='192.168.1.247', user="yuman.hu", password="<PASSWORD>")
# daily data download
class dailyQuant(object):
def __init__(self, startDate=startDate_default, endDate=endDate_default,
indexTickerUnivSR=indexTickerUnivSR_default, indexTickerNameUnivSR=indexTickerNameUnivSR_default):
self.startDate = startDate
self.endDate = endDate
self.rawData_path = '../data/rawData/'
self.fundamentalData_path = '../data/fundamentalData/'
self.indexTickerUnivSR = indexTickerUnivSR
self.indexTickerNameUnivSR = indexTickerNameUnivSR
self.tradingDateV, self.timeSeries = self.get_dateData()
self.tickerUnivSR, self.stockTickerUnivSR, self.tickerNameUnivSR, self.stockTickerNameUnivSR, self.tickerUnivTypeR = self.get_tickerData()
def get_dateData(self):
sql = '''
SELECT [tradingday]
FROM [Group_General].[dbo].[TradingDayList]
where tradingday>='20060101'
order by tradingday asc
'''
dateSV = pd.read_sql(sql, conn247)
tradingdays = dateSV.tradingday.unique()
tradingDateV = np.array([x.replace('-', '') for x in tradingdays])
timeSeries = pd.Series(pd.to_datetime(tradingDateV))
pd.Series(tradingDateV).to_csv(self.rawData_path+ 'tradingDateV.csv', index=False)
return tradingDateV, timeSeries
def get_tickerData(self):
# and B.[SecuAbbr] not like '%%ST%%'
# where ChangeDate>='%s'
sql = '''
SELECT A.[ChangeDate],A.[ChangeType],B.[SecuCode],B.[SecuMarket],B.[SecuAbbr]
FROM [JYDB].[dbo].[LC_ListStatus] A
inner join [JYDB].[dbo].SecuMain B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
order by SecuCode asc
'''
dataV = pd.read_sql(sql, conn243)
flagMarket = dataV.SecuMarket == 83
dataV['SecuCode'][flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SH')
dataV['SecuCode'][~flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SZ')
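        # Note (assumption): in the JYDB schema, SecuMarket 83 appears to be the Shanghai exchange
        # and 90 Shenzhen, hence the '.SH' / '.SZ' suffixes, e.g. '600000' -> '600000.SH'.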
# dataV.ChangeDate = pd.Series([x.strftime('%Y%m%d') for x in dataV.ChangeDate.values])
dataV.ChangeDate = dataV.ChangeDate.map(lambda x: x.strftime('%Y%m%d'))
flagV = np.full(len(dataV), True)
flagList = []
for i in range(len(dataV)):
if dataV.iat[i, 1] == 4:
if dataV.iat[i, 0] < self.tradingDateV[0]:
flagList.append(dataV.iat[i, 2])
for i in range(len(dataV)):
if dataV.iat[i, 2] in flagList:
flagV[i] = False
dataV = dataV[flagV]
stockTickerUnivSR = dataV.SecuCode.unique()
tickerUnivSR = np.append(self.indexTickerUnivSR, stockTickerUnivSR)
stockTickerNameUnivSR = dataV.SecuAbbr.unique()
tickerNameUnivSR = np.append(self.indexTickerNameUnivSR, stockTickerNameUnivSR)
tickerUnivTypeR = np.append(np.full(len(self.indexTickerUnivSR), 3), np.ones(len(dataV)))
pd.DataFrame(self.indexTickerUnivSR).T.to_csv(self.rawData_path+'indexTickerUnivSR.csv', header=False, index=False)
pd.DataFrame(stockTickerUnivSR).T.to_csv(self.rawData_path+'stockTickerUnivSR.csv', header=False, index=False)
pd.DataFrame(tickerUnivSR).T.to_csv(self.rawData_path+'tickerUnivSR.csv', header=False, index=False)
pd.DataFrame(self.indexTickerNameUnivSR).T.to_csv(self.rawData_path+'indexTickerNameUnivSR.csv', header=False, index=False)
pd.DataFrame(stockTickerNameUnivSR).T.to_csv(self.rawData_path+'stockTickerNameUnivSR.csv', header=False, index=False)
pd.DataFrame(tickerNameUnivSR).T.to_csv(self.rawData_path+'tickerNameUnivSR.csv', header=False, index=False)
pd.DataFrame(tickerUnivTypeR).T.to_csv(self.rawData_path+'tickerUnivTypeR.csv', header=False, index=False)
return tickerUnivSR, stockTickerUnivSR, tickerNameUnivSR, stockTickerNameUnivSR, tickerUnivTypeR
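    # Note: tickerUnivTypeR marks the instrument type per ticker: 3 for the three indices
    # prepended above and 1 for ordinary stocks (see the np.full(..., 3) / np.ones(...) calls).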
def __tradingData(self,tradingDay):
sql = '''
SELECT A.[TradingDay], B.[SecuMarket], B.[SecuCode], A.[PrevClosePrice],
A.[OpenPrice],A.[HighPrice],A.[LowPrice],A.[ClosePrice], A.[TurnoverVolume],A.[TurnoverValue]
FROM [JYDB].[dbo].[QT_DailyQuote] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
where A.tradingday='%s'
''' % tradingDay
dataStock = pd.read_sql_query(sql, conn243)
sql = '''
SELECT A.[TradingDay], B.[SecuMarket], B.[SecuCode], A.[PrevClosePrice],
A.[OpenPrice],A.[HighPrice],A.[LowPrice],A.[ClosePrice], A.[TurnoverVolume],A.[TurnoverValue]
FROM [JYDB].[dbo].[QT_IndexQuote] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and (B.SecuCode = '000300' or B.SecuCode = '000016' or B.SecuCode = '000905')
and B.SecuCategory=4
where A.tradingday='%s'
''' % tradingDay
dataIndex = pd.read_sql_query(sql, conn243)
dataV = pd.concat([dataIndex,dataStock])
sql = '''
SELECT [TradingDay], [SecuCode], [StockReturns]
FROM [Group_General].[dbo].[DailyQuote]
where tradingday='%s'
''' % tradingDay
dataStock = pd.read_sql_query(sql, conn247)
sql = '''
SELECT A.[TradingDay], B.[SecuCode], A.[ChangePCT]
FROM [JYDB].[dbo].[QT_IndexQuote] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and (B.SecuCode = '000300' or B.SecuCode = '000016' or B.SecuCode = '000905')
and B.SecuCategory=4
where A.tradingday='%s'
''' % tradingDay
dataIndex = pd.read_sql_query(sql, conn243)
dataIndex.ChangePCT = dataIndex.ChangePCT / 100
dataIndex = dataIndex.rename({'ChangePCT': 'StockReturns'}, axis='columns')
dataR = pd.concat([dataIndex, dataStock])
data = pd.merge(dataV,dataR)
flagMarket = data.SecuMarket==83
data['SecuCode'][flagMarket] = data['SecuCode'].map(lambda x: x + '.SH')
data['SecuCode'][~flagMarket] = data['SecuCode'].map(lambda x: x + '.SZ')
data.TradingDay = data.TradingDay.map(lambda x: x.strftime('%Y%m%d'))
preCloseM = pd.DataFrame(pd.pivot_table(data,values='PrevClosePrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
openM = pd.DataFrame(pd.pivot_table(data,values='OpenPrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
highM = pd.DataFrame(pd.pivot_table(data,values='HighPrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
lowM =pd.DataFrame(pd.pivot_table(data,values='LowPrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
closeM = pd.DataFrame(pd.pivot_table(data,values='ClosePrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
volumeM = pd.DataFrame(pd.pivot_table(data,values='TurnoverVolume',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
amountM = pd.DataFrame(pd.pivot_table(data,values='TurnoverValue',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
retM = pd.DataFrame(pd.pivot_table(data,values='StockReturns',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)], columns=self.tickerUnivSR)
sql = '''
SELECT A.[ExDiviDate], B.[SecuMarket], B.[SecuCode], A.[AdjustingFactor]
FROM [JYDB].[dbo].[QT_AdjustingFactor] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
'''
dataAF = pd.read_sql_query(sql, conn243)
dataAF = dataAF.rename({'ExDiviDate':'TradingDay'},axis=1)
flagMarket = dataAF.SecuMarket == 83
dataAF['SecuCode'][flagMarket] = dataAF['SecuCode'].map(lambda x: x + '.SH')
dataAF['SecuCode'][~flagMarket] = dataAF['SecuCode'].map(lambda x: x + '.SZ')
dataAF.TradingDay = dataAF.TradingDay.map(lambda x: x.strftime('%Y%m%d'))
adjFactorM = pd.pivot_table(dataAF, values='AdjustingFactor', index='TradingDay', columns='SecuCode')
adjFactorM.fillna(method='pad', inplace=True)
adjFactorM = pd.DataFrame(adjFactorM ,index=self.tradingDateV, columns=self.tickerUnivSR)
adjFactorM.fillna(method='pad', inplace=True)
adjFactorM =pd.DataFrame(adjFactorM ,index=[str(tradingDay)])
sql = '''
SELECT A.[ChangeDate],A.[ChangeType],B.[SecuCode],B.[SecuMarket]
FROM [JYDB].[dbo].[LC_ListStatus] A
inner join [JYDB].[dbo].SecuMain B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
where (A.ChangeType = 1 or A.ChangeType = 4)
'''
dataStock = pd.read_sql_query(sql, conn243)
sql = '''
SELECT A.[PubDate],B.[SecuCode],B.[SecuMarket]
FROM [JYDB].[dbo].[LC_IndexBasicInfo] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[IndexCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and (B.SecuCode = '000300' or B.SecuCode = '000016' or B.SecuCode = '000905')
and B.SecuCategory=4
'''
dataIndex = pd.read_sql_query(sql, conn243)
dataIndex['ChangeType'] = 1
dataIndex = dataIndex.rename({'PubDate': 'ChangeDate'}, axis='columns')
dataV = pd.concat([dataIndex, dataStock])
flagMarket = dataV.SecuMarket == 83
dataV['SecuCode'][flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SH')
dataV['SecuCode'][~flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SZ')
# dataV.ChangeDate = pd.Series([x.strftime('%Y%m%d') for x in dataV.ChangeDate.values])
dataV.ChangeDate = dataV.ChangeDate.map(lambda x: x.strftime('%Y%m%d'))
listedM = pd.pivot_table(dataV, values='ChangeType', index='ChangeDate', columns='SecuCode')
dateTotal = np.union1d(listedM.index.values, [str(tradingDay)])
listedM = pd.DataFrame(listedM, index=dateTotal, columns=self.tickerUnivSR)
listedM[listedM == 4] = 0
listedM.fillna(method='pad', inplace=True)
listedM = pd.DataFrame(listedM,index= [str(tradingDay)])
listedM = listedM.fillna(0)
sql = '''
SELECT A.[SuspendDate],A.[ResumptionDate],A.[SuspendTime], A.[ResumptionTime], B.[SecuCode],B.[SecuMarket]
FROM [JYDB].[dbo].[LC_SuspendResumption] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
where A.[SuspendDate] = '%s'
'''%tradingDay
if tradingDay == self.tradingDateV[0]:
sql = sql.replace('A.[SuspendDate] = ','A.[SuspendDate] <= ')
dataSusp = pd.read_sql_query(sql, conn243)
flagMarket = dataSusp.SecuMarket == 83
dataSusp['SecuCode'][flagMarket] = dataSusp['SecuCode'].map(lambda x: x + '.SH')
dataSusp['SecuCode'][~flagMarket] = dataSusp['SecuCode'].map(lambda x: x + '.SZ')
dataSusp.SuspendDate = dataSusp.SuspendDate.map(lambda x: x.strftime('%Y%m%d'))
dataSusp['flag'] = 1
startFlag = pd.pivot_table(dataSusp, values='flag',index='SuspendDate', columns='SecuCode')
try:
startFlag = pd.DataFrame(startFlag, index=[str(tradingDay)], columns=self.tickerUnivSR)
except:
startFlag = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
endFlag = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
amount = amountM.fillna(0)
flag = (amount == 0)
endFlag[startFlag == 1] = 1
endFlag[flag] = 1
suspM = endFlag.fillna(0)
suspM[(listedM==0)] = 1
else:
dataSusp = pd.read_sql_query(sql, conn243)
flagMarket = dataSusp.SecuMarket == 83
dataSusp['SecuCode'][flagMarket] = dataSusp['SecuCode'].map(lambda x: x + '.SH')
dataSusp['SecuCode'][~flagMarket] = dataSusp['SecuCode'].map(lambda x: x + '.SZ')
dataSusp.SuspendDate = dataSusp.SuspendDate.map(lambda x: x.strftime('%Y%m%d'))
file2 = open('../data/rawData/{}.pkl'.format(self.tradingDateV[self.tradingDateV.tolist().index(tradingDay)-1]), 'rb')
suspPre = pickle.load(file2)['suspM']
file2.close()
dataSusp['flag'] = 1
startFlag = pd.pivot_table(dataSusp, values='flag',index='SuspendDate', columns='SecuCode')
try:
startFlag = pd.DataFrame(startFlag, index=[str(tradingDay)], columns=self.tickerUnivSR)
except:
startFlag = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
endFlag = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
amount = amountM.fillna(0)
flag = (amount == 0)
endFlag[startFlag == 1] = 1
endFlag[~flag] = 0
suspM = pd.concat([suspPre,endFlag]).fillna(method='pad')
suspM = pd.DataFrame(suspM,index=[str(tradingDay)])
suspM[(listedM==0)] = 1
sql='''
SELECT A.[SpecialTradeTime],A.[SpecialTradeType],B.[SecuCode],B.[SecuMarket]
FROM [JYDB].[dbo].[LC_SpecialTrade] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
where (A.[SpecialTradeType]=1 or A.[SpecialTradeType] = 2 or A.[SpecialTradeType] = 5 or A.[SpecialTradeType] = 6)
and A.[SpecialTradeTime] = '%s'
'''% tradingDay
if tradingDay == self.tradingDateV[0]:
sql = sql.replace('A.[SpecialTradeTime] = ','A.[SpecialTradeTime] <= ')
dataV = pd.read_sql_query(sql, conn243)
flagMarket = dataV.SecuMarket == 83
dataV['SecuCode'][flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SH')
dataV['SecuCode'][~flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SZ')
dataV.SpecialTradeTime = dataV.SpecialTradeTime.map(lambda x: x.strftime('%Y%m%d'))
dataV['SpecialTradeType'][dataV['SpecialTradeType'] == 5] = 1
dataV['SpecialTradeType'][dataV['SpecialTradeType'] == 2] = 0
dataV['SpecialTradeType'][dataV['SpecialTradeType'] == 6] = 0
stStateM = pd.pivot_table(dataV, values='SpecialTradeType', index='SpecialTradeTime', columns='SecuCode')
dateTotal = np.union1d(stStateM.index.values, [str(tradingDay)])
stStateM = pd.DataFrame(stStateM, index=dateTotal, columns=self.tickerUnivSR)
stStateM = stStateM.fillna(method='pad')
stStateM = pd.DataFrame(stStateM, index=[str(tradingDay)])
stStateM = stStateM.fillna(0)
else:
try:
file2 = open('../data/rawData/{}.pkl'.format(self.tradingDateV[self.tradingDateV.tolist().index(tradingDay)-1]), 'rb')
stStatePre = pickle.load(file2)['stStateM']
file2.close()
dataV = pd.read_sql_query(sql, conn243)
flagMarket = dataV.SecuMarket == 83
dataV['SecuCode'][flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SH')
dataV['SecuCode'][~flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SZ')
dataV.SpecialTradeTime = dataV.SpecialTradeTime.map(lambda x: x.strftime('%Y%m%d'))
dataV['SpecialTradeType'][dataV['SpecialTradeType'] == 5] = 1
dataV['SpecialTradeType'][dataV['SpecialTradeType'] == 2] = 0
dataV['SpecialTradeType'][dataV['SpecialTradeType'] == 6] = 0
stStateM = pd.pivot_table(dataV, values='SpecialTradeType', index='SpecialTradeTime', columns='SecuCode')
stStateM = pd.concat([stStatePre,stStateM]).fillna(method='pad')
except:
file2 = open('../data/rawData/{}.pkl'.format(self.tradingDateV[self.tradingDateV.tolist().index(tradingDay)-1]), 'rb')
stStatePre = pickle.load(file2)['stStateM']
file2.close()
stStateM = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
stStateM = pd.concat([stStatePre,stStateM]).fillna(method='pad')
# stStateM = pd.DataFrame(stStatePre,index=np.concatenate([stStatePre.index.values,str(tradingDay)]))
# stStateM = stStateM.fillna(method='pad')
finally:
stStateM = pd.DataFrame(stStateM, index=[str(tradingDay)])
stStateM = stStateM.fillna(0).astype(int)
sql = '''
SELECT A.[InDate],A.[OutDate],B.[SecuCode] as IndexCode,B.[SecuMarket] as IndexMarket,C.[SecuCode],C.[SecuMarket]
FROM [JYDB].[dbo].[LC_IndexComponent] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[IndexInnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and (B.SecuCode = '000300' or B.SecuCode = '000016' or B.SecuCode = '000905')
and B.SecuCategory=4
inner join [JYDB].[dbo].[SecuMain] C
on A.[SecuInnerCode]=C.[InnerCode]
and C.SecuMarket in (83,90)
and C.SecuCategory=1
where A.[InDate] = '%s' or A.[OutDate] = '%s'
'''%(tradingDay,tradingDay)
if tradingDay == self.tradingDateV[0]:
sql = sql.replace('A.[InDate] = ','A.[InDate] <= ').replace('A.[OutDate] = ','A.[OutDate] <= ')
data = pd.read_sql_query(sql, conn243)
flagMarket = data.SecuMarket==83
data['SecuCode'][flagMarket] = data['SecuCode'].map(lambda x: x+'.SH')
data['SecuCode'][~flagMarket] = data['SecuCode'].map(lambda x: x+'.SZ')
flagMarket = data.IndexMarket==83
data['IndexCode'][flagMarket] = data['IndexCode'].map(lambda x: x+'.SH')
data['IndexCode'][~flagMarket] = data['IndexCode'].map(lambda x: x+'.SZ')
data.InDate = data.InDate.map(lambda x: x.strftime('%Y%m%d'))
flagDate = pd.notnull(data.OutDate)
data.OutDate[flagDate] = data.OutDate[flagDate].map(lambda x: x.strftime('%Y%m%d'))
data['flag_start'] = 1
data['flag_end']= 0
try:
data300 = data[data.IndexCode == '000300.SH']
data300.OutDate[data300.OutDate > tradingDay] = np.nan
t_start = pd.pivot_table(data300, values='flag_start', index='InDate', columns='SecuCode')
t_end = pd.pivot_table(data300, values='flag_end', index='OutDate', columns='SecuCode')
dateTotal = reduce(np.union1d, (t_start.index.values,t_end.index.values,str(tradingDay)))
t_start = pd.DataFrame(t_start, index=dateTotal, columns=self.tickerUnivSR)
t_end = pd.DataFrame(t_end, index=dateTotal, columns=self.tickerUnivSR)
IndexConstM = t_start
IndexConstM[t_end == 0] = 0
IndexConstM = IndexConstM.fillna(method='pad')
IndexConstM = pd.DataFrame(IndexConstM,index=[str(tradingDay)],columns=self.tickerUnivSR)
hs300ConstC_IndexConstM = IndexConstM.fillna(0).astype(int)
except:
hs300ConstC_IndexConstM = pd.DataFrame(index=[str(tradingDay)],columns=self.tickerUnivSR).fillna(0).astype(int)
try:
data50 = data[data.IndexCode == '000016.SH']
data50.OutDate[data50.OutDate > tradingDay] = np.nan
t_start = pd.pivot_table(data50, values='flag_start', index='InDate', columns='SecuCode')
t_end = pd.pivot_table(data50, values='flag_end', index='OutDate', columns='SecuCode')
dateTotal = reduce(np.union1d, (t_start.index.values,t_end.index.values,str(tradingDay)))
t_start = pd.DataFrame(t_start, index=dateTotal, columns=self.tickerUnivSR)
t_end = pd.DataFrame(t_end, index=dateTotal, columns=self.tickerUnivSR)
IndexConstM = t_start
IndexConstM[t_end == 0] = 0
IndexConstM = IndexConstM.fillna(method='pad')
IndexConstM = pd.DataFrame(IndexConstM,index=[str(tradingDay)],columns=self.tickerUnivSR)
sz50ConstC_IndexConstM = IndexConstM.fillna(0).astype(int)
except:
sz50ConstC_IndexConstM = pd.DataFrame(index=[str(tradingDay)],columns=self.tickerUnivSR).fillna(0).astype(int)
try:
data500 = data[data.IndexCode == '000905.SH']
data500.OutDate[data500.OutDate > tradingDay] = np.nan
t_start = pd.pivot_table(data500, values='flag_start', index='InDate', columns='SecuCode')
            t_end = pd.pivot_table(data500, values='flag_end', index='OutDate', columns='SecuCode')
# Module: Preprocess
# Author: <NAME> <<EMAIL>>
# License: MIT
import pandas as pd
import numpy as np
import ipywidgets as wg
from IPython.display import display
from ipywidgets import Layout
from sklearn.base import BaseEstimator, TransformerMixin, ClassifierMixin, clone
from sklearn.impute._base import _BaseImputer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import PowerTransformer
from sklearn.preprocessing import QuantileTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.decomposition import PCA
from sklearn.decomposition import KernelPCA
from sklearn.cross_decomposition import PLSRegression
from sklearn.manifold import TSNE
from sklearn.decomposition import IncrementalPCA
from sklearn.preprocessing import KBinsDiscretizer
from pyod.models.knn import KNN
from pyod.models.iforest import IForest
from pyod.models.pca import PCA as PCA_od
from sklearn import cluster
from scipy import stats
from sklearn.ensemble import RandomForestClassifier as rfc
from sklearn.ensemble import RandomForestRegressor as rfr
from lightgbm import LGBMClassifier as lgbmc
from lightgbm import LGBMRegressor as lgbmr
import sys
import gc
from sklearn.pipeline import Pipeline
from sklearn import metrics
from datetime import datetime
import calendar
from sklearn.preprocessing import LabelEncoder
from collections import defaultdict
from typing import Optional, Union
from pycaret.internal.logging import get_logger
from pycaret.internal.utils import infer_ml_usecase
from sklearn.utils.validation import check_is_fitted, check_X_y, check_random_state
from sklearn.utils.validation import _deprecate_positional_args
from sklearn.utils import _safe_indexing
from sklearn.exceptions import NotFittedError
pd.set_option("display.max_columns", 500)
pd.set_option("display.max_rows", 500)
SKLEARN_EMPTY_STEP = "passthrough"
# _____________________________________________________________________________________________________________________________
def str_if_not_null(x):
if pd.isnull(x) or (x is None) or pd.isna(x) or (x is not x):
return x
return str(x)
def find_id_columns(data, target, numerical_features):
    # sometimes we have an id column in the data set; we will try to find it and then drop it if found
len_samples = len(data)
id_columns = []
for i in data.select_dtypes(
include=["object", "int64", "float64", "float32"]
).columns:
col = data[i]
if i not in numerical_features and i != target:
if sum(col.isnull()) == 0:
try:
col = col.astype("int64")
except:
continue
if col.nunique() == len_samples:
# we extract column and sort it
features = col.sort_values()
                    # now we subtract the i-th value from the (i+1)-th (calculating increments)
increments = features.diff()[1:]
# if all increments are 1 (with float tolerance), then the column is ID column
if sum(np.abs(increments - 1) < 1e-7) == len_samples - 1:
id_columns.append(i)
return id_columns
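# Illustrative sketch (hypothetical data): a column of strictly incrementing unique integers
# is treated as an ID column and later dropped, e.g.
#   find_id_columns(pd.DataFrame({'row_id': [1, 2, 3, 4], 'y': [0, 1, 0, 1]}), 'y', [])
#   -> ['row_id']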
class DataTypes_Auto_infer(BaseEstimator, TransformerMixin):
"""
    - This will try to infer data types automatically; an option to override the learned data types is also available.
    - This also automatically deletes duplicate columns (same values or same column name), removes rows where the target variable is null and
      removes columns and rows where all the records are null
"""
def __init__(
self,
target,
ml_usecase,
categorical_features=[],
numerical_features=[],
time_features=[],
features_todrop=[],
id_columns=[],
display_types=True,
float_dtype="float32",
): # nothing to define
"""
User to define the target (y) variable
args:
target: string, name of the target variable
            ml_usecase: string, 'regression' or 'classification'. For now, only supports two-class classification
            - this is useful in case the target variable is an object / string; it will replace the strings with integers
categorical_features: list of categorical features, default None, when None best guess will be used to identify categorical features
numerical_features: list of numerical features, default None, when None best guess will be used to identify numerical features
time_features: list of date/time features, default None, when None best guess will be used to identify date/time features
"""
self.target = target
self.ml_usecase = ml_usecase
self.features_todrop = [str(x) for x in features_todrop]
self.categorical_features = [
x for x in categorical_features if x not in self.features_todrop
]
self.numerical_features = [
x for x in numerical_features if x not in self.features_todrop
]
self.time_features = [x for x in time_features if x not in self.features_todrop]
self.display_types = display_types
self.id_columns = id_columns
self.float_dtype = float_dtype
def fit(self, dataset, y=None): # learning data types of all the columns
"""
Args:
data: accepts a pandas data frame
Returns:
Panda Data Frame
"""
data = dataset.copy()
# also make sure that all the column names are string
data.columns = [str(i) for i in data.columns]
# drop any columns that were asked to drop
data.drop(columns=self.features_todrop, errors="ignore", inplace=True)
        # remove special characters from column names
        # data.columns= data.columns.str.replace('[,]','')
        # we will treat float as numeric and object as categorical from the beginning
        # for int64, we will check the proportion of unique counts to the total length of the data
        # if the proportion is low, then it is probably categorical
        # however, the proportion can be lowered / disturbed by a smaller denominator (total length / number of samples)
        # so we will use the following chart
        # 0-50 samples, threshold is 24%
        # 50-100 samples, th is 12%
        # 100-250 samples, th is 4.8%
        # 250-500 samples, th is 2.4%
        # 500 and above, 2% or below
# if there are inf or -inf then replace them with NaN
data.replace([np.inf, -np.inf], np.NaN, inplace=True)
        # if everything somehow came in as object, we can try converting those columns to int64
for i in data.select_dtypes(include=["object"]).columns:
try:
data[i] = data[i].astype("int64")
except:
None
for i in (
data.select_dtypes(include=["object"])
.drop(self.target, axis=1, errors="ignore")
.columns
):
try:
data[i] = pd.to_datetime(
data[i], infer_datetime_format=True, utc=False, errors="raise"
)
except:
continue
        # if data type is bool or pandas Categorical, convert it to object (treated as categorical)
for i in data.select_dtypes(include=["bool", "category"]).columns:
data[i] = data[i].astype("object")
        # with csv, if we have any null in a column that was int, pandas will read it as float.
        # so first we need to convert any such floats that have NaN and fewer than 20 unique values
for i in data.select_dtypes(include=["float64"]).columns:
data[i] = data[i].astype(self.float_dtype)
# count how many Nas are there
na_count = sum(data[i].isnull())
            # count how many values have a decimal part
count_float = np.nansum(
[False if r.is_integer() else True for r in data[i]]
)
            # total count of decimal values
count_float = (
count_float - na_count
) # reducing it because we know NaN is counted as a float digit
            # now if there are no decimal values, unique levels are at most 20, and there are NAs, then convert it to object
if (count_float == 0) & (data[i].nunique() <= 20) & (na_count > 0):
data[i] = data[i].astype("object")
# should really be an absolute number say 20
# length = len(data.iloc[:,0])
# if length in range(0,51):
# th=.25
# elif length in range(51,101):
# th=.12
# elif length in range(101,251):
# th=.048
# elif length in range(251,501):
# th=.024
# elif length > 500:
# th=.02
# if column is int and unique counts are more than two, then: (exclude target)
for i in data.select_dtypes(include=["int64"]).columns:
if i != self.target:
if data[i].nunique() <= 20: # hard coded
data[i] = data[i].apply(str_if_not_null)
else:
data[i] = data[i].astype(self.float_dtype)
        # # if a float column has only two unique values, it is probably one hot encoded
        # # make it object
for i in data.select_dtypes(include=[self.float_dtype]).columns:
if data[i].nunique() == 2:
data[i] = data[i].apply(str_if_not_null)
# for time & dates
# self.drop_time = [] # for now we are deleting time columns
        # now in case we were given any specific column dtypes in advance, we will override those
for i in self.categorical_features:
try:
data[i] = data[i].apply(str_if_not_null)
except:
data[i] = dataset[i].apply(str_if_not_null)
for i in self.numerical_features:
try:
data[i] = data[i].astype(self.float_dtype)
except:
data[i] = dataset[i].astype(self.float_dtype)
for i in self.time_features:
try:
data[i] = pd.to_datetime(
data[i], infer_datetime_format=True, utc=False, errors="raise"
)
except:
data[i] = pd.to_datetime(
dataset[i], infer_datetime_format=True, utc=False, errors="raise"
)
for i in data.select_dtypes(
include=["datetime64", "datetime64[ns, UTC]"]
).columns:
data[i] = data[i].astype("datetime64[ns]")
        # table of learned types
self.learned_dtypes = data.dtypes
# self.training_columns = data.drop(self.target,axis=1).columns
# if there are inf or -inf then replace them with NaN
data = data.replace([np.inf, -np.inf], np.NaN).astype(self.learned_dtypes)
# lets remove duplicates
# remove duplicate columns (columns with same values)
# (too expensive on bigger data sets)
# data_c = data.T.drop_duplicates()
# data = data_c.T
# remove columns with duplicate name
data = data.loc[:, ~data.columns.duplicated()]
# Remove NAs
data.dropna(axis=0, how="all", inplace=True)
data.dropna(axis=1, how="all", inplace=True)
# remove the row if target column has NA
try:
data.dropna(subset=[self.target], inplace=True)
except KeyError:
pass
# self.training_columns = data.drop(self.target,axis=1).columns
# since due to transpose , all data types have changed, lets change the dtypes to original---- not required any more since not transposing any more
# for i in data.columns: # we are taking all the columns in test , so we dot have to worry about droping target column
# data[i] = data[i].astype(self.learned_dtypes[self.learned_dtypes.index==i])
if self.display_types == True:
display(
wg.Text(
value="Following data types have been inferred automatically, if they are correct press enter to continue or type 'quit' otherwise.",
layout=Layout(width="100%"),
),
display_id="m1",
)
dt_print_out = pd.DataFrame(
self.learned_dtypes, columns=["Feature_Type"]
).drop("UNSUPERVISED_DUMMY_TARGET", errors="ignore")
dt_print_out["Data Type"] = ""
for i in dt_print_out.index:
if i != self.target:
if i in self.id_columns:
dt_print_out.loc[i, "Data Type"] = "ID Column"
elif dt_print_out.loc[i, "Feature_Type"] == "object":
dt_print_out.loc[i, "Data Type"] = "Categorical"
elif dt_print_out.loc[i, "Feature_Type"] == self.float_dtype:
dt_print_out.loc[i, "Data Type"] = "Numeric"
elif dt_print_out.loc[i, "Feature_Type"] == "datetime64[ns]":
dt_print_out.loc[i, "Data Type"] = "Date"
# elif dt_print_out.loc[i,'Feature_Type'] == 'int64':
# dt_print_out.loc[i,'Data Type'] = 'Categorical'
else:
dt_print_out.loc[i, "Data Type"] = "Label"
# if we added the dummy target column , then drop it
dt_print_out.drop(index="dummy_target", errors="ignore", inplace=True)
display(dt_print_out[["Data Type"]])
self.response = input()
if self.response in [
"quit",
"Quit",
"exit",
"EXIT",
"q",
"Q",
"e",
"E",
"QUIT",
"Exit",
]:
sys.exit(
"Read the documentation of setup to learn how to overwrite data types over the inferred types. setup function must run again before you continue modeling."
)
# drop time columns
# data.drop(self.drop_time,axis=1,errors='ignore',inplace=True)
# drop id columns
data.drop(self.id_columns, axis=1, errors="ignore", inplace=True)
return data
def transform(self, dataset, y=None):
"""
Args:
data: accepts a pandas data frame
Returns:
Panda Data Frame
"""
data = dataset.copy()
# also make sure that all the column names are string
data.columns = [str(i) for i in data.columns]
# drop any columns that were asked to drop
data.drop(columns=self.features_todrop, errors="ignore", inplace=True)
data = data[self.final_training_columns]
# also make sure that all the column names are string
data.columns = [str(i) for i in data.columns]
# if there are inf or -inf then replace them with NaN
data.replace([np.inf, -np.inf], np.NaN, inplace=True)
try:
data.dropna(subset=[self.target], inplace=True)
except KeyError:
pass
        # remove special characters from column names
# data.columns= data.columns.str.replace('[,]','')
        # the very first thing we need to do is to check that the training and test data have the same columns
for i in self.final_training_columns:
if i not in data.columns:
raise TypeError(
f"test data does not have column {i} which was used for training."
)
# just keep picking the data and keep applying to the test data set (be mindful of target variable)
for (
i
) in (
data.columns
        ):  # we are taking all the columns in test, so we don't have to worry about dropping the target column
if i == self.target and (
(self.ml_usecase == "classification")
and (self.learned_dtypes[self.target] == "object")
):
data[i] = self.le.transform(data[i].apply(str).astype("object"))
data[i] = data[i].astype("int64")
else:
if self.learned_dtypes[i].name == "datetime64[ns]":
data[i] = pd.to_datetime(
data[i], infer_datetime_format=True, utc=False, errors="coerce"
)
data[i] = data[i].astype(self.learned_dtypes[i])
# drop time columns
# data.drop(self.drop_time,axis=1,errors='ignore',inplace=True)
# drop id columns
data.drop(self.id_columns, axis=1, errors="ignore", inplace=True)
return data
# fit_transform
def fit_transform(self, dataset, y=None):
data = dataset
        # since this is for training, we don't need any transformation here since it has already been transformed in fit
data = self.fit(data)
# additionally we just need to treat the target variable
        # for ml use case
if (self.ml_usecase == "classification") & (
data[self.target].dtype == "object"
):
self.le = LabelEncoder()
data[self.target] = self.le.fit_transform(
data[self.target].apply(str).astype("object")
)
self.replacement = _get_labelencoder_reverse_dict(self.le)
# self.u = list(pd.unique(data[self.target]))
# self.replacement = np.arange(0,len(self.u))
# data[self.target]= data[self.target].replace(self.u,self.replacement)
# data[self.target] = data[self.target].astype('int64')
# self.replacement = pd.DataFrame(dict(target_variable=self.u,replaced_with=self.replacement))
# drop time columns
# data.drop(self.drop_time,axis=1,errors='ignore',inplace=True)
# drop id columns
data.drop(self.id_columns, axis=1, errors="ignore", inplace=True)
# finally save a list of columns that we would need from test data set
self.final_training_columns = data.columns.to_list()
self.final_training_columns.remove(self.target)
return data
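# A minimal usage sketch (assumed column names, illustrative only):
#   dtypes = DataTypes_Auto_infer(target="y", ml_usecase="classification", display_types=False)
#   train_t = dtypes.fit_transform(train_df)   # infers dtypes, label-encodes an object target
#   test_t = dtypes.transform(test_df)         # applies the learned dtypes to new data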
# _______________________________________________________________________________________________________________________
# Imputation
class Simple_Imputer(_BaseImputer):
"""
    Imputes all types of data (numerical, categorical & time).
    Highly recommended to run the Define_dataTypes class first.
    Numerical values can be imputed with mean or median or filled with zeros.
    Categorical missing values will be replaced with the most frequent value or "not_available".
    Time values are imputed with the most frequent value.
    Ignores the target (y) variable.
Args:
Numeric_strategy: string , all possible values {'mean','median','zero'}
categorical_strategy: string , all possible values {'not_available','most frequent'}
target: string , name of the target variable
fill_value_numerical: number, value for filling missing values of numeric columns
fill_value_categorical: string, value for filling missing values of categorical columns
"""
_numeric_strategies = {
"mean": "mean",
"median": "median",
"most frequent": "most_frequent",
"zero": "constant",
}
_categorical_strategies = {
"most frequent": "most_frequent",
"not_available": "constant",
}
_time_strategies = {
"mean": "mean",
"median": "median",
"most frequent": "most_frequent",
}
def __init__(
self,
numeric_strategy,
categorical_strategy,
time_strategy,
target,
fill_value_numerical=0,
fill_value_categorical="not_available",
):
# Set the target variable, which we don't want to impute
self.target = target
if numeric_strategy not in self._numeric_strategies:
numeric_strategy = "zero"
self.numeric_strategy = numeric_strategy
if categorical_strategy not in self._categorical_strategies:
categorical_strategy = "most frequent"
self.categorical_strategy = categorical_strategy
if time_strategy not in self._time_strategies:
time_strategy = "most frequent"
self.time_strategy = time_strategy
self.fill_value_numerical = fill_value_numerical
self.fill_value_categorical = fill_value_categorical
# self.most_frequent_time = []
self.numeric_imputer = SimpleImputer(
strategy=self._numeric_strategies[self.numeric_strategy],
fill_value=fill_value_numerical,
)
self.categorical_imputer = SimpleImputer(
strategy=self._categorical_strategies[self.categorical_strategy],
fill_value=fill_value_categorical,
)
self.time_imputer = SimpleImputer(
strategy=self._time_strategies[self.time_strategy],
)
def fit(self, X, y=None):
"""
Fit the imputer on dataset.
Args:
X : pd.DataFrame, the dataset to be imputed
Returns:
self : Simple_Imputer
"""
try:
data = X.drop(self.target, axis=1)
except:
data = X
self.numeric_columns = data.select_dtypes(
include=["float32", "float64", "int32", "int64"]
).columns
self.categorical_columns = data.select_dtypes(
include=["object", "bool", "string", "category"]
).columns
self.time_columns = data.select_dtypes(
include=["datetime64[ns]", "timedelta64[ns]"]
).columns
statistics = []
if not self.numeric_columns.empty:
self.numeric_imputer.fit(data[self.numeric_columns])
statistics.append((self.numeric_imputer.statistics_, self.numeric_columns))
if not self.categorical_columns.empty:
self.categorical_imputer.fit(data[self.categorical_columns])
statistics.append(
(self.categorical_imputer.statistics_, self.categorical_columns)
)
if not self.time_columns.empty:
for col in self.time_columns:
data[col] = data[col][data[col].notnull()].astype(np.int64)
self.time_imputer.fit(data[self.time_columns])
statistics.append((self.time_imputer.statistics_, self.time_columns))
self.statistics_ = np.zeros(shape=len(data.columns), dtype=object)
columns = list(data.columns)
for s, index in statistics:
for i, j in enumerate(index):
self.statistics_[columns.index(j)] = s[i]
return self
def transform(self, X, y=None):
"""
Impute all missing values in dataset.
Args:
X: pd.DataFrame, the dataset to be imputed
Returns:
data: pd.DataFrame, the imputed dataset
"""
data = X
imputed_data = []
if not self.numeric_columns.empty:
numeric_data = pd.DataFrame(
self.numeric_imputer.transform(data[self.numeric_columns]),
columns=self.numeric_columns,
index=data.index,
)
imputed_data.append(numeric_data)
if not self.categorical_columns.empty:
categorical_data = pd.DataFrame(
self.categorical_imputer.transform(data[self.categorical_columns]),
columns=self.categorical_columns,
index=data.index,
)
for col in categorical_data.columns:
categorical_data[col] = categorical_data[col].apply(str)
imputed_data.append(categorical_data)
if not self.time_columns.empty:
datetime_columns = data.select_dtypes(include=["datetime"]).columns
timedelta_columns = data.select_dtypes(include=["timedelta"]).columns
timedata_copy = data[self.time_columns].copy()
for col in self.time_columns:
timedata_copy[col] = timedata_copy[col][
timedata_copy[col].notnull()
].astype(np.int64)
time_data = pd.DataFrame(
self.time_imputer.transform(timedata_copy),
columns=self.time_columns,
index=data.index,
)
for col in datetime_columns:
time_data[col][data[col].notnull()] = data[col][data[col].notnull()]
time_data[col] = time_data[col].apply(pd.Timestamp)
for col in timedelta_columns:
time_data[col][data[col].notnull()] = data[col][data[col].notnull()]
time_data[col] = time_data[col].apply(pd.Timedelta)
imputed_data.append(time_data)
if imputed_data:
data.update(pd.concat(imputed_data, axis=1))
data.astype(X.dtypes)
return data
def fit_transform(self, X, y=None):
"""
Fit and impute on dataset.
Args:
X: pd.DataFrame, the dataset to be fitted and imputed
Returns:
pd.DataFrame, the imputed dataset
"""
data = X
self.fit(data)
return self.transform(data)
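# A minimal usage sketch (assumed column names, illustrative only):
#   imputer = Simple_Imputer(numeric_strategy="mean", categorical_strategy="most frequent",
#                            time_strategy="most frequent", target="y")
#   train_imputed = imputer.fit_transform(train_df)   # learns statistics, ignores the target column
#   test_imputed = imputer.transform(test_df)         # reuses the learned statistics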
# _______________________________________________________________________________________________________________________
# Imputation with surrogate columns
class Surrogate_Imputer(_BaseImputer):
"""
    Imputes a feature and creates a surrogate column (numerical, categorical & time).
    - Highly recommended to run the Define_dataTypes class first
    - it is also recommended to only apply this to features where it makes business sense to create a surrogate column
    - feature name has to be provided
    - only able to handle one feature at a time
    - Numerical values can be imputed with mean or median or filled with zeros
    - categorical missing values will be replaced with the most frequent value or "not_available"
    - Time values are imputed with the most frequent value
    - Ignores the target (y) variable
Args:
feature_name: string, provide features name
feature_type: string , all possible values {'numeric','categorical','date'}
strategy: string ,all possible values {'mean','median','zero','not_available','most frequent'}
target: string , name of the target variable
"""
def __init__(self, numeric_strategy, categorical_strategy, target):
self.numeric_strategy = numeric_strategy
self.target = target
self.categorical_strategy = categorical_strategy
def fit(self, dataset, y=None): #
def zeros(x):
return 0
data = dataset
# make a table for numerical variable with strategy stats
if self.numeric_strategy == "mean":
self.numeric_stats = (
data.drop(self.target, axis=1)
.select_dtypes(include=["float32", "float64", "int64"])
.apply(np.nanmean)
)
elif self.numeric_strategy == "median":
self.numeric_stats = (
data.drop(self.target, axis=1)
.select_dtypes(include=["float32", "float64", "int64"])
.apply(np.nanmedian)
)
else:
self.numeric_stats = (
data.drop(self.target, axis=1)
.select_dtypes(include=["float32", "float64", "int64"])
.apply(zeros)
)
self.numeric_columns = (
data.drop(self.target, axis=1)
.select_dtypes(include=["float32", "float64", "int64"])
.columns
)
# also need to learn if any columns had NA in training
self.numeric_na = pd.DataFrame(columns=self.numeric_columns)
for i in self.numeric_columns:
if data[i].isnull().any() == True:
self.numeric_na.loc[0, i] = True
else:
self.numeric_na.loc[0, i] = False
# for Catgorical ,
if self.categorical_strategy == "most frequent":
self.categorical_columns = (
data.drop(self.target, axis=1).select_dtypes(include=["object"]).columns
)
self.categorical_stats = pd.DataFrame(
columns=self.categorical_columns
) # place holder
for i in self.categorical_stats.columns:
self.categorical_stats.loc[0, i] = data[i].value_counts().index[0]
# also need to learn if any columns had NA in training, but this is only valid if strategy is "most frequent"
self.categorical_na = pd.DataFrame(columns=self.categorical_columns)
for i in self.categorical_columns:
if sum(data[i].isnull()) > 0:
self.categorical_na.loc[0, i] = True
else:
self.categorical_na.loc[0, i] = False
else:
self.categorical_columns = (
data.drop(self.target, axis=1).select_dtypes(include=["object"]).columns
)
self.categorical_na = pd.DataFrame(columns=self.categorical_columns)
self.categorical_na.loc[
0, :
] = False # (in this situation we are not making any surrogate column)
# for time, there is only one way, pick up the most frequent one
self.time_columns = (
data.drop(self.target, axis=1)
.select_dtypes(include=["datetime64[ns]"])
.columns
)
self.time_stats = pd.DataFrame(columns=self.time_columns) # place holder
self.time_na = pd.DataFrame(columns=self.time_columns)
for i in self.time_columns:
self.time_stats.loc[0, i] = data[i].value_counts().index[0]
# learn if time columns were NA
for i in self.time_columns:
if data[i].isnull().any() == True:
self.time_na.loc[0, i] = True
else:
self.time_na.loc[0, i] = False
return data # nothing to return
def transform(self, dataset, y=None):
data = dataset
# for numeric columns
for i, s in zip(data[self.numeric_columns].columns, self.numeric_stats):
array = data[i].isnull()
data[i].fillna(s, inplace=True)
# make a surrogate column if there was any
if self.numeric_na.loc[0, i] == True:
data[i + "_surrogate"] = array
# make it string
data[i + "_surrogate"] = data[i + "_surrogate"].apply(str)
# for categorical columns
if self.categorical_strategy == "most frequent":
for i in self.categorical_stats.columns:
# data[i].fillna(self.categorical_stats.loc[0,i],inplace=True)
array = data[i].isnull()
data[i] = data[i].fillna(self.categorical_stats.loc[0, i])
data[i] = data[i].apply(str)
# make surrogate column
if self.categorical_na.loc[0, i] == True:
data[i + "_surrogate"] = array
# make it string
data[i + "_surrogate"] = data[i + "_surrogate"].apply(str)
else: # this means replace na with "not_available"
for i in self.categorical_columns:
data[i].fillna("not_available", inplace=True)
data[i] = data[i].apply(str)
# no need to make surrogate since not_available is itself a new colum
# for time
for i in self.time_stats.columns:
array = data[i].isnull()
data[i].fillna(self.time_stats.loc[0, i], inplace=True)
# make surrogate column
if self.time_na.loc[0, i] == True:
data[i + "_surrogate"] = array
# make it string
data[i + "_surrogate"] = data[i + "_surrogate"].apply(str)
return data
def fit_transform(self, dataset, y=None):
data = dataset
data = self.fit(data)
return self.transform(data)
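# Illustrative note (assumed column name): if a numeric column 'age' contained NaNs during fit,
# transform() fills them and also adds a string column 'age_surrogate' ('True'/'False') that
# marks which rows were originally missing.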
class Iterative_Imputer(_BaseImputer):
def __init__(
self,
regressor: BaseEstimator,
classifier: BaseEstimator,
*,
target=None,
missing_values=np.nan,
initial_strategy_numeric: str = "mean",
initial_strategy_categorical: str = "most frequent",
initial_strategy_time: str = "most frequent",
ordinal_columns: Optional[list] = None,
max_iter: int = 10,
warm_start: bool = False,
imputation_order: str = "ascending",
verbose: int = 0,
random_state: int = None,
add_indicator: bool = False,
):
super().__init__(missing_values=missing_values, add_indicator=add_indicator)
self.regressor = regressor
self.classifier = classifier
self.initial_strategy_numeric = initial_strategy_numeric
self.initial_strategy_categorical = initial_strategy_categorical
self.initial_strategy_time = initial_strategy_time
self.max_iter = max_iter
self.warm_start = warm_start
self.imputation_order = imputation_order
self.verbose = verbose
self.random_state = random_state
self.target = target
if ordinal_columns is None:
ordinal_columns = []
self.ordinal_columns = list(ordinal_columns)
self._column_cleaner = Clean_Colum_Names()
def _initial_imputation(self, X):
if self.initial_imputer_ is None:
self.initial_imputer_ = Simple_Imputer(
target="__TARGET__", # dummy value, we don't actually want to drop anything
numeric_strategy=self.initial_strategy_numeric,
categorical_strategy=self.initial_strategy_categorical,
time_strategy=self.initial_strategy_time,
)
X_filled = self.initial_imputer_.fit_transform(X)
else:
X_filled = self.initial_imputer_.transform(X)
return X_filled
def _impute_one_feature(self, X, column, X_na_mask, fit):
if not fit:
check_is_fitted(self)
is_classification = (
X[column].dtype.name == "object" or column in self.ordinal_columns
)
if is_classification:
if column in self.classifiers_:
time, dummy, le, estimator = self.classifiers_[column]
elif not fit:
return X
else:
estimator = clone(self._classifier)
time = Make_Time_Features()
dummy = Dummify(column)
le = LabelEncoder()
else:
if column in self.regressors_:
time, dummy, le, estimator = self.regressors_[column]
elif not fit:
return X
else:
estimator = clone(self._regressor)
time = Make_Time_Features()
dummy = Dummify(column)
le = None
if fit:
fit_kwargs = {}
X_train = X[~X_na_mask[column]]
y_train = X_train[column]
# catboost handles categoricals itself
if "catboost" not in str(type(estimator)).lower():
X_train = time.fit_transform(X_train)
X_train = dummy.fit_transform(X_train)
X_train.drop(column, axis=1, inplace=True)
else:
X_train.drop(column, axis=1, inplace=True)
fit_kwargs["cat_features"] = []
for i, col in enumerate(X_train.columns):
if X_train[col].dtype.name == "object":
X_train[col] = pd.Categorical(
X_train[col], ordered=column in self.ordinal_columns
)
fit_kwargs["cat_features"].append(i)
fit_kwargs["cat_features"] = np.array(
fit_kwargs["cat_features"], dtype=int
)
X_train = self._column_cleaner.fit_transform(X_train)
if le:
y_train = le.fit_transform(y_train)
try:
assert self.warm_start
estimator.partial_fit(X_train, y_train)
except:
estimator.fit(X_train, y_train, **fit_kwargs)
X_test = X.drop(column, axis=1)[X_na_mask[column]]
X_test = time.transform(X_test)
# catboost handles categoricals itself
if "catboost" not in str(type(estimator)).lower():
X_test = dummy.transform(X_test)
else:
for col in X_test.select_dtypes("object").columns:
X_test[col] = pd.Categorical(
X_test[col], ordered=column in self.ordinal_columns
)
result = estimator.predict(X_test)
if le:
result = le.inverse_transform(result)
if fit:
if is_classification:
self.classifiers_[column] = (time, dummy, le, estimator)
else:
self.regressors_[column] = (time, dummy, le, estimator)
if result.dtype.name == "float64":
result = result.astype("float32")
X_test[column] = result
X.update(X_test[column])
gc.collect()
return X
def _impute(self, X, fit: bool):
if self.target in X.columns:
target_column = X[self.target]
X = X.drop(self.target, axis=1)
else:
target_column = None
original_columns = X.columns
original_index = X.index
X = X.reset_index(drop=True)
X = self._column_cleaner.fit_transform(X)
self.imputation_sequence_ = (
X.isnull().sum().sort_values(ascending=self.imputation_order == "ascending")
)
self.imputation_sequence_ = [
col
for col in self.imputation_sequence_[self.imputation_sequence_ > 0].index
if X[col].dtype.name != "datetime64[ns]"
]
X_na_mask = X.isnull()
X_imputed = self._initial_imputation(X.copy())
for i in range(self.max_iter if fit else 1):
for feature in self.imputation_sequence_:
get_logger().info(f"Iterative Imputation: {i+1} cycle | {feature}")
X_imputed = self._impute_one_feature(X_imputed, feature, X_na_mask, fit)
X_imputed.columns = original_columns
X_imputed.index = original_index
if target_column is not None:
X_imputed[self.target] = target_column
return X_imputed
def transform(self, X, y=None, **fit_params):
return self._impute(X, fit=False)
def fit_transform(self, X, y=None, **fit_params):
self.random_state_ = getattr(
self, "random_state_", check_random_state(self.random_state)
)
if self.regressor is None:
raise ValueError("No regressor provided")
else:
self._regressor = clone(self.regressor)
try:
            self._regressor.set_params(random_state=self.random_state_)
except:
pass
if self.classifier is None:
raise ValueError("No classifier provided")
else:
self._classifier = clone(self.classifier)
try:
            self._classifier.set_params(random_state=self.random_state_)
except:
pass
self.classifiers_ = {}
self.regressors_ = {}
self.initial_imputer_ = None
return self._impute(X, fit=True)
def fit(self, X, y=None, **fit_params):
self.fit_transform(X, y=y, **fit_params)
return self
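# Rough flow of Iterative_Imputer (sketch): missing cells are first filled by Simple_Imputer,
# then, for up to max_iter cycles, each column that had NAs is re-predicted from the other
# columns using the supplied regressor (numeric) or classifier (categorical / ordinal).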
# _______________________________________________________________________________________________________________________
# Zero and Near Zero Variance
class Zroe_NearZero_Variance(BaseEstimator, TransformerMixin):
"""
    - it eliminates the features having zero variance
    - it eliminates the features having near zero variance
    - Near zero variance is determined by
      1) the count of unique points divided by the total length of the feature being lower than a pre-specified threshold
      2) the most common point (count) divided by the second most common point (count) in the feature being greater than a pre-specified threshold
      Once both conditions are met, the feature is dropped
    - Ignores target variable
    Args:
        threshold_1: float (between 0.0 and 1.0), default is 0.10
        threshold_2: int (between 1 and 100), default is 20
        target: string, name of the target variable
"""
def __init__(self, target, threshold_1=0.1, threshold_2=20):
self.threshold_1 = threshold_1
self.threshold_2 = threshold_2
self.target = target
def fit(
self, dataset, y=None
): # from training data set we are going to learn what columns to drop
data = dataset
self.to_drop = []
sampl_len = len(data[self.target])
for i in data.drop(self.target, axis=1).columns:
# get the number of unique counts
u = pd.DataFrame(data[i].value_counts()).sort_values(
by=i, ascending=False, inplace=False
)
            # take the number of unique levels and divide it by the total sample count; rule 1 requires this ratio to be low (default 10%)
            first = len(u) / sampl_len
            # then check whether the most common count divided by the second most common count is at or above threshold_2 (default 20)
            if (
                len(u[i]) == 1
            ):  # if the column has no variance at all, make the ratio large so the column is dropped
                second = 100
            else:
                second = u.iloc[0, 0] / u.iloc[1, 0]
            # if both conditions hold, drop the column; however, we do not want to alter columns that indicate NA's
            if (
                (first <= self.threshold_1)
                and (second >= self.threshold_2)
                and (i[-10:] != "_surrogate")
            ):
                self.to_drop.append(i)
            # also drop the column if it has zero variance
            if (second == 100) and (i[-10:] != "_surrogate"):
                self.to_drop.append(i)
        return self
def transform(
self, dataset, y=None
): # since it is only for training data set , nothing here
data = dataset.drop(self.to_drop, axis=1)
return data
def fit_transform(self, dataset, y=None):
data = dataset
self.fit(data)
return self.transform(data)
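# Illustrative usage sketch (not part of the pipeline): 'train', 'test' and the column
# name "target" below are hypothetical placeholders.
#   nzv = Zroe_NearZero_Variance(target="target", threshold_1=0.1, threshold_2=20)
#   train = nzv.fit_transform(train)  # learns and drops zero / near-zero variance columns
#   test = nzv.transform(test)        # drops the same columns from unseen data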
# ____________________________________________________________________________________________________________________________
# rare catagorical variables
class Catagorical_variables_With_Rare_levels(BaseEstimator, TransformerMixin):
"""
-Merges levels in catagorical features with more frequent level if they appear less than a threshold count
e.g. Col=[a,a,a,a,b,b,c,c]
if threshold is set to 2 , then c will be mrged with b because both are below threshold
There has to be atleast two levels belwo threshold for this to work
the process will keep going until all the levels have atleast 2(threshold) counts
-Only handles catagorical features
-It is recommended to run the Zroe_NearZero_Variance and Define_dataTypes first
-Ignores target variable
Args:
threshold: int , default 10
target: string , name of the target variable
new_level_name: string , name given to the new level generated, default 'others'
"""
def __init__(self, target, new_level_name="others_infrequent", threshold=0.05):
self.threshold = threshold
self.target = target
self.new_level_name = new_level_name
def fit(
self, dataset, y=None
    ):  # we will learn, for each column, which levels to merge into the new level
        # every level of the categorical feature has to appear more often than the threshold count, otherwise it will be clubbed together as the new level
        # in order to apply the merge, there have to be at least two levels below the threshold
        # create a placeholder
data = dataset
self.ph = pd.DataFrame(
columns=data.drop(self.target, axis=1)
.select_dtypes(include="object")
.columns
)
# ph.columns = df.columns# catagorical only
for i in data[self.ph.columns].columns:
            # determine the infrequent count threshold
v_c = data[i].value_counts()
count_th = round(v_c.quantile(self.threshold))
a = np.sum(
pd.DataFrame(data[i].value_counts().sort_values())[i] <= count_th
)
            if a >= 2:  # there have to be at least two rare levels
count = pd.DataFrame(data[i].value_counts().sort_values())
count.columns = ["fre"]
count = count[count["fre"] <= count_th]
to_club = list(count.index)
self.ph.loc[0, i] = to_club
else:
self.ph.loc[0, i] = []
# # also need to make a place holder that keep records of all the levels , and in case a new level appears in test we will change it to others
# self.ph_level = pd.DataFrame(columns=data.drop(self.target,axis=1).select_dtypes(include="object").columns)
# for i in self.ph_level.columns:
# self.ph_level.loc[0,i] = list(data[i].value_counts().sort_values().index)
def transform(self, dataset, y=None): #
        # transform
data = dataset
for i in data[self.ph.columns].columns:
t_replace = self.ph.loc[0, i]
data[i].replace(
to_replace=t_replace, value=self.new_level_name, inplace=True
)
return data
def fit_transform(self, dataset, y=None):
data = dataset
self.fit(data)
return self.transform(data)
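# A minimal usage sketch, assuming a hypothetical dataframe with a "target" column:
#   rare = Catagorical_variables_With_Rare_levels(target="target", threshold=0.05, new_level_name="others_infrequent")
#   train = rare.fit_transform(train)  # infrequent levels become "others_infrequent"
#   test = rare.transform(test)        # the same levels are merged in unseen data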
# _______________________________________________________________________________________________________________________
# new catagorical level in test
class New_Catagorical_Levels_in_TestData(BaseEstimator, TransformerMixin):
"""
-This treats if a new level appears in the test dataset catagorical's feature (i.e a level on whihc model was not trained previously)
-It simply replaces the new level in test data set with the most frequent or least frequent level in the same feature in the training data set
-It is recommended to run the Zroe_NearZero_Variance and Define_dataTypes first
-Ignores target variable
Args:
target: string , name of the target variable
replacement_strategy:string , 'raise exception', 'least frequent' or 'most frequent' (default 'most frequent' )
"""
def __init__(self, target, replacement_strategy="most frequent"):
self.target = target
self.replacement_strategy = replacement_strategy
def fit(self, data, y=None):
# need to make a place holder that keep records of all the levels , and in case a new level appears in test we will change it to others
self.ph_train_level = pd.DataFrame(
columns=data.drop(self.target, axis=1)
.select_dtypes(include="object")
.columns
)
for i in self.ph_train_level.columns:
if self.replacement_strategy == "least frequent":
self.ph_train_level.loc[0, i] = list(
data[i].value_counts().sort_values().index
)
else:
self.ph_train_level.loc[0, i] = list(data[i].value_counts().index)
def transform(self, data, y=None): #
        # transform
        # we learn the levels of the test data as well, and then compare to check which levels are new there
self.ph_test_level = pd.DataFrame(
columns=data.drop(self.target, axis=1, errors="ignore")
.select_dtypes(include="object")
.columns
)
for i in self.ph_test_level.columns:
self.ph_test_level.loc[0, i] = list(
data[i].value_counts().sort_values().index
)
        # now we have levels for both test and train; start comparing and replacing levels in the test set (only if the test set has new levels)
for i in self.ph_test_level.columns:
new = list(
(set(self.ph_test_level.loc[0, i]) - set(self.ph_train_level.loc[0, i]))
)
# now if there is a difference , only then replace it
if len(new) > 0:
if self.replacement_strategy == "raise exception":
raise ValueError(
f"Column '{i}' contains levels '{new}' which were not present in train data."
)
data[i].replace(new, self.ph_train_level.loc[0, i][0], inplace=True)
return data
def fit_transform(
self, data, y=None
): # There is no transformation happening in training data set, its all about test
self.fit(data)
return data
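# Illustrative usage sketch ('train', 'test' and "target" are hypothetical):
#   ncl = New_Catagorical_Levels_in_TestData(target="target", replacement_strategy="most frequent")
#   ncl.fit_transform(train)      # only records the known levels; training data is returned unchanged
#   test = ncl.transform(test)    # unseen levels are replaced with the most frequent training level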
# _______________________________________________________________________________________________________________________
# Group akin features
class Group_Similar_Features(BaseEstimator, TransformerMixin):
"""
- Given a list of features , it creates aggregate features
- features created are Min, Max, Mean, Median, Mode & Std
- Only works on numerical features
Args:
list_of_similar_features: list of list, string , e.g. [['col',col2],['col3','col4']]
group_name: list, group name/names to be added as prefix to aggregate features, e.g ['gorup1','group2']
"""
def __init__(self, group_name=[], list_of_grouped_features=[[]]):
self.list_of_similar_features = list_of_grouped_features
self.group_name = group_name
        # validate that a list of lists was given
        try:
            np.array(self.list_of_similar_features).shape[0]
        except:
            raise TypeError(
                "Group_Similar_Features: list_of_grouped_features must be provided as a list of lists"
            )
def fit(self, data, y=None):
# nothing to learn
return self
def transform(self, dataset, y=None):
data = dataset
# # only going to process if there is an actual missing value in training data set
if len(self.list_of_similar_features) > 0:
for f, g in zip(self.list_of_similar_features, self.group_name):
data[g + "_Min"] = data[f].apply(np.min, 1)
data[g + "_Max"] = data[f].apply(np.max, 1)
data[g + "_Mean"] = data[f].apply(np.mean, 1)
data[g + "_Median"] = data[f].apply(np.median, 1)
data[g + "_Mode"] = stats.mode(data[f], 1)[0]
data[g + "_Std"] = data[f].apply(np.std, 1)
return data
else:
return data
def fit_transform(self, data, y=None):
return self.transform(data)
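# Illustrative usage sketch; the column names below are hypothetical:
#   gsf = Group_Similar_Features(group_name=["group1"], list_of_grouped_features=[["col1", "col2", "col3"]])
#   train = gsf.fit_transform(train)  # adds group1_Min / _Max / _Mean / _Median / _Mode / _Std columns
#   test = gsf.transform(test)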
# ____________________________________________________________________________________________________________________________________________________________________
# Binning for Continious
class Binning(BaseEstimator, TransformerMixin):
"""
- Converts numerical variables to catagorical variable through binning
- Number of binns are automitically determined through Sturges method
- Once discretize, original feature will be dropped
Args:
features_to_discretize: list of featur names to be binned
"""
def __init__(self, features_to_discretize):
self.features_to_discretize = features_to_discretize
def fit(self, data, y=None):
self.fit_transform(data, y=y)
return self
def transform(self, dataset, y=None):
data = dataset
# only do if features are provided
if len(self.features_to_discretize) > 0:
data_t = self.disc.transform(
np.array(data[self.features_to_discretize]).reshape(
-1, self.len_columns
)
)
# make pandas data frame
data_t = pd.DataFrame(
data_t, columns=self.features_to_discretize, index=data.index
)
# all these columns are catagorical
data_t = data_t.astype(str)
# drop original columns
data.drop(self.features_to_discretize, axis=1, inplace=True)
# add newly created columns
data = pd.concat((data, data_t), axis=1)
return data
def fit_transform(self, dataset, y=None):
data = dataset
# only do if features are given
if len(self.features_to_discretize) > 0:
# place holder for all the features for their binns
self.binns = []
for i in self.features_to_discretize:
# get numbr of binns
hist, _ = np.histogram(data[i], bins="sturges")
self.binns.append(len(hist))
# how many colums to deal with
self.len_columns = len(self.features_to_discretize)
# now do fit transform
self.disc = KBinsDiscretizer(
n_bins=self.binns, encode="ordinal", strategy="kmeans"
)
data_t = self.disc.fit_transform(
np.array(data[self.features_to_discretize]).reshape(
-1, self.len_columns
)
)
# make pandas data frame
data_t = pd.DataFrame(
data_t, columns=self.features_to_discretize, index=data.index
)
# all these columns are catagorical
data_t = data_t.astype(str)
# drop original columns
data.drop(self.features_to_discretize, axis=1, inplace=True)
# add newly created columns
data = pd.concat((data, data_t), axis=1)
return data
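# A minimal usage sketch (hypothetical numeric columns "num_col1" and "num_col2"):
#   binner = Binning(features_to_discretize=["num_col1", "num_col2"])
#   train = binner.fit_transform(train)  # bins via KBinsDiscretizer, k chosen by Sturges' rule
#   test = binner.transform(test)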
# ______________________________________________________________________________________________________________________
# Scaling & Power Transform
class Scaling_and_Power_transformation(BaseEstimator, TransformerMixin):
"""
-Given a data set, applies Min Max, Standar Scaler or Power Transformation (yeo-johnson)
-it is recommended to run Define_dataTypes first
- ignores target variable
Args:
target: string , name of the target variable
function_to_apply: string , default 'zscore' (standard scaler), all other {'minmaxm','yj','quantile','robust','maxabs'} ( min max,yeo-johnson & quantile power transformation, robust and MaxAbs scaler )
"""
def __init__(self, target, function_to_apply="zscore", random_state_quantile=42):
self.target = target
self.function_to_apply = function_to_apply
self.random_state_quantile = random_state_quantile
# self.transform_target = transform_target
# self.ml_usecase = ml_usecase
def fit(self, dataset, y=None):
data = dataset
# we only want to apply if there are numeric columns
self.numeric_features = (
data.drop(self.target, axis=1, errors="ignore")
.select_dtypes(include=["float32", "float64", "int64"])
.columns
)
if len(self.numeric_features) > 0:
if self.function_to_apply == "zscore":
self.scale_and_power = StandardScaler()
self.scale_and_power.fit(data[self.numeric_features])
elif self.function_to_apply == "minmax":
self.scale_and_power = MinMaxScaler()
self.scale_and_power.fit(data[self.numeric_features])
elif self.function_to_apply == "yj":
self.scale_and_power = PowerTransformer(
method="yeo-johnson", standardize=True
)
self.scale_and_power.fit(data[self.numeric_features])
elif self.function_to_apply == "quantile":
self.scale_and_power = QuantileTransformer(
random_state=self.random_state_quantile,
output_distribution="normal",
)
self.scale_and_power.fit(data[self.numeric_features])
elif self.function_to_apply == "robust":
self.scale_and_power = RobustScaler()
self.scale_and_power.fit(data[self.numeric_features])
elif self.function_to_apply == "maxabs":
self.scale_and_power = MaxAbsScaler()
self.scale_and_power.fit(data[self.numeric_features])
return self
def transform(self, dataset, y=None):
data = dataset
if len(self.numeric_features) > 0:
self.data_t = pd.DataFrame(
self.scale_and_power.transform(data[self.numeric_features])
)
# we need to set the same index as original data
self.data_t.index = data.index
self.data_t.columns = self.numeric_features
for i in self.numeric_features:
data[i] = self.data_t[i]
return data
else:
return data
def fit_transform(self, dataset, y=None):
data = dataset
self.fit(data)
# convert target if appropriate
# default behavious is quantile transformer
# if ((self.ml_usecase == 'regression') and (self.transform_target == True)):
# self.scale_and_power_target = QuantileTransformer(random_state=self.random_state_quantile,output_distribution='normal')
# data[self.target]=self.scale_and_power_target.fit_transform(np.array(data[self.target]).reshape(-1,1))
return self.transform(data)
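# Illustrative usage sketch ('train', 'test' and "target" are hypothetical):
#   scaler = Scaling_and_Power_transformation(target="target", function_to_apply="zscore")
#   train = scaler.fit_transform(train)  # fits on the numeric columns only
#   test = scaler.transform(test)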
# ______________________________________________________________________________________________________________________
# Scaling & Power Transform
class Target_Transformation(BaseEstimator, TransformerMixin):
"""
- Applies Power Transformation (yeo-johnson , Box-Cox) to target variable (Applicable to Regression only)
- 'bc' for Box_Coc & 'yj' for yeo-johnson, default is Box-Cox
- if target containes negtive / zero values , yeo-johnson is automatically selected
"""
def __init__(self, target, function_to_apply="bc"):
self.target = target
if function_to_apply == "bc":
function_to_apply = "box-cox"
else:
function_to_apply = "yeo-johnson"
self.function_to_apply = function_to_apply
def inverse_transform(self, dataset, y=None):
data = self.p_transform_target.inverse_transform(
np.array(dataset).reshape(-1, 1)
)
return data
def fit(self, dataset, y=None):
self.fit_transform(dataset, y=y)
return self
def transform(self, dataset, y=None):
data = dataset
if self.target in dataset.columns:
# apply transformation
data[self.target] = self.p_transform_target.transform(
np.array(data[self.target]).reshape(-1, 1)
)
return data
def fit_transform(self, dataset, y=None):
data = dataset
# if target has zero or negative values use yj instead
if any(data[self.target] <= 0):
self.function_to_apply = "yeo-johnson"
# apply transformation
self.p_transform_target = PowerTransformer(method=self.function_to_apply)
data[self.target] = self.p_transform_target.fit_transform(
np.array(data[self.target]).reshape(-1, 1)
)
return data
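# A minimal usage sketch for a regression target named "target" (hypothetical):
#   tt = Target_Transformation(target="target", function_to_apply="bc")
#   train = tt.fit_transform(train)                        # Box-Cox, or Yeo-Johnson if target has values <= 0
#   preds_original_scale = tt.inverse_transform(predictions)  # map model output back to the original scale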
# __________________________________________________________________________________________________________________________
# Time feature extractor
class Make_Time_Features(BaseEstimator, TransformerMixin):
"""
-Given a time feature , it extracts more features
- Only accepts / works where feature / data type is datetime64[ns]
- full list of features is:
['month','weekday',is_month_end','is_month_start','hour']
- all extracted features are defined as string / object
-it is recommended to run Define_dataTypes first
Args:
time_feature: list of feature names as datetime64[ns] , default empty/none , if empty/None , it will try to pickup dates automatically where data type is datetime64[ns]
list_of_features: list of required features , default value ['month','weekday','is_month_end','is_month_start','hour']
"""
def __init__(
self,
time_feature=None,
list_of_features=["month", "weekday", "is_month_end", "is_month_start", "hour"],
):
self.time_feature = time_feature
self.list_of_features = set(list_of_features)
def fit(self, data, y=None):
if self.time_feature is None:
self.time_feature = data.select_dtypes(include=["datetime64[ns]"]).columns
self.has_hour_ = set()
for i in self.time_feature:
if "hour" in self.list_of_features:
if any(x.hour for x in data[i]):
self.has_hour_.add(i)
return self
def transform(self, dataset, y=None):
data = dataset.copy()
# run fit transform first
def get_time_features(r):
features = []
if "month" in self.list_of_features:
features.append(("_month", str(r.month)))
if "weekday" in self.list_of_features:
features.append(("_weekday", str(r.weekday())))
if "is_month_end" in self.list_of_features:
features.append(
(
"_is_month_end",
"1"
if calendar.monthrange(r.year, r.month)[1] == r.day
else "0",
)
)
if "is_month_start" in self.list_of_features:
features.append(("_is_month_start", "1" if r.day == 1 else "0"))
return tuple(features)
# start making features for every column in the time list
for i in self.time_feature:
list_of_features = [get_time_features(r) for r in data[i]]
fd = defaultdict(list)
for x in list_of_features:
for k, v in x:
fd[k].append(v)
for k, v in fd.items():
data[i + k] = v
            # make the hour column if chosen
if "hour" in self.list_of_features and i in self.has_hour_:
h = [r.hour for r in data[i]]
data[f"{i}_hour"] = h
data[f"{i}_hour"] = data[f"{i}_hour"].apply(str)
# we dont need time columns any more
data.drop(self.time_feature, axis=1, inplace=True)
return data
def fit_transform(self, dataset, y=None):
# if no columns names are given , then pick datetime columns
self.fit(dataset, y=y)
return self.transform(dataset, y=y)
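# Illustrative usage sketch; by default datetime64[ns] columns are picked up automatically:
#   mtf = Make_Time_Features()
#   train = mtf.fit_transform(train)  # adds e.g. <col>_month, <col>_weekday, ... and drops the datetime column
#   test = mtf.transform(test)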
# ____________________________________________________________________________________________________________________________________________________________________
# Ordinal transformer
class Ordinal(BaseEstimator, TransformerMixin):
"""
- converts categorical features into ordinal values
- takes a dataframe , and information about column names and ordered categories as dict
- returns float panda data frame
"""
def __init__(self, info_as_dict):
self.info_as_dict = info_as_dict
def fit(self, data, y=None):
self.fit_transform(data, y=y)
return self
def transform(self, dataset, y=None):
data = dataset
new_data_test = pd.DataFrame(
self.enc.transform(data[self.info_as_dict.keys()]),
columns=self.info_as_dict.keys(),
index=data.index,
)
for i in self.info_as_dict.keys():
data[i] = new_data_test[i]
return data
def fit_transform(self, dataset, y=None):
data = dataset
        # create the ordered categories from the values given in the dict
cat_list = []
for i in self.info_as_dict.values():
i = [np.array(i)]
cat_list = cat_list + i
# now do fit transform
self.enc = OrdinalEncoder(categories=cat_list)
new_data_train = pd.DataFrame(
self.enc.fit_transform(data.loc[:, self.info_as_dict.keys()]),
columns=self.info_as_dict,
index=data.index,
)
# new_data = pd.DataFrame(self.enc.fit_transform(data.loc[:,self.info_as_dict.keys()]))
for i in self.info_as_dict.keys():
data[i] = new_data_train[i]
return data
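# A minimal usage sketch (the column "size" and its ordering are hypothetical):
#   ordinal = Ordinal(info_as_dict={"size": ["small", "medium", "large"]})
#   train = ordinal.fit_transform(train)
#   test = ordinal.transform(test)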
# _______________________________________________________________________________________________________________________
# make dummy variables
class Dummify(BaseEstimator, TransformerMixin):
"""
- makes one hot encoded variables for dummy variable
- it is HIGHLY recommended to run the Select_Data_Type class first
- Ignores target variable
Args:
target: string , name of the target variable
"""
def __init__(self, target):
self.target = target
        # create the one hot encoder object
self.ohe = OneHotEncoder(handle_unknown="ignore", dtype=np.float32)
def fit(self, X, y=None):
data = X
# will only do this if there are categorical variables
if len(data.select_dtypes(include=("object")).columns) > 0:
# we need to learn the column names once the training data set is dummify
# save non categorical data
self.data_nonc = data.drop(
self.target, axis=1, errors="ignore"
).select_dtypes(exclude=("object"))
if self.target in data.columns:
self.target_column = data[[self.target]]
else:
self.target_column = None
# # plus we will only take object data types
categorical_data = data.drop(
self.target, axis=1, errors="ignore"
).select_dtypes(include=("object"))
# # now fit the training column
self.ohe.fit(categorical_data)
self.data_columns = self.ohe.get_feature_names(categorical_data.columns)
return self
def transform(self, X, y=None):
data = X.copy()
# will only do this if there are categorical variables
if len(data.select_dtypes(include=("object")).columns) > 0:
# only for test data
self.data_nonc = data.drop(
self.target, axis=1, errors="ignore"
).select_dtypes(exclude=("object"))
# fit without target and only categorical columns
array = self.ohe.transform(
data.drop(self.target, axis=1, errors="ignore").select_dtypes(
include=("object")
)
).toarray()
data_dummies = pd.DataFrame(array, columns=self.data_columns)
data_dummies.index = self.data_nonc.index
if self.target in data.columns:
target_column = data[[self.target]]
else:
target_column = None
            # now put the target, numerical and categorical variables back together
data = pd.concat((target_column, self.data_nonc, data_dummies), axis=1)
del self.data_nonc
return data
else:
return data
def fit_transform(self, dataset, y=None):
data = dataset.copy()
# will only do this if there are categorical variables
if len(data.select_dtypes(include=("object")).columns) > 0:
self.fit(data)
# fit without target and only categorical columns
array = self.ohe.transform(
data.drop(self.target, axis=1, errors="ignore").select_dtypes(
include=("object")
)
).toarray()
data_dummies = pd.DataFrame(array, columns=self.data_columns)
data_dummies.index = self.data_nonc.index
            # now put the target, numerical and categorical variables back together
data = pd.concat((self.target_column, self.data_nonc, data_dummies), axis=1)
# remove unwanted attributes
del (self.target_column, self.data_nonc)
return data
else:
return data
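# Illustrative usage sketch ('train', 'test' and "target" are hypothetical placeholders):
#   dummy = Dummify(target="target")
#   train = dummy.fit_transform(train)  # object columns are one hot encoded, target is kept as-is
#   test = dummy.transform(test)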
# _______________________________________________________________________________________________________________________
# Outlier
class Outlier(BaseEstimator, TransformerMixin):
"""
- Removes outlier using ABOD,KNN,IFO,PCA & HOBS using hard voting
- Only takes numerical / One Hot Encoded features
"""
def __init__(
self, target, contamination=0.20, random_state=42, methods=["knn", "iso", "pca"]
):
self.target = target
self.contamination = contamination
self.random_state = random_state
self.methods = methods
def fit(self, data, y=None):
self.fit_transform(data, y=y)
return self
def transform(self, data, y=None):
return data
def fit_transform(self, dataset, y=None):
        # dummify if there are any objects
if len(dataset.select_dtypes(include="object").columns) > 0:
self.dummy = Dummify(self.target)
data = self.dummy.fit_transform(dataset)
else:
data = dataset
data_without_target = data.drop(self.target, axis=1)
if "knn" in self.methods:
self.knn = KNN(contamination=self.contamination)
self.knn.fit(data_without_target)
knn_predict = self.knn.predict(data_without_target)
data_without_target["knn"] = knn_predict
if "iso" in self.methods:
self.iso = IForest(
contamination=self.contamination,
random_state=self.random_state,
behaviour="new",
)
self.iso.fit(data_without_target)
iso_predict = self.iso.predict(data_without_target)
data_without_target["iso"] = iso_predict
if "pca" in self.methods:
self.pca = PCA_od(
contamination=self.contamination, random_state=self.random_state
)
self.pca.fit(data_without_target)
pca_predict = self.pca.predict(data_without_target)
data_without_target["pca"] = pca_predict
data_without_target["vote_outlier"] = data_without_target[self.methods].sum(
axis=1
)
self.outliers = data_without_target[
data_without_target["vote_outlier"] == len(self.methods)
].index
return dataset[~dataset.index.isin(self.outliers)]
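# A minimal usage sketch (hypothetical dataframe and target column):
#   out = Outlier(target="target", contamination=0.20, methods=["knn", "iso", "pca"])
#   train = out.fit_transform(train)  # rows flagged by every chosen method are removed
#   test = out.transform(test)        # transform is a no-op; outlier removal only applies to training data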
# ____________________________________________________________________________________________________________________________________________________________________
# Column Name cleaner transformer
class Clean_Colum_Names(BaseEstimator, TransformerMixin):
"""
- Cleans special chars that are not supported by jason format
"""
def fit(self, data, y=None):
return self
def transform(self, dataset, y=None):
data = dataset
        data.columns = data.columns.str.replace(r"[\,\}\{\]\[\:\"\']", "", regex=True)
return data
def fit_transform(self, dataset, y=None):
return self.transform(dataset, y=y)
# __________________________________________________________________________________________________________________________________________________________________________
# Clustering entire data
class Cluster_Entire_Data(BaseEstimator, TransformerMixin):
"""
- Applies kmeans clustering to the entire data set and produce clusters
- Highly recommended to run the DataTypes_Auto_infer class first
Args:
target_variable: target variable (integer or numerical only)
check_clusters_upto: to determine optimum number of kmeans clusters, set the uppler limit of clusters
"""
def __init__(self, target, check_clusters=20, random_state=42):
self.target = target
self.check_clusters = check_clusters + 1
self.random_state = random_state
def fit(self, data, y=None):
self.fit_transform(data, y=y)
return self
def transform(self, dataset, y=None):
data = dataset
data = data.drop(self.target, axis=1, errors="ignore")
# first convert to dummy
if len(data.select_dtypes(include="object").columns) > 0:
data_t1 = self.dummy.transform(data)
else:
data_t1 = data
# # # now make PLS
# # data_t1 = self.pls.transform(data_t1)
# # data_t1 = self.pca.transform(data_t1)
        # # now predict with the clusters
predict = pd.DataFrame(self.k_object.predict(data_t1), index=data.index)
data["data_cluster"] = predict
data["data_cluster"] = data["data_cluster"].astype("object")
if self.target in dataset.columns:
data[self.target] = dataset[self.target]
return data
def fit_transform(self, dataset, y=None):
data = dataset.copy()
# first convert to dummy (if there are objects in data set)
if len(data.select_dtypes(include="object").columns) > 0:
self.dummy = Dummify(self.target)
data_t1 = self.dummy.fit_transform(data)
data_t1 = data_t1.drop(self.target, axis=1)
else:
data_t1 = data.drop(self.target, axis=1)
# now make PLS
# self.pls = PLSRegression(n_components=len(data_t1.columns)-1)
# data_t1 = self.pls.fit_transform(data_t1.drop(self.target,axis=1),data_t1[self.target])[0]
# self.pca = PCA(n_components=len(data_t1.columns)-1)
# data_t1 = self.pca.fit_transform(data_t1.drop(self.target,axis=1))
        # we are going to make a placeholder for 2 to check_clusters clusters
self.ph = pd.DataFrame(
np.arange(2, self.check_clusters, 1), columns=["clusters"]
)
self.ph["Silhouette"] = float(0)
self.ph["calinski"] = float(0)
# Now start making clusters
for k in self.ph.index:
c = self.ph["clusters"][k]
self.k_object = cluster.KMeans(
n_clusters=c,
init="k-means++",
precompute_distances="auto",
n_init=10,
random_state=self.random_state,
)
self.k_object.fit(data_t1)
self.ph.iloc[k, 1] = metrics.silhouette_score(
data_t1, self.k_object.labels_
)
self.ph.iloc[k, 2] = metrics.calinski_harabasz_score(
data_t1, self.k_object.labels_
)
# now standardize the scores and make a total column
m = MinMaxScaler((-1, 1))
self.ph["calinski"] = m.fit_transform(
np.array(self.ph["calinski"]).reshape(-1, 1)
)
self.ph["Silhouette"] = m.fit_transform(
np.array(self.ph["Silhouette"]).reshape(-1, 1)
)
self.ph["total"] = self.ph["Silhouette"] + self.ph["calinski"]
# sort it by total column and take the first row column 0 , that would represent the optimal clusters
try:
self.clusters = int(
self.ph[self.ph["total"] == max(self.ph["total"])]["clusters"]
)
        except:  # in case there isn't a decisive measure, take calinski as the yardstick
self.clusters = int(
self.ph[self.ph["calinski"] == max(self.ph["calinski"])]["clusters"]
)
# Now make the final cluster object
self.k_object = cluster.KMeans(
n_clusters=self.clusters,
init="k-means++",
precompute_distances="auto",
n_init=10,
random_state=self.random_state,
)
# now do fit predict
predict = pd.DataFrame(self.k_object.fit_predict(data_t1), index=data.index)
data["data_cluster"] = predict
data["data_cluster"] = data["data_cluster"].astype("object")
if self.target in dataset.columns:
data[self.target] = dataset[self.target]
return data
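# Illustrative usage sketch ('train', 'test' and "target" are hypothetical):
#   ced = Cluster_Entire_Data(target="target", check_clusters=20)
#   train = ced.fit_transform(train)  # adds a categorical 'data_cluster' column
#   test = ced.transform(test)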
# __________________________________________________________________________________________________________________________________________
# Clustering catagorical data
class Reduce_Cardinality_with_Clustering(BaseEstimator, TransformerMixin):
"""
- Reduces the level of catagorical column / cardinality through clustering
- Highly recommended to run the DataTypes_Auto_infer class first
Args:
target_variable: target variable (integer or numerical only)
catagorical_feature: list of features on which clustering is to be applied / cardinality to be reduced
check_clusters_upto: to determine optimum number of kmeans clusters, set the uppler limit of clusters
"""
def __init__(
self, target, catagorical_feature=[], check_clusters=30, random_state=42,
):
self.target = target
self.catagorical_feature = catagorical_feature
self.check_clusters = check_clusters + 1
self.random = random_state
def fit(self, data, y=None):
self.fit_transform(data, y=y)
return self
def transform(self, dataset, y=None):
data = dataset
        # we already know which level belongs to which cluster, so all we need is to replace levels with the clusters we already have from the training data set
for i, z in zip(self.catagorical_feature, self.ph_data):
data[i] = data[i].replace(list(z["levels"]), z["cluster"])
return data
def fit_transform(self, dataset, y=None):
data = dataset.copy()
# first convert to dummy
if len(data.select_dtypes(include="object").columns) > 0:
self.dummy = Dummify(self.target)
data_t = self.dummy.fit_transform(
data.drop(self.catagorical_feature, axis=1)
)
# data_t1 = data_t1.drop(self.target,axis=1)
else:
data_t = data.drop(self.catagorical_feature, axis=1)
# now make PLS
self.pls = PLSRegression(
n_components=2
) # since we are only using two componenets to group #PLSRegression(n_components=len(data_t1.columns)-1)
data_pls = self.pls.fit_transform(
data_t.drop(self.target, axis=1), data_t[self.target]
)[0]
# # now we will take one component and then we calculate mean, median, min, max and sd of that one component grouped by the catagorical levels
self.ph_data = []
self.ph_clusters = []
for i in self.catagorical_feature:
data_t1 = pd.DataFrame(
dict(levels=data[i], comp1=data_pls[:, 0], comp2=data_pls[:, 1]),
index=data.index,
)
# now group by feature
data_t1 = data_t1.groupby("levels")
data_t1 = data_t1[["comp1", "comp2"]].agg(
["mean", "median", "min", "max", "std"]
) # this gives us a df with only numeric columns (min , max ) and level as index
# some time if a level has only one record its std will come up as NaN, so convert NaN to 1
data_t1.fillna(1, inplace=True)
# now number of clusters cant be more than the number of samples in aggregated data , so
self.check_clusters = min(self.check_clusters, len(data_t1))
            # # we are going to make a placeholder for 2 to check_clusters clusters
self.ph = pd.DataFrame(
np.arange(2, self.check_clusters, 1), columns=["clusters"]
)
self.ph["Silhouette"] = float(0)
self.ph["calinski"] = float(0)
# Now start making clusters
for k in self.ph.index:
c = self.ph["clusters"][k]
self.k_object = cluster.KMeans(
n_clusters=c,
init="k-means++",
precompute_distances="auto",
n_init=10,
random_state=self.random,
)
self.k_object.fit(data_t1)
self.ph.iloc[k, 1] = metrics.silhouette_score(
data_t1, self.k_object.labels_
)
self.ph.iloc[k, 2] = metrics.calinski_harabasz_score(
data_t1, self.k_object.labels_
)
# now standardize the scores and make a total column
m = MinMaxScaler((-1, 1))
self.ph["calinski"] = m.fit_transform(
np.array(self.ph["calinski"]).reshape(-1, 1)
)
self.ph["Silhouette"] = m.fit_transform(
np.array(self.ph["Silhouette"]).reshape(-1, 1)
)
self.ph["total"] = self.ph["Silhouette"] + self.ph["calinski"]
# sort it by total column and take the first row column 0 , that would represent the optimal clusters
try:
self.clusters = int(
self.ph[self.ph["total"] == max(self.ph["total"])]["clusters"]
)
            except:  # in case there isn't a decisive measure, take calinski as the yardstick
self.clusters = int(
self.ph[self.ph["calinski"] == max(self.ph["calinski"])]["clusters"]
)
self.ph_clusters.append(self.ph)
# Now make the final cluster object
self.k_object = cluster.KMeans(
n_clusters=self.clusters,
init="k-means++",
precompute_distances="auto",
n_init=10,
random_state=self.random,
)
# now do fit predict
predict = self.k_object.fit_predict(data_t1)
# put it back with the group by aggregate columns
data_t1["cluster"] = predict
data_t1["cluster"] = data_t1["cluster"].apply(str)
            # now we don't need all the columns; only the cluster column is required along with the index (the index also has a name, since we grouped by "levels")
data_t1 = data_t1[["cluster"]]
            # now convert the index to a column
data_t1.reset_index(
level=0, inplace=True
) # this table now only contains every level and its cluster
# self.data_t1= data_t1
# we can now replace cluster with the original level in the original data frame
data[i] = data[i].replace(list(data_t1["levels"]), data_t1["cluster"])
self.ph_data.append(data_t1)
if self.target in dataset.columns:
data[self.target] = dataset[self.target]
return data
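# A minimal usage sketch (hypothetical high-cardinality column "high_card_col"):
#   rcc = Reduce_Cardinality_with_Clustering(target="target", catagorical_feature=["high_card_col"], check_clusters=30)
#   train = rcc.fit_transform(train)  # levels are replaced by their cluster label
#   test = rcc.transform(test)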
# ____________________________________________________________________________________________________________________________________________
# Clustering catagorical data
class Reduce_Cardinality_with_Counts(BaseEstimator, TransformerMixin):
"""
- Reduces the level of catagorical column by replacing levels with their count & converting objects into float
Args:
catagorical_feature: list of features on which clustering is to be applied
"""
def __init__(self, catagorical_feature=[], float_dtype="float32"):
self.catagorical_feature = catagorical_feature
self.float_dtype = float_dtype
def fit(self, data, y=None):
self.fit_transform(data, y=y)
return self
def transform(self, dataset, y=None):
data = dataset
# we already know level counts
for i, z, k in zip(self.catagorical_feature, self.ph_data, self.ph_u):
data[i] = data[i].replace(k, z["counts"])
data[i] = data[i].astype(self.float_dtype)
return data
def fit_transform(self, dataset, y=None):
data = dataset
#
self.ph_data = []
self.ph_u = []
for i in self.catagorical_feature:
data_t1 = pd.DataFrame(
dict(
levels=data[i].groupby(data[i], sort=False).count().index,
counts=data[i].groupby(data[i], sort=False).count().values,
)
)
u = data[i].unique()
# replace levels with counts
data[i].replace(u, data_t1["counts"], inplace=True)
data[i] = data[i].astype(self.float_dtype)
self.ph_data.append(data_t1)
self.ph_u.append(u)
return data
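# Illustrative usage sketch (the column name is hypothetical):
#   rcount = Reduce_Cardinality_with_Counts(catagorical_feature=["high_card_col"])
#   train = rcount.fit_transform(train)  # levels become their training-set counts (float)
#   test = rcount.transform(test)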
# ____________________________________________________________________________________________________________________________________________
# take noneliner transformations
class Make_NonLiner_Features(BaseEstimator, TransformerMixin):
"""
- convert numerical features into polynomial features
- it is HIGHLY recommended to run the Autoinfer_Data_Type class first
- Ignores target variable
- it picks up data type float32 as numerical
- for multiclass classification problem , set subclass arg to 'multi'
Args:
target: string , name of the target variable
Polynomial_degree: int ,default 2
"""
def __init__(
self,
target,
ml_usecase="classification",
polynomial_degree=2,
other_nonliner_features=["sin", "cos", "tan"],
top_features_to_pick=0.20,
random_state=42,
subclass="ignore",
n_jobs=1,
float_dtype="float32",
):
self.target = target
self.polynomial_degree = polynomial_degree
self.ml_usecase = ml_usecase
self.other_nonliner_features = other_nonliner_features
self.top_features_to_pick = top_features_to_pick
self.random_state = random_state
self.subclass = subclass
self.n_jobs = n_jobs
self.float_dtype = float_dtype
def fit(self, data, y=None):
self.fit_transform(data, y=y)
return self
def transform(self, dataset, y=None): # same application for test and train
data = dataset
self.numeric_columns = (
data.drop(self.target, axis=1, errors="ignore")
.select_dtypes(include=self.float_dtype)
.columns
)
        if self.polynomial_degree >= 2:  # do not run anything if the degree is less than 2
# self.numeric_columns = data.drop(self.target,axis=1,errors='ignore').select_dtypes(include="float32").columns
# start taking powers
for i in range(2, self.polynomial_degree + 1):
ddc_power = np.power(data[self.numeric_columns], i)
ddc_col = list(ddc_power.columns)
ii = str(i)
ddc_col = [ddc_col + "_Power" + ii for ddc_col in ddc_col]
ddc_power.columns = ddc_col
# put it back with data dummy
# data = pd.concat((data,ddc_power),axis=1)
else:
ddc_power = pd.DataFrame()
# take sin:
if "sin" in self.other_nonliner_features:
ddc_sin = np.sin(data[self.numeric_columns])
ddc_col = list(ddc_sin.columns)
ddc_col = ["sin(" + i + ")" for i in ddc_col]
ddc_sin.columns = ddc_col
# put it back with data dummy
# data = pd.concat((data,ddc_sin),axis=1)
else:
ddc_sin = pd.DataFrame()
# take cos:
if "cos" in self.other_nonliner_features:
ddc_cos = np.cos(data[self.numeric_columns])
ddc_col = list(ddc_cos.columns)
ddc_col = ["cos(" + i + ")" for i in ddc_col]
ddc_cos.columns = ddc_col
# put it back with data dummy
# data = pd.concat((data,ddc_cos),axis=1)
else:
ddc_cos = pd.DataFrame()
# take tan:
if "tan" in self.other_nonliner_features:
ddc_tan = np.tan(data[self.numeric_columns])
ddc_col = list(ddc_tan.columns)
ddc_col = ["tan(" + i + ")" for i in ddc_col]
ddc_tan.columns = ddc_col
# put it back with data dummy
# data = pd.concat((data,ddc_tan),axis=1)
else:
ddc_tan = pd.DataFrame()
# dummy_all
dummy_all = pd.concat((data, ddc_power, ddc_sin, ddc_cos, ddc_tan), axis=1)
# we can select top features using RF
# # and we only want to do this if the dummy all have more than 50 features
# if len(dummy_all.columns) > 71:
dummy_all = dummy_all[self.columns_to_keep]
if self.target in dataset.columns:
dummy_all[self.target] = dataset[self.target]
return dummy_all
def fit_transform(self, dataset, y=None):
data = dataset
self.numeric_columns = (
data.drop(self.target, axis=1, errors="ignore")
.select_dtypes(include=self.float_dtype)
.columns
)
        if self.polynomial_degree >= 2:  # do not run anything if the degree is less than 2
# self.numeric_columns = data.drop(self.target,axis=1,errors='ignore').select_dtypes(include="float32").columns
# start taking powers
for i in range(2, self.polynomial_degree + 1):
ddc_power = np.power(data[self.numeric_columns], i)
ddc_col = list(ddc_power.columns)
ii = str(i)
ddc_col = [ddc_col + "_Power" + ii for ddc_col in ddc_col]
ddc_power.columns = ddc_col
# put it back with data dummy
# data = pd.concat((data,ddc_power),axis=1)
else:
ddc_power = pd.DataFrame()
# take sin:
if "sin" in self.other_nonliner_features:
ddc_sin = np.sin(data[self.numeric_columns])
ddc_col = list(ddc_sin.columns)
ddc_col = ["sin(" + i + ")" for i in ddc_col]
ddc_sin.columns = ddc_col
# put it back with data dummy
# data = pd.concat((data,ddc_sin),axis=1)
else:
ddc_sin = pd.DataFrame()
# take cos:
if "cos" in self.other_nonliner_features:
ddc_cos = np.cos(data[self.numeric_columns])
ddc_col = list(ddc_cos.columns)
ddc_col = ["cos(" + i + ")" for i in ddc_col]
ddc_cos.columns = ddc_col
# put it back with data dummy
# data = pd.concat((data,ddc_cos),axis=1)
else:
ddc_cos = pd.DataFrame()
# take tan:
if "tan" in self.other_nonliner_features:
ddc_tan = np.tan(data[self.numeric_columns])
ddc_col = list(ddc_tan.columns)
ddc_col = ["tan(" + i + ")" for i in ddc_col]
ddc_tan.columns = ddc_col
# put it back with data dummy
# data = pd.concat((data,ddc_tan),axis=1)
else:
ddc_tan = pd.DataFrame()
# dummy_all
dummy_all = pd.concat(
(data[[self.target]], ddc_power, ddc_sin, ddc_cos, ddc_tan), axis=1
)
# we can select top features using our Feature Selection Classic transformer
afs = Advanced_Feature_Selection_Classic(
target=self.target,
ml_usecase=self.ml_usecase,
top_features_to_pick=self.top_features_to_pick,
random_state=self.random_state,
subclass=self.subclass,
n_jobs=self.n_jobs,
)
dummy_all_t = afs.fit_transform(dummy_all)
data = pd.concat((data, dummy_all_t), axis=1)
# # making sure no duplicated columns are there
data = data.loc[:, ~data.columns.duplicated()]
self.columns_to_keep = data.drop(self.target, axis=1).columns
return data
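# A minimal usage sketch ('train', 'test' and "target" are hypothetical):
#   nlf = Make_NonLiner_Features(target="target", ml_usecase="regression", polynomial_degree=2)
#   train = nlf.fit_transform(train)  # adds power / sin / cos / tan features and keeps only the selected ones
#   test = nlf.transform(test)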
# ______________________________________________________________________________________________________________________________________________________
# Feature Selection
class Advanced_Feature_Selection_Classic(BaseEstimator, TransformerMixin):
"""
- Selects important features and reduces the feature space. Feature selection is based on Random Forest , Light GBM and Correlation
- to run on multiclass classification , set the subclass argument to 'multi'
"""
def __init__(
self,
target,
ml_usecase="classification",
top_features_to_pick=0.10,
random_state=42,
subclass="ignore",
n_jobs=1,
):
self.target = target
self.ml_usecase = ml_usecase
self.top_features_to_pick = 1 - top_features_to_pick
self.random_state = random_state
self.subclass = subclass
self.n_jobs = n_jobs
def fit(self, dataset, y=None):
self.fit_transform(dataset, y=y)
return self
def transform(self, dataset, y=None):
        # return the data with only the selected columns
data = dataset
# self.selected_columns.remove(self.target)
data = data[self.selected_columns_test]
if self.target in dataset.columns:
data[self.target] = dataset[self.target]
return data
def fit_transform(self, dataset, y=None):
dummy_all = dataset.copy()
dummy_all[self.target] = dummy_all[self.target].astype("float32")
# Random Forest
max_fe = min(70, int(np.sqrt(len(dummy_all.columns))))
max_sa = min(1000, int(np.sqrt(len(dummy_all))))
if self.ml_usecase == "classification":
m = rfc(
100,
max_depth=5,
max_features=max_fe,
n_jobs=self.n_jobs,
max_samples=max_sa,
random_state=self.random_state,
)
else:
m = rfr(
100,
max_depth=5,
max_features=max_fe,
n_jobs=self.n_jobs,
max_samples=max_sa,
random_state=self.random_state,
)
m.fit(dummy_all.drop(self.target, axis=1), dummy_all[self.target])
# self.fe_imp_table= pd.DataFrame(m.feature_importances_,columns=['Importance'],index=dummy_all.drop(self.target,axis=1).columns).sort_values(by='Importance',ascending= False)
self.fe_imp_table = pd.DataFrame(
m.feature_importances_,
columns=["Importance"],
index=dummy_all.drop(self.target, axis=1).columns,
)
self.fe_imp_table = self.fe_imp_table[
self.fe_imp_table["Importance"]
>= self.fe_imp_table.quantile(self.top_features_to_pick)[0]
]
top = self.fe_imp_table.index
dummy_all_columns_RF = dummy_all[top].columns
# LightGBM
max_fe = min(70, int(np.sqrt(len(dummy_all.columns))))
        # subsample fraction for LightGBM, capped at 1.0
        max_sa = min(1.0, 1000 / len(dummy_all))
if self.ml_usecase == "classification":
m = lgbmc(
n_estimators=100,
max_depth=5,
n_jobs=self.n_jobs,
subsample=max_sa,
random_state=self.random_state,
)
else:
m = lgbmr(
n_estimators=100,
max_depth=5,
n_jobs=self.n_jobs,
subsample=max_sa,
random_state=self.random_state,
)
m.fit(dummy_all.drop(self.target, axis=1), dummy_all[self.target])
# self.fe_imp_table= pd.DataFrame(m.feature_importances_,columns=['Importance'],index=dummy_all.drop(self.target,axis=1).columns).sort_values(by='Importance',ascending= False)
self.fe_imp_table = pd.DataFrame(
m.feature_importances_,
columns=["Importance"],
index=dummy_all.drop(self.target, axis=1).columns,
)
self.fe_imp_table = self.fe_imp_table[
self.fe_imp_table["Importance"]
>= self.fe_imp_table.quantile(self.top_features_to_pick)[0]
]
top = self.fe_imp_table.index
dummy_all_columns_LGBM = dummy_all[top].columns
# we can now select top correlated feature
if self.subclass != "multi":
corr = pd.DataFrame(np.corrcoef(dummy_all.T))
corr.columns = dummy_all.columns
corr.index = dummy_all.columns
# corr = corr[self.target].abs().sort_values(ascending=False)[0:self.top_features_to_pick+1]
corr = corr[self.target].abs()
corr = corr[corr.index != self.target] # drop the target column
corr = corr[corr >= corr.quantile(self.top_features_to_pick)]
corr = pd.DataFrame(dict(features=corr.index, value=corr)).reset_index(
drop=True
)
corr = corr.drop_duplicates(subset="value")
corr = corr["features"]
# corr = pd.DataFrame(dict(features=corr.index,value=corr)).reset_index(drop=True)
# corr = corr.drop_duplicates(subset='value')[0:self.top_features_to_pick+1]
# corr = corr['features']
else:
corr = list()
self.dummy_all_columns_RF = dummy_all_columns_RF
self.dummy_all_columns_LGBM = dummy_all_columns_LGBM
self.corr = corr
self.selected_columns = list(
set(
[self.target]
+ list(dummy_all_columns_RF)
+ list(corr)
+ list(dummy_all_columns_LGBM)
)
)
self.selected_columns_test = (
dataset[self.selected_columns].drop(self.target, axis=1).columns
)
return dataset[self.selected_columns]
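# Illustrative usage sketch ('train', 'test' and "target" are hypothetical placeholders):
#   afs = Advanced_Feature_Selection_Classic(target="target", ml_usecase="classification", top_features_to_pick=0.10)
#   train = afs.fit_transform(train)  # keeps features favoured by RF, LightGBM and correlation with the target
#   test = afs.transform(test)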
# _
# ______________________________________________________________________________________________________________________________________________________
# Boruta Feature Selection algorithm
# Base on: https://github.com/scikit-learn-contrib/boruta_py/blob/master/boruta/boruta_py.py
class Boruta_Feature_Selection(BaseEstimator, TransformerMixin):
"""
Boruta selection algorithm based on borutaPy sklearn-contrib and
<NAME>, https://m2.icm.edu.pl/boruta/
Selects the most important features.
Args:
target (str): target column name
ml_usecase (str): case: classification or regression
top_features_to_pick: to make...
max_iteration {int): overall iterations of shuffle and train forests
alpha {float): p-value on which
the option to favour one measur to another. e.g. if value is .6 , during feature selection tug of war, correlation target measure will have a higher say.
A value of .5 means both measure have equal say
"""
def __init__(
self,
target,
ml_usecase="classification",
top_features_to_pick=1.0,
max_iteration=200,
n_iter_no_change=25,
alpha=0.05,
random_state=42,
subclass="ignore",
n_jobs=1,
):
self.target = target
self.ml_usecase = ml_usecase
self.top_features_to_pick = top_features_to_pick
self.random_state = random_state
self.subclass = subclass
self.max_iteration = max_iteration
self.n_iter_no_change = n_iter_no_change
self.alpha = alpha
self.selected_columns_test = []
self.n_jobs = n_jobs
@property
def selected_columns(self):
return self.selected_columns_test + [self.target]
def fit(self, dataset, y=None):
from .patches.boruta_py import BorutaPyPatched
dummy_data = dataset
X, y = dummy_data.drop(self.target, axis=1), dummy_data[self.target].values
y = y.astype("float32")
X_cols = X.columns
X = X.values
if self.ml_usecase == "classification":
m = rfc(
100,
max_depth=5,
n_jobs=self.n_jobs,
random_state=self.random_state,
class_weight="balanced",
)
else:
m = rfr(
100, max_depth=5, n_jobs=self.n_jobs, random_state=self.random_state,
)
feat_selector = BorutaPyPatched(
m,
n_estimators="auto",
perc=int(self.top_features_to_pick * 100),
max_iter=self.max_iteration,
random_state=self.random_state,
early_stopping=(self.n_iter_no_change > 0),
n_iter_no_change=self.n_iter_no_change,
)
try:
feat_selector.fit(X, y)
self.selected_columns_test = list(X_cols[feat_selector.support_])
except:
# boruta may errors out if all features are selected
self.selected_columns_test = list(X_cols)
return self
def transform(self, dataset, y=None):
if self.target in dataset.columns:
return dataset[self.selected_columns]
else:
return dataset[self.selected_columns_test]
def fit_transform(self, dataset, y=None):
self.fit(dataset, y=y)
return self.transform(dataset, y=y)
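# A minimal usage sketch (hypothetical dataframe and target column):
#   boruta = Boruta_Feature_Selection(target="target", ml_usecase="classification", max_iteration=200)
#   train = boruta.fit_transform(train)  # keeps only the features Boruta confirms
#   test = boruta.transform(test)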
# _________________________________________________________________________________________________________________________________________
class Fix_multicollinearity(BaseEstimator, TransformerMixin):
"""
Fixes multicollinearity between predictor variables , also considering the correlation between target variable.
Only applies to regression or two class classification ML use case
Takes numerical and one hot encoded variables only
Args:
threshold (float): The utmost absolute pearson correlation tolerated beyween featres from 0.0 to 1.0
target_variable (str): The target variable/column name
correlation_with_target_threshold: minimum absolute correlation required between every feature and the target variable , default 1.0 (0.0 to 1.0)
correlation_with_target_preference: float (0.0 to 1.0), default .08 ,while choosing between a pair of features w.r.t multicol & correlation target , this gives
the option to favour one measur to another. e.g. if value is .6 , during feature selection tug of war, correlation target measure will have a higher say.
A value of .5 means both measure have equal say
"""
# mamke a constructer
def __init__(
self,
threshold,
target_variable,
correlation_with_target_threshold=0.0,
correlation_with_target_preference=1.0,
):
self.threshold = threshold
self.target_variable = target_variable
self.correlation_with_target_threshold = correlation_with_target_threshold
self.correlation_with_target_preference = correlation_with_target_preference
self.target_corr_weight = correlation_with_target_preference
self.multicol_weight = 1 - correlation_with_target_preference
# Make fit method
def fit(self, data, y=None):
"""
Args:
data = takes preprocessed data frame
Returns:
None
"""
if data[self.target_variable].dtype not in ["int32", "int64", "float32", "float64"]:
raise ValueError('dtype for the target variable should be int32, int64, float32, or float64 only')
# global data1
data1 = data.select_dtypes(include=["int32", "int64", "float32", "float64"])
# try:
# self.data1 = self.data1.astype('float16')
# except:
# None
# make an correlation db with abs correlation db
# self.data_c = self.data1.T.drop_duplicates()
# self.data1 = self.data_c.T
corr = pd.DataFrame(np.corrcoef(data1.T))
corr.columns = data1.columns
corr.index = data1.columns
# corr_matrix = abs(data1.corr())
corr_matrix = abs(corr)
# for every diagonal value, make it Nan
corr_matrix.values[
tuple([np.arange(corr_matrix.shape[0])] * 2)
] = np.NaN
# Now Calculate the average correlation of every feature with other, and get a pandas data frame
avg_cor = pd.DataFrame(corr_matrix.mean())
avg_cor["feature"] = avg_cor.index
avg_cor.reset_index(drop=True, inplace=True)
avg_cor.columns = ["avg_cor", "features"]
# Calculate the correlation with the target
targ_cor = pd.DataFrame(corr_matrix[self.target_variable].dropna())
targ_cor["feature"] = targ_cor.index
targ_cor.reset_index(drop=True, inplace=True)
targ_cor.columns = ["target_variable", "features"]
# Now, add a column for variable name and drop index
corr_matrix["column"] = corr_matrix.index
corr_matrix.reset_index(drop=True, inplace=True)
        # now we need to melt it, so that we get the pairwise correlations in two columns
cols = corr_matrix.column
melt = (
corr_matrix.melt(id_vars=["column"], value_vars=cols)
.sort_values(by="value", ascending=False)
.dropna()
)
# now bring in the avg correlation for first of the pair
merge = pd.merge(
melt, avg_cor, left_on="column", right_on="features"
).drop("features", axis=1)
# now bring in the avg correlation for second of the pair
merge = pd.merge(
merge, avg_cor, left_on="variable", right_on="features"
).drop("features", axis=1)
# now bring in the target correlation for first of the pair
merge = pd.merge(
merge, targ_cor, left_on="column", right_on="features"
).drop("features", axis=1)
# now bring in the avg correlation for second of the pair
merge = pd.merge(
merge, targ_cor, left_on="variable", right_on="features"
).drop("features", axis=1)
# sort and save
merge = merge.sort_values(by="value", ascending=False)
        # we now need to eliminate all the pairs that are actually duplicates, e.g. cor(x,y) = cor(y,x); find these and drop them
        merge["all_columns"] = merge["column"] + merge["variable"]
        # this puts all the corresponding pairs of features together so that we can keep only one, since the other is just a duplicate
        merge["all_columns"] = [sorted(i) for i in merge["all_columns"]]
        # now sort by the new column
        merge = merge.sort_values(by="all_columns")
        # take every second row
        merge = merge.iloc[::2, :]
# make a ranking column to eliminate features
merge["rank_x"] = round(
self.multicol_weight * (merge["avg_cor_y"] - merge["avg_cor_x"])
+ self.target_corr_weight
* (merge["target_variable_x"] - merge["target_variable_y"]),
6,
) # round it to 6 digits
        ## Now there will be rows where the rank is exactly zero; this is where the value (correlation between features) is exactly one (like price and price^2)
        ## so in that case, we can simply pick one of the two variables
        # but since features can be in either column, we will drop a feature from the 'column' side only if it does not also appear on the 'variable' side
        # both steps below return the list of columns to drop from here
        # this is how it goes
        ## For the portion where the correlation is exactly one!
one = merge[merge["rank_x"] == 0]
        # this portion is a bit involved
        # table 'one' has all the paired variables having a correlation of 1
        # in a nutshell, we can take any column (one side of a pair) and delete the other columns (other side of the pair)
        # however, one variable can appear more than once on either side, so we run a for loop to find all pairs
        # here it goes
        # take a list of all (but unique) variables that have a correlation of 1 with each other; we will make two copies
u_all = list(
pd.unique(pd.concat((one["column"], one["variable"]), axis=0))
)
u_all_1 = list(
pd.unique(pd.concat((one["column"], one["variable"]), axis=0))
)
# take a list of features (unique) for the first side of the pair
u_column = pd.unique(one["column"])
        # now we start picking each variable from one column (one side of the pair) and check it against the other column (other side of the pair)
        # to pull all corresponding / paired variables, and delete those variable names from the list of all unique variables
for i in u_column:
# print(i)
r = one[one["column"] == i]["variable"]
for q in r:
if q in u_all:
# print("_"+q)
u_all.remove(q)
        # now the unique list contains the variables that should remain, so in order to get the variables that should be deleted:
to_drop = list(set(u_all_1) - set(u_all))
# to_drop_a =(list(set(one['column'])-set(one['variable'])))
# to_drop_b =(list(set(one['variable'])-set(one['column'])))
# to_drop = to_drop_a + to_drop_b
        ## now treat the rows where the rank is not zero and the value (correlation) is greater than or equal to the threshold
non_zero = merge[
(merge["rank_x"] != 0.0) & (merge["value"] >= self.threshold)
]
# pick the column to delete
non_zero_list = list(
np.where(
non_zero["rank_x"] < 0,
non_zero["column"],
non_zero["variable"],
)
)
# add two list
self.to_drop = to_drop + non_zero_list
# make sure that target column is not a part of the list
try:
self.to_drop.remove(self.target_variable)
except:
pass
        # now we want to keep only the columns whose correlation with the target is above the threshold
self.to_drop_taret_correlation = []
if self.correlation_with_target_threshold != 0.0:
corr = pd.DataFrame(
np.corrcoef(data.drop(self.to_drop, axis=1).T),
columns=data.drop(self.to_drop, axis=1).columns,
index=data.drop(self.to_drop, axis=1).columns,
)
self.to_drop_taret_correlation = corr[self.target_variable].abs()
# to_drop_taret_correlation = data.drop(self.to_drop,axis=1).corr()[target_variable].abs()
self.to_drop_taret_correlation = self.to_drop_taret_correlation[
self.to_drop_taret_correlation < self.correlation_with_target_threshold
]
self.to_drop_taret_correlation = list(self.to_drop_taret_correlation.index)
# to_drop = corr + to_drop
try:
self.to_drop_taret_correlation.remove(self.target_variable)
except:
pass
return self
# now Transform
def transform(self, dataset, y=None):
"""
        Args:
data = takes preprocessed data frame
Returns:
data frame
"""
data = dataset
data = data.drop(self.to_drop, axis=1)
# now drop less correlated data
data.drop(self.to_drop_taret_correlation, axis=1, inplace=True, errors="ignore")
return data
# fit_transform
def fit_transform(self, data, y=None):
"""
Args:
data = takes preprocessed data frame
Returns:
data frame
"""
self.fit(data)
return self.transform(data)
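# Illustrative usage sketch (the threshold value and column name are hypothetical):
#   fmc = Fix_multicollinearity(threshold=0.9, target_variable="target")
#   train = fmc.fit_transform(train)  # drops one feature from every highly correlated pair
#   test = fmc.transform(test)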
# ____________________________________________________________________________________________________________________________________________________________________
# handle perfect multicollinearity
class Remove_100(BaseEstimator, TransformerMixin):
"""
- Takes DF, return data frame while removing features that are perfectly correlated (droping one)
"""
def __init__(self, target):
self.target = target
self.columns_to_drop = []
def fit(self, data, y=None):
self.fit_transform(data, y=y)
return self
def transform(self, dataset, y=None):
return dataset.drop(self.columns_to_drop, axis=1)
def fit_transform(self, dataset, y=None):
data = dataset
targetless_data = data.drop(self.target, axis=1)
# correlation should be calculated between at least two features, if there is only 1, there is nothing to delete
if len(targetless_data.columns) <= 1:
return data
corr = pd.DataFrame(np.corrcoef(targetless_data.T))
corr.columns = targetless_data.columns
corr.index = targetless_data.columns
corr_matrix = abs(corr)
# Now, add a column for variable name and drop index
corr_matrix["column"] = corr_matrix.index
corr_matrix.reset_index(drop=True, inplace=True)
# now we need to melt it, so that we get pair-wise correlations in two columns
cols = corr_matrix.column
melt = corr_matrix.melt(id_vars=["column"], value_vars=cols).sort_values(
by="value", ascending=False
) # .dropna()
melt["value"] = round(melt["value"], 2) # round it to two digits
# now pick rows where the value is one and 'column' != 'variable' (the two sides of the pair differ)
c1 = melt["value"] == 1.00
c2 = melt["column"] != melt["variable"]
melt = melt[((c1 == True) & (c2 == True))]
# we now need to eliminate all the pairs that are actually duplicates, e.g. cor(x,y) = cor(y,x); they are the same, so we need to find these and drop them
melt["all_columns"] = melt["column"] + melt["variable"]
# this puts all the corresponding pairs of features together, so that we can take only one of them, since the other is just a duplicate
melt["all_columns"] = [sorted(i) for i in melt["all_columns"]]
# # now sort by new column
melt = melt.sort_values(by="all_columns")
# # take every second row
melt = melt.iloc[::2, :]
# lets keep the columns on the left hand side of the table
self.columns_to_drop = melt["variable"]
return data.drop(self.columns_to_drop, axis=1)
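# --- Illustrative usage sketch (added for clarity, not part of the original module) ---
# A minimal, hedged example of how Remove_100 can be applied; the column names and
# values below are invented purely for illustration.
def _example_remove_100():
    example = pd.DataFrame(
        {
            "a": [1.0, 2.0, 3.0, 4.0],
            "b": [2.0, 4.0, 6.0, 8.0],  # perfectly correlated with "a"
            "target": [0, 1, 0, 1],
        }
    )
    remover = Remove_100(target="target")
    reduced = remover.fit_transform(example)
    # one of "a"/"b" is dropped because corr(a, b) == 1.0
    return reduced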
# _______________________________________________________________________________________________________________________________________________________________________________________________
# custom DFS
class DFS_Classic(BaseEstimator, TransformerMixin):
"""
- Automated feature interactions using multiplication, division, addition & subtraction
- Only accepts numeric / One Hot Encoded features
- Takes a DF, returns the same DF plus the selected interaction features
- for a multiclass classification problem, set the subclass arg to 'multi'
"""
def __init__(
self,
target,
ml_usecase="classification",
interactions=["multiply", "divide", "add", "subtract"],
top_n_correlated=0.05,
random_state=42,
subclass="ignore",
n_jobs=1,
):
self.target = target
self.interactions = interactions
self.top_n_correlated = top_n_correlated # (this will be 1- top_features , but handled in the Advance_feature_selection )
self.ml_usecase = ml_usecase
self.random_state = random_state
self.subclass = subclass
self.n_jobs = n_jobs
def fit(self, data, y=None):
self.fit_transform(data, y=y)
return self
def transform(self, dataset, y=None):
data = dataset
data_without_target = data.drop(self.target, axis=1, errors="ignore")
# for multiplication:
# we need both categorical and numerical columns
if "multiply" in self.interactions:
data_multiply = pd.concat(
[
data_without_target.mul(col[1], axis="index")
for col in data_without_target.iteritems()
],
axis=1,
)
data_multiply.columns = [
"_multiply_".join([i, j])
for j in data_without_target.columns
for i in data_without_target.columns
]
# we don't need to apply the rest of the conditions
data_multiply.index = data.index
else:
data_multiply = pd.DataFrame()
# for division, we only want it to apply to numerical columns
if "divide" in self.interactions:
data_divide = pd.concat(
[
data_without_target[self.numeric_columns].div(col[1], axis="index")
for col in data_without_target[self.numeric_columns].iteritems()
],
axis=1,
)
data_divide.columns = [
"_divide_".join([i, j])
for j in data_without_target[self.numeric_columns].columns
for i in data_without_target[self.numeric_columns].columns
]
data_divide.replace([np.inf, -np.inf], 0, inplace=True)
data_divide.fillna(0, inplace=True)
data_divide.index = data.index
else:
data_divide = pd.DataFrame()
# for addition, we only want it to apply to numerical columns
if "add" in self.interactions:
data_add = pd.concat(
[
data_without_target[self.numeric_columns].add(col[1], axis="index")
for col in data_without_target[self.numeric_columns].iteritems()
],
axis=1,
)
data_add.columns = [
"_add_".join([i, j])
for j in data_without_target[self.numeric_columns].columns
for i in data_without_target[self.numeric_columns].columns
]
data_add.index = data.index
else:
data_add = pd.DataFrame()
# for subtraction, we only want it to apply to numerical columns
if "subtract" in self.interactions:
data_substract = pd.concat(
[
data_without_target[self.numeric_columns].sub(col[1], axis="index")
for col in data_without_target[self.numeric_columns].iteritems()
],
axis=1,
)
data_substract.columns = [
"_subtract_".join([i, j])
for j in data_without_target[self.numeric_columns].columns
for i in data_without_target[self.numeric_columns].columns
]
data_substract.index = data.index
else:
data_substract = pd.DataFrame()
# get all the dummy data combined
dummy_all = pd.concat(
(data, data_multiply, data_divide, data_add, data_substract), axis=1
)
del data_multiply
del data_divide
del data_add
del data_substract
# now only return the columns we want:
dummy_all = dummy_all[self.columns_to_keep]
if self.target in dataset.columns:
dummy_all[self.target] = dataset[self.target]
return dummy_all
def fit_transform(self, dataset, y=None):
data = dataset
data_without_target = data.drop(self.target, axis=1, errors="ignore")
# we need to separate numerical and one-hot encoded columns
# self.ohe_columns = [i if ((len(data[i].unique())==2) & (data[i].unique()[0] in [0,1]) & (data[i].unique()[1] in [0,1]) ) else None for i in data.drop(self.target,axis=1).columns]
self.ohe_columns = [
i
for i in data.columns
if data[i].nunique() == 2
and data[i].unique()[0] in [0, 1]
and data[i].unique()[1] in [0, 1]
]
# self.ohe_columns = [i for i in self.ohe_columns if i is not None]
self.numeric_columns = [
i for i in data_without_target.columns if i not in self.ohe_columns
]
target_variable = data[[self.target]]
# for multiplication:
# we need both categorical and numerical columns
if "multiply" in self.interactions:
data_multiply = pd.concat(
[
data_without_target.mul(col[1], axis="index")
for col in data_without_target.iteritems()
],
axis=1,
)
data_multiply.columns = [
"_multiply_".join([i, j])
for j in data_without_target.columns
for i in data_without_target.columns
]
# we don't need columns that are self-interactions (i == j)
col = [
"_multiply_".join([i, j])
for j in data_without_target.columns
for i in data_without_target.columns
if i != j
]
data_multiply = data_multiply[col]
# we don't need columns whose total sum is zero (two categorical variables that never occur together)
col1 = [
i for i in data_multiply.columns if np.nansum(data_multiply[i]) != 0
]
data_multiply = data_multiply[col1]
data_multiply.index = data.index
else:
data_multiply = pd.DataFrame()
# for division, we only want it to apply to numerical columns
if "divide" in self.interactions:
data_divide = pd.concat(
[
data_without_target[self.numeric_columns].div(col[1], axis="index")
for col in data_without_target[self.numeric_columns].iteritems()
],
axis=1,
)
data_divide.columns = [
"_divide_".join([i, j])
for j in data_without_target[self.numeric_columns].columns
for i in data_without_target[self.numeric_columns].columns
]
# we don't need columns that are self-interactions (i == j)
col = [
"_divide_".join([i, j])
for j in data_without_target[self.numeric_columns].columns
for i in data_without_target[self.numeric_columns].columns
if i != j
]
data_divide = data_divide[col]
# we don't need columns whose total sum is zero (two categorical variables that never occur together)
col1 = [i for i in data_divide.columns if np.nansum(data_divide[i]) != 0]
data_divide = data_divide[col1]
# additionally we need to fill all the possible NaNs
data_divide.replace([np.inf, -np.inf], 0, inplace=True)
data_divide.fillna(0, inplace=True)
data_divide.index = data.index
else:
data_divide = pd.DataFrame()
# for addition, we only want it to apply to numerical columns
if "add" in self.interactions:
data_add = pd.concat(
[
data_without_target[self.numeric_columns].add(col[1], axis="index")
for col in data_without_target[self.numeric_columns].iteritems()
],
axis=1,
)
data_add.columns = [
"_add_".join([i, j])
for j in data_without_target[self.numeric_columns].columns
for i in data_without_target[self.numeric_columns].columns
]
# we don't need columns that are self-interactions (i == j)
col = [
"_add_".join([i, j])
for j in data_without_target[self.numeric_columns].columns
for i in data_without_target[self.numeric_columns].columns
if i != j
]
data_add = data_add[col]
# we don't need columns whose total sum is zero (two categorical variables that never occur together)
col1 = [i for i in data_add.columns if np.nansum(data_add[i]) != 0]
data_add = data_add[col1]
data_add.index = data.index
else:
data_add = pd.DataFrame()
# for subtraction, we only want it to apply to numerical columns
if "subtract" in self.interactions:
data_substract = pd.concat(
[
data_without_target[self.numeric_columns].sub(col[1], axis="index")
for col in data_without_target[self.numeric_columns].iteritems()
],
axis=1,
)
data_substract.columns = [
"_subtract_".join([i, j])
for j in data_without_target[self.numeric_columns].columns
for i in data_without_target[self.numeric_columns].columns
]
# we don't need columns that are self-interactions (i == j)
col = [
"_subtract_".join([i, j])
for j in data_without_target[self.numeric_columns].columns
for i in data_without_target[self.numeric_columns].columns
if i != j
]
data_substract = data_substract[col]
# we don't need columns whose total sum is zero (two categorical variables that never occur together)
col1 = [
i for i in data_substract.columns if np.nansum(data_substract[i]) != 0
]
data_substract = data_substract[col1]
data_substract.index = data.index
else:
data_substract = pd.DataFrame()
# get all the dummy data combined
dummy_all = pd.concat(
(data_multiply, data_divide, data_add, data_substract), axis=1
)
del data_multiply
del data_divide
del data_add
del data_substract
dummy_all[self.target] = target_variable
self.dummy_all = dummy_all
# apply advanced feature selection
afs = Advanced_Feature_Selection_Classic(
target=self.target,
ml_usecase=self.ml_usecase,
top_features_to_pick=self.top_n_correlated,
random_state=self.random_state,
subclass=self.subclass,
n_jobs=self.n_jobs,
)
dummy_all_t = afs.fit_transform(dummy_all)
data_fe_final = pd.concat(
(data, dummy_all_t), axis=1
) # self.data_fe[self.corr]
# # making sure no duplicated columns are there
data_fe_final = data_fe_final.loc[
:, ~data_fe_final.columns.duplicated()
] # new added
# # remove the target column
# # this is the final data we want: the original data plus the engineered features kept by the top-n-correlated selection
self.columns_to_keep = data_fe_final.drop(self.target, axis=1).columns
del dummy_all
del dummy_all_t
return data_fe_final
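# --- Illustrative usage sketch (added for clarity, not part of the original module) ---
# A hedged example of the intended call pattern for DFS_Classic; it relies on
# Advanced_Feature_Selection_Classic (defined earlier in this module) to keep only the
# interactions most correlated with the target. The column/target names are invented.
def _example_dfs_classic(train_df):
    dfs = DFS_Classic(
        target="target",
        ml_usecase="regression",
        interactions=["multiply", "add"],
        top_n_correlated=0.05,
    )
    # generated columns follow the "<col_i>_multiply_<col_j>" / "<col_i>_add_<col_j>" pattern
    return dfs.fit_transform(train_df)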
# ____________________________________________________________________________________________________________________________________________________________________
# Empty transformer
class Empty(BaseEstimator, TransformerMixin):
"""
- Takes DF, return same DF
"""
def fit(self, data, y=None):
return self
def transform(self, data, y=None):
return data
def fit_transform(self, data, y=None):
return self.transform(data)
# ____________________________________________________________________________________________________________________________________
# reduce feature space
class Reduce_Dimensions_For_Supervised_Path(BaseEstimator, TransformerMixin):
"""
- Takes a DF, returns a DF reduced with one of several dimensionality reduction models (pca_liner, pca_kernal, tsne, pls, incremental)
- except for pca_liner, every other method takes an integer number of components
- only takes numeric variables (float & One Hot Encoded)
- it is intended for supervised ML use cases, such as classification / regression
"""
def __init__(
self,
target,
method="pca_liner",
variance_retained_or_number_of_components=0.99,
random_state=42,
):
self.target = target
self.variance_retained_or_number_of_components = (
variance_retained_or_number_of_components
)
self.random_state = random_state
self.method = method
def fit(self, X, y=None):
self.fit_transform(X, y=y)
return self
def transform(self, X, y=None):
data = X
if self.method in [
"pca_liner",
"pca_kernal",
"tsne",
"incremental",
]: # if self.method in ['pca_liner' , 'pca_kernal', 'tsne' , 'incremental','psa']
data = data.drop(self.target, axis=1, errors="ignore")
data_pca = self.pca.transform(data)
data_pca = pd.DataFrame(data_pca)
data_pca.columns = [
"Component_" + str(i) for i in np.arange(1, len(data_pca.columns) + 1)
]
data_pca.index = data.index
if self.target in X.columns:
data_pca[self.target] = X[self.target]
return data_pca
else:
return X
def fit_transform(self, X, y=None):
data = X
if self.method == "pca_liner":
self.pca = PCA(
self.variance_retained_or_number_of_components,
random_state=self.random_state,
)
# fit transform
data_pca = self.pca.fit_transform(data.drop(self.target, axis=1, errors='ignore'))
data_pca = pd.DataFrame(data_pca)
data_pca.columns = [
"Component_" + str(i) for i in np.arange(1, len(data_pca.columns) + 1)
]
data_pca.index = data.index
if self.target in data.columns:
data_pca[self.target] = data[self.target]
return data_pca
elif self.method == "pca_kernal": # take number of components only
self.pca = KernelPCA(
self.variance_retained_or_number_of_components,
kernel="rbf",
random_state=self.random_state,
n_jobs=-1,
)
# fit transform
data_pca = self.pca.fit_transform(data.drop(self.target, axis=1, errors='ignore'))
data_pca = pd.DataFrame(data_pca)
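# --- Illustrative usage sketch (added for clarity, not part of the original module) ---
# A hedged example of the "pca_liner" path shown above: passing 0.99 keeps however many
# components are needed to retain ~99% of the variance. Names are invented.
def _example_reduce_dimensions(train_df):
    reducer = Reduce_Dimensions_For_Supervised_Path(
        target="target",
        method="pca_liner",
        variance_retained_or_number_of_components=0.99,
    )
    # output columns are renamed Component_1, Component_2, ... plus the target
    return reducer.fit_transform(train_df)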
import json
import logging
import os
import pathlib
import sys
from collections import OrderedDict
from datetime import datetime
import click
import humanfriendly
import pandas
__version__ = '1.1.5'
logger = logging.getLogger()
@click.group()
@click.option('--debug', is_flag=True)
@click.pass_context
def cli(ctx, debug):
"""
This is a tool to generate an excel file based on a provided source excel and transformation mapping
"""
log_format = '%(asctime)s|%(levelname)s|%(name)s|(%(funcName)s):-%(message)s'
logging.basicConfig(level=logging.DEBUG if debug else logging.INFO, stream=sys.stdout, format=log_format)
if ctx.invoked_subcommand not in ['version']:
logger.info(f'{"-" * 20} Starting Logging for {ctx.invoked_subcommand} (v{__version__}) {"-" * 20}')
def process_column_mappings(source_df, column_mappings):
out_df = source_df.copy(deep=True)
name_map = {}
exclude_columns = []
pending_columns = False
for x in column_mappings:
if x[0][:3] == '[-]':
exclude_columns.append(x[0][3:])
elif x[0] == '*':
pending_columns = True
else:
name_map.update({x[0]: x[1] if x[1] != '_' else x[0]})
index_map = {'_': []}
for mapping in column_mappings:
index = mapping[2]
value = mapping[0] if mapping[1] == '_' else mapping[1]
if index == '_':
if value != '*' and value[:3] != '[-]':
index_map['_'].append(value)
continue
if index not in index_map:
index_map[index] = value
exclude_columns.append(value)
else:
raise Exception(f'Cannot have same column index for multiple columns, please check your column mapping\n'
f'{index=}, {mapping=}')
out_df = out_df.rename(columns=name_map)
pending_columns_list = list(set(out_df.columns).difference(exclude_columns)) if pending_columns else []
return {'df': out_df, 'index_map': index_map, 'pending_columns': pending_columns_list}
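# --- Illustrative mapping sketch (added for clarity, not part of the original tool) ---
# A hedged example of the column-mapping triples this function expects, inferred from the
# parsing logic above: [source name, destination name, index], where '_' keeps the source
# name / leaves the index unassigned, '*' pulls in all remaining columns, and a '[-]'
# prefix excludes a column. All names and index values are invented.
_EXAMPLE_COLUMN_MAPPINGS = [
    ['order_id', 'Order Number', '1'],   # rename and pin to column index '1'
    ['customer', '_', '2'],              # keep the source name, pin to index '2'
    ['[-]internal_notes', '_', '_'],     # exclude this column entirely
    ['*', '_', '_'],                     # keep every other column, unpinned
]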
def process_mappings(source_df_dict, mappings):
worksheets_dict = {}
for mapping in mappings:
count = -1
for sheet_identifier, sheet_mapping in mapping.items():
count += 1
entry = get_dict_entry(count, sheet_identifier, source_df_dict)
sheet_name = entry.get('name')
if sheet_name not in worksheets_dict:
# noinspection PyArgumentList
worksheets_dict.update({sheet_name: {
'source': entry.get('item').copy(deep=True),
'dest': {}
}})
dest_sheet_name = sheet_mapping.get('dest_worksheet_name') or sheet_name
dest_sheet_name = sheet_name if dest_sheet_name == '_' else dest_sheet_name
mapping_processed = process_column_mappings(worksheets_dict.get(sheet_name).get('source'),
sheet_mapping.get('columns'))
mapping_processed.update({'merge_columns': sheet_mapping.get('merge_columns')})
worksheets_dict[sheet_name]['dest'].update({dest_sheet_name: mapping_processed})
return worksheets_dict
@cli.command()
@click.argument('source', nargs=-1)
@click.argument('mapping')
@click.option('-o', '--output', help='relative or absolute path to output file')
@click.pass_context
def transform(ctx, **kwargs):
transform_spreadsheets(**kwargs)
def transform_spreadsheets(source, mapping, output):
"""Produces a new spreadsheet with transformation mapping applied"""
s_time = datetime.now()
try:
source_paths = [get_path(x) for x in source]
mapping_path = get_path(mapping, make_dir=False)
output_path = get_path(output or 'excel_transform_output.xlsx', make_dir=True)
source_dfs = OrderedDict()
try:
logger.info('processing mappings file')
with open(mapping_path) as f:
mappings = json.load(f)
except Exception as e:
logger.critical(f'Encountered error trying to read the mapping file:\n{e}')
sys.exit()
logger.info('processing source files')
for source_path in source_paths:
try:
source_dfs.update({source_path.stem: pandas.read_excel(source_path, sheet_name=None)})
import pandas as pd # Data tables
import numpy as np # Arrays
from math import sqrt, atan, log, exp, sin, cos, tan
from scipy.integrate import odeint
from scipy.optimize import *
pi = np.pi
month = 7
# "!Boundary layers"
h_r = 5 # [W/m^2-K]
h_c = 7 # [W/m^2-K]
# h_in=h_r + h_c
h_in = 8 # [W/m^2-K]
h_out=17.5 # [W/m^2-K]
# "Air properties:"
v_a=0.8401 # [m^3/kg] "specific volume of humid air per kg of dry air"
c_p_a=1020 # [J/kg-K] "specific heat capacity of humid air per kg of dry air"
sigma_boltzman=5.67E-8
# "! Room Data"
# "Room height"
Height_room=2.7 # [m]
# "Lenght on 6h-0h direction"
Lenght_6h_0h=5.4 # [m]
# "Lenght on 9h-3h direction"
Lenght_9h_3h=1.8 # [m]
# "! Windows areas supposed to be included in 0h wall"
# Height_wd_0 =1.2 # [m]
Breadth_wd_0 =1.6 # [m]
Height_wd_sill_0 =0.8 #[m]
thickness_gl = 0.006 #[m]
# "! Windows parameters"
U_wd=1.49 # [W/m^2-K]
SF_gl=0.6
f_frame=0.3
rho_blind=0.64
rho_ground=0
slope_deg=90
# glazing
rho_glazing=2500 #[kg/m^3]
lambda_glazing=1.0 #[W/m.K]
c_p_glazing=750 #[J/kg.K]
# concrete bloc
rho_concrete_bloc=1200 #[kg/m^3]
lambda_concrete_bloc=1.273 #[W/m.K]
c_p_concrete_bloc=840 #[J/kg.K]
# Hollow concrete
rho_hollow_concrete=1600 #[kg/m^3]
lambda_hollow_concrete=1.182 #[W/m.K]
c_p_hollow_concrete=840 #[J/kg.K]
# plaster
e_suspended_ceiling=0.01 #[m]
lambda_suspended_ceiling=0.2 #[W/m.K]
rho_suspended_ceiling=1300 #[kg/m^3]
c_p_suspended_ceiling=840 #[J/kg.K]
# wood panel
e_raised_floor=0.02 #[m]
lambda_raised_floor=0.2 #[W/m.K]
rho_raised_floor=1600 #[kg/m^3]
c_p_raised_floor=800 #[J/kg.K]
# carpet
e_carpet=0.02 #[m]
lambda_carpet=0.2 #[W/m.K]
rho_carpet=1600 #[kg/m^3]
c_p_carpet=800 #[J/kg.K]
# blind
e_blind=0.002 #[m]
lambda_blind=0.2 #[W/m.K]
rho_blind=1600 #[kg/m^3]
c_p_blind=800 #[J/kg.K]
U_half_carpet = 2 * lambda_carpet/e_carpet
# Air layer
R_air_layer=0.17 #[m^2.K/W] #"0.17 for air +0.18 for insulation#"
U_air_layer=1/R_air_layer
thickness_air_layer=0.06#[m]
rho_air_layer=1.2 #[kg/m^3]
lambda_air_layer=thickness_air_layer/R_air_layer
c_p_air_layer=1060 #[J/kg.K]
def Qdot(iflag_internal_blind, iflag_suspended_ceiling, iflag_raised_floor, iflag_carpet, \
Q_dot_rad_max, Q_dot_conv_max, hour_start_occ, hour_stop_occ, \
hour_start_plant, hour_stop_plant, M_A, H_B, Q_dot_sol_wd_max_no_shading, \
Q_dot_sol_wd, month):
# "!Boundary layers"
# h_r=5 # [W/m^2-K]
# h_c = 7 # [W/m^2-K]
# h_in=h_r + h_c
# "! Room Data"
# "Room height"
Height_room=2.7 # [m]
# "Lenght on 6h-0h direction"
Lenght_6h_0h=5.4 # [m]
# "Lenght on 9h-3h direction"
Lenght_9h_3h=1.8 # [m]
# "! Windows areas supposed to be included in 0h wall"
# Height_wd_0 =1.2 # [m]
Breadth_wd_0 =1.6 # [m]
Height_wd_sill_0 =0.8 #[m]
H_B_wd=max(0.001,H_B)
Height_wd_0=H_B_wd*Breadth_wd_0
if Height_wd_0 <= Height_room-0.2:
Height_wd = Height_wd_0
Breadth_wd = Breadth_wd_0
else:
Height_wd = Height_room-0.2
Breadth_wd = (Height_room-0.2) / H_B_wd
if Height_wd <= Height_room-1:
Height_wd_sill = Height_wd_sill_0
else:
Height_wd_sill = Height_room-Height_wd-0.1
# "!Window area"
area_wd=Height_wd*Breadth_wd
area_wall_0h=max(0,Height_room*Lenght_9h_3h-area_wd)
area_wall_3h=Height_room*Lenght_6h_0h
area_wall_6h=Height_room*Lenght_9h_3h
area_wall_9h=Height_room*Lenght_6h_0h
area_ceiling=Lenght_9h_3h*Lenght_6h_0h
area_floor=Lenght_9h_3h*Lenght_6h_0h
n_walls=6
area_wall = np.array([area_wall_0h, area_wall_3h, area_wall_6h, area_wall_9h, area_ceiling, area_floor])
area_wall_wd=np.sum(area_wall)+area_wd
#"! Estimated Floor Structure Mass and Wall Mass per square meter of area"
M_per_A_tot=max(10,M_A)
M_tot=max(100,area_floor*M_per_A_tot)
M_per_A_wall = M_tot/((area_wall[0]+np.sum(area_wall[1:4])/2)+2*(area_ceiling/2+area_floor/2))
M_per_A_floor=2*M_per_A_wall
#!Indoor air capacity"
C_a_in=5*Height_room*Lenght_6h_0h*Lenght_9h_3h*c_p_a/v_a
# Glazing capacity
C_gl = area_wd * (1-f_frame) * thickness_gl * rho_glazing * c_p_glazing
# Suspended ceiling
C_A_ce = e_suspended_ceiling*rho_suspended_ceiling*c_p_suspended_ceiling
# Raised floor
C_A_fl = e_raised_floor*rho_raised_floor*c_p_raised_floor
# Carpet
C_A_cp = e_carpet*rho_carpet*c_p_carpet
# Blind
C_bl = area_wd * e_blind*rho_blind*c_p_blind
# "!Total number of finite element layers, with two degree two elements by layer"
n_layers = 2
nl=n_layers
# "! internal vertical wall layers"
thickness_wall= (M_per_A_wall/2)/rho_concrete_bloc
thickness_wall_int= thickness_wall/n_layers * np.ones(n_layers)
lambda_wall_int= lambda_concrete_bloc * np.ones(n_layers)
rho_wall_int= rho_concrete_bloc * np.ones(n_layers)
c_layer_wall_int= c_p_concrete_bloc * np.ones(n_layers)
# "! floor layers"
thickness_floor=(M_per_A_floor/2)/rho_hollow_concrete
thickness_floor_int= thickness_floor/n_layers * np.ones(n_layers)
lambda_floor_int= lambda_hollow_concrete * np.ones(n_layers)
rho_floor_int= rho_hollow_concrete * np.ones(n_layers)
c_layer_floor_int= c_p_hollow_concrete * np.ones(n_layers)
# Reverse arrays
thickness_floor_int_2 =thickness_floor_int[::-1]
lambda_floor_int_2 =lambda_floor_int[::-1]
rho_floor_int_2 =rho_floor_int[::-1]
c_layer_floor_int_2 =c_layer_floor_int[::-1]
# Matrixes of vertical wall layers"
n_elem,R_nobl_wall,L_wall,C_wall = wall_matrix(n_layers,thickness_wall_int,lambda_wall_int,rho_wall_int,c_layer_wall_int)
# Matrixes of floor layers"
n_elem,R_nobl_floor,L_floor,C_floor = wall_matrix(n_layers,thickness_floor_int,lambda_floor_int,rho_floor_int,c_layer_floor_int)
n_nodes=2*n_elem+1
shape = (n_walls, n_nodes, n_nodes)
C_matrix = np.zeros(shape) ; L_matrix = np.zeros(shape)
# External wall
C_matrix[0] = C_wall; L_matrix[0] = L_wall
# Internal walls
C_matrix[1] = C_wall; L_matrix[1] = L_wall
C_matrix[2] = C_wall; L_matrix[2] = L_wall
C_matrix[3] = C_wall; L_matrix[3] = L_wall
# Ceiling
C_matrix[4] = C_wall; L_matrix[4] = L_wall
# Floor
C_matrix[5] = C_floor; L_matrix[5] = L_floor
# "! ComputeView factor from walls to walls, from walls to window and from window to walls"
FV_wall,FV_to_wd,FV_wd_to = View_factors(Lenght_6h_0h,Lenght_9h_3h,Height_room,Breadth_wd,Height_wd_sill,Height_wd)
# Boundary layers"
Ah_c_wall=h_c*area_wall
Ah_r_wall_to_wd=h_r*area_wall*FV_to_wd
Ah_r_wd_to_wall=h_r*area_wd*FV_wd_to
Ah_r_wall=h_r*np.diag(area_wall) @ FV_wall
Ah_c_wd=area_wd*h_c
Ah_c_internal_blind=2*area_wd*h_c
Ah_r_wd_internal_blind=area_wd*h_r
#!Window"
R_wd_no_in_bl=max(0,1/U_wd-1/h_in)
U_wd_no_in_bl=1/R_wd_no_in_bl
AU_wd_no_in_bl=area_wd*U_wd_no_in_bl
if iflag_raised_floor==1 and iflag_carpet==1:
iflag_carpet=0
hour_start_coolingplant=min(24,max(0,hour_start_plant))
hour_stop_coolingplant=min(24,max(0,hour_stop_plant))
hour_start_occupancy=min(24,max(0,hour_start_occ))
hour_stop_occupancy=min(24,max(0,hour_stop_occ))
# "!Set points"
t_out=26 #[°C]
t_a_in_set=26 #[°C]
t_init=26 #[°C]
C_t_in=2 #[K^-1]
# Initial conditions"
t_a_in_init=t_a_in_set
U_c_in_init=C_a_in*t_init
# "! Cooling system sizing"
Q_dot_cooling_max=Q_dot_sol_wd_max_no_shading + Q_dot_rad_max + Q_dot_conv_max
# Simulation period
n_day_sim=3
hour_start=0
n_hours_sim=24*n_day_sim
hour_stop=hour_start + n_hours_sim
tau_initial=hour_start*3600
tau_final=hour_stop*3600
DELTAtau=600 * 2 #[s]
# Time in s : Create an array of evenly-spaced values
tau = np.arange(tau_initial,tau_final+1,DELTAtau)
# Hour and Day from the start of the simulation
hour = tau/3600
hour_per_0 = hour-24*np.trunc(hour/24)
# np.choose(condition,[action if condition = 0 or false, action if condition = 1 or true])
# np.choose(array, [action if condition = 0, action if condition = 1 , action if condition = 2 ...)])
hour_per=np.choose(hour_per_0 > 0.000001,[24,hour_per_0])
day = hour/24
day_int_0 = np.trunc(hour/24)+1
day_int = day_int_0-1
# Starting hour in the sun data table according to the month of simulation
month_start=max(1,min(12,month))
hour_start = np.zeros(13)
hour_start[1]=1+15*24; hour_start[2]=1+(31+15)*24; hour_start[3]=1+(31+28+15)*24; hour_start[4]=1+(2*31+28+15)*24; hour_start[5]=1+(2*31+28+30+15)*24; hour_start[6]=1+(3*31+28+30+15)*24
hour_start[7]=1+(3*31+28+2*30+15)*24; hour_start[8]=1+(4*31+28+2*30+15)*24; hour_start[9]=1+(5*31+28+2*30+15)*24; hour_start[10]=1+(5*31+28+3*30+15)*24
hour_start[11]=1+(6*31+28+3*30+15)*24; hour_start[12]=1+(6*31+28+4*30+15)*24
# Hour and Day from the start of the year (simulation starts at 15th of the considered month)
hour_yr = hour + float(hour_start[month]) - 1
day_yr = hour_yr/24
# #Gains factor
# f_gains = np.where(Q_dot_sol_wd_max_no_shading > 0.1, Q_dot_sol_wd_no_shading/Q_dot_sol_wd_max_no_shading, 0)
# Plant on/off
# Smooth starting of the system when intermittent cooling is performed
# hour_start_occupancy = hour_start_coolingplant
f_plant=np.asarray([plant(hi,hour_start_coolingplant,hour_stop_coolingplant,hour_start_occupancy) for hi in hour_per])
#!Occupancy heat gains#
f_gains_occ=np.asarray([occupancy(hi,hour_start_occupancy,hour_stop_occupancy) for hi in hour_per])
# Heat gains
#! Share internal heat gains into radiative and convective#
Q_dot_sensible_gains_rad=f_gains_occ*Q_dot_rad_max
Q_dot_sensible_gains_conv=f_gains_occ*Q_dot_conv_max
#! Radiative sensible heat gains shared as function of walls areas#
if area_wall_wd>0:
Q_dot_rad_wall= np.asarray([np.asarray([x*y/area_wall_wd for y in Q_dot_sensible_gains_rad]) for x in area_wall])
Q_dot_rad_wd=Q_dot_sensible_gains_rad*area_wd/area_wall_wd
else:
Q_dot_rad_wall= np.asarray([np.asarray([x*y*0 for y in Q_dot_sensible_gains_rad]) for x in area_wall])
Q_dot_rad_wd= Q_dot_sensible_gains_rad*0
# Radiative sensible solar heat gains on floor except if there is an internal blind
# Q_dot_rad_wall= np.zeros((n_walls, len(tau)))
# Q_dot_rad_wd= np.zeros((len(tau)))
Q_dot_rad_wall[5,:] = Q_dot_rad_wall[5,:] + (1-iflag_internal_blind)*Q_dot_sol_wd
C_t_in = 2
tau_0 = tau[0]
t_a_in_0 = t_init
t_s_wd_0 = t_init
t_s_ce_0 = t_init
t_s_fl_0 = t_init
t_s_cp_0 = t_init
t_s_bl_0 = t_init
Ti0 = [t_a_in_0 ]
Ti0.extend([t_s_wd_0 ])
Ti0.extend([t_s_ce_0 ])
Ti0.extend([t_s_fl_0 ])
Ti0.extend([t_s_cp_0 ])
Ti0.extend([t_s_bl_0 ])
Ti0.extend(t_init * np.ones(n_nodes * n_walls))
Q_dot_cooling_v = []
t_a_in_set = 26.01
def model_dTi_t(Ti, tau):
T1 = Ti[0]
T2 = Ti[1]
T3 = Ti[2]
T4 = Ti[3]
T5 = Ti[4]
T6 = Ti[5]
Tw = Ti[6:].reshape((n_walls, n_nodes))
ind = int((tau - tau_0) /DELTAtau)
if ind>len(f_plant)-1: ind=len(f_plant)-1
# Internal air capacity heat balance#
t_a_in = T1
if (C_t_in*(t_a_in-t_a_in_set) > 0 ) and (C_t_in*(t_a_in-t_a_in_set) < 1 ) :
X_cooling=C_t_in*(t_a_in-t_a_in_set)
elif C_t_in*(t_a_in-t_a_in_set) > 1 :
X_cooling=1
else:
X_cooling=0
Q_dot_cooling=f_plant[ind]*X_cooling*Q_dot_cooling_max
Q_dot_cooling_v.append(tau)
# Glazing temperature
t_s_wd = T2
# Suspended ceiling temperature
t_s_ce = T3
# Raised floor temperature
t_s_fl = T4
# Carpet temperature
t_s_cp = T5
# Internal blind temperature
t_s_bl = T6
# Radiative and convective heat exchanges between wall surfaces
t_s_wall = Tw[:,0]
if iflag_suspended_ceiling == 1 : t_s_wall[4] = t_s_ce
if iflag_raised_floor == 1 : t_s_wall[5] = t_s_fl
if iflag_carpet == 1 : t_s_wall[5] = t_s_cp
# Wall surface nodes#
Q_dot_r_i_to_j = np.diag(t_s_wall) @ Ah_r_wall - Ah_r_wall @ np.diag(t_s_wall)
# Wall to indoor air convective exchanges#
Q_dot_c_in_to_wall = Ah_c_wall*(t_a_in-t_s_wall)
# Window heat balance
Q_dot_out_to_wd= AU_wd_no_in_bl*(t_out-t_s_wd)
Q_dot_c_wd_to_in=Ah_c_wd*(t_s_wd-t_a_in)
Q_dot_r_bl_to_wd=iflag_internal_blind*area_wd*sigma_boltzman*(t_s_bl+273)**4
Q_dot_r_wd_to_bl=iflag_internal_blind*area_wd*sigma_boltzman*(t_s_wd+273)**4
# Internal blind heat balance
Q_dot_c_bl_to_in=iflag_internal_blind*2*area_wd*h_c*(t_s_bl-t_a_in)
# wall to window radiative exchanges if there is no internal blind
Q_dot_r_wall_to_wd = (1-iflag_internal_blind)*Ah_r_wall_to_wd/h_r* sigma_boltzman*(t_s_wall+273)**4
Q_dot_r_wd_to_wall = (1-iflag_internal_blind)*Ah_r_wd_to_wall/h_r* sigma_boltzman*(t_s_wd+273)**4
# wall to internal blind radiative exchanges if there is an internal blind
Q_dot_r_wall_to_bl = iflag_internal_blind*Ah_r_wall_to_wd/h_r* sigma_boltzman*(t_s_wall+273)**4
Q_dot_r_bl_to_wall = iflag_internal_blind*Ah_r_wd_to_wall/h_r* sigma_boltzman*(t_s_bl+273)**4
# Wall surface node heat balance; Matrix Aij with axis=0 > sum on first index i i.e. sum of each column
Q_dot_in_to_wall = Q_dot_r_wd_to_wall - Q_dot_r_wall_to_wd + Q_dot_rad_wall[:,ind] + Q_dot_c_in_to_wall+ \
np.sum(Q_dot_r_i_to_j, axis=0) - np.sum(Q_dot_r_i_to_j, axis=1) + Q_dot_r_bl_to_wall - Q_dot_r_wall_to_bl
i1 = -Q_dot_cooling-np.sum(Q_dot_c_in_to_wall)+Q_dot_c_wd_to_in+Q_dot_c_bl_to_in+Q_dot_sensible_gains_conv[ind]
C1 = C_a_in
dT1_t = i1/C1
i2 = Q_dot_out_to_wd+np.sum(Q_dot_r_wall_to_wd)+Q_dot_rad_wd[ind]-np.sum(Q_dot_r_wd_to_wall)-Q_dot_c_wd_to_in+ \
Q_dot_r_bl_to_wd-Q_dot_r_wd_to_bl
C2 = C_gl
dT2_t = i2/C2
i3 = Q_dot_in_to_wall[4]/area_wall[4]
C3 = C_A_ce
dT3_t = i3/C3
i4 = Q_dot_in_to_wall[5]/area_wall[5]
C4 = C_A_fl
dT4_t = i4/C4
i5 = Q_dot_in_to_wall[5]/area_wall[5]
C5 = C_A_cp
dT5_t = i5/C5
i6 = iflag_internal_blind*Q_dot_sol_wd[ind]+Q_dot_r_wd_to_bl-Q_dot_r_bl_to_wd +np.sum(Q_dot_r_wall_to_bl) \
-np.sum(Q_dot_r_bl_to_wall)-Q_dot_c_bl_to_in
C6 = C_bl
dT6_t = i6/C6
#! All walls
shape = (n_walls, n_nodes)
fh_ext = np.zeros(shape)
dTw_t = np.zeros(shape)
fh_ext[:,0]= Q_dot_in_to_wall/area_wall
if iflag_suspended_ceiling == 1 : fh_ext[4,0] = U_air_layer * (t_s_wall[4] - Tw[4,0])
if iflag_raised_floor == 1 : fh_ext[5,0] = U_air_layer * (t_s_wall[5] - Tw[5,0])
if iflag_carpet == 1 : fh_ext[5,0] = U_half_carpet * (t_s_wall[5] - Tw[5,0])
for i in range(n_walls):
dTw_t[i,:] = np.linalg.inv(C_matrix[i,:,:]) @ (fh_ext[i,:] - L_matrix[i,:,:] @ Tw[i,:])
dTi_t = [dT1_t]
dTi_t.extend([dT2_t])
dTi_t.extend([dT3_t])
dTi_t.extend([dT4_t])
dTi_t.extend([dT5_t])
dTi_t.extend([dT6_t])
dTi_t.extend(dTw_t.flatten())
return (dTi_t, Q_dot_cooling) # allows to return more outputs than only dTi_T
def dTi_t(Ti, tau):
ret = model_dTi_t(Ti, tau) # only dTi_T is returned for ordinary differential integral
return ret[0]
Ti = odeint(dTi_t , Ti0, tau)
# dTi_t(np.array(Ti0), 600)
Q_dot_cooling = np.asarray([model_dTi_t(Ti[tt],tau[tt])[1] for tt in range(len(tau))])
return Q_dot_cooling
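# --- Illustrative call sketch (added for clarity, not part of the original script) ---
# A hedged example of how Qdot might be driven. The solar-gain series is set to zero here
# only to keep the sketch self-contained; a real run would build Q_dot_sol_wd from the
# window irradiance (e.g. via Isol()/Iwd() below). It also assumes the helper functions
# referenced by Qdot (wall_matrix, View_factors, plant, occupancy) are available.
def _example_qdot():
    n_steps = 3 * 24 * 3600 // 1200 + 1   # 3 simulated days at 1200 s steps, as in Qdot
    q_sol_wd = np.zeros(n_steps)          # placeholder window solar gains [W]
    q_cool = Qdot(
        iflag_internal_blind=0, iflag_suspended_ceiling=1,
        iflag_raised_floor=0, iflag_carpet=1,
        Q_dot_rad_max=300.0, Q_dot_conv_max=200.0,
        hour_start_occ=8, hour_stop_occ=18,
        hour_start_plant=7, hour_stop_plant=19,
        M_A=300.0, H_B=0.75,
        Q_dot_sol_wd_max_no_shading=800.0,
        Q_dot_sol_wd=q_sol_wd,
        month=7,
    )
    return q_cool                         # cooling power for each time step [W]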
def Isol(month):
# Simulation period
n_day_sim=3
hour_start=0
n_hours_sim=24*n_day_sim
hour_stop=hour_start + n_hours_sim
tau_initial=hour_start*3600
tau_final=hour_stop*3600
DELTAtau=600 * 2 #[s]
# Time in s : Create an array of evenly-spaced values
tau = np.arange(tau_initial,tau_final+1,DELTAtau)
# Hour and Day from the start of the simulation
hour = tau/3600
hour_per_0 = hour-24*np.trunc(hour/24)
# np.choose(condition,[action if condition = 0 or false, action if condition = 1 or true])
# np.choose(array, [action if condition = 0, action if condition = 1 , action if condition = 2 ...)])
hour_per=np.choose(hour_per_0 > 0.000001,[24,hour_per_0])
day = hour/24
day_int_0 = np.trunc(hour/24)+1
day_int = day_int_0-1
# Starting hour in the sun data table according to the month of simulation
month_start=max(1,min(12,month))
hour_start = np.zeros(13)
hour_start[1]=1+15*24; hour_start[2]=1+(31+15)*24; hour_start[3]=1+(31+28+15)*24; hour_start[4]=1+(2*31+28+15)*24; hour_start[5]=1+(2*31+28+30+15)*24; hour_start[6]=1+(3*31+28+30+15)*24
hour_start[7]=1+(3*31+28+2*30+15)*24; hour_start[8]=1+(4*31+28+2*30+15)*24; hour_start[9]=1+(5*31+28+2*30+15)*24; hour_start[10]=1+(5*31+28+3*30+15)*24
hour_start[11]=1+(6*31+28+3*30+15)*24; hour_start[12]=1+(6*31+28+4*30+15)*24
# Hour and Day from the start of the year (simulation starts at 15th of the considered month)
hour_yr = hour + float(hour_start[month]) - 1
day_yr = hour_yr/24
# External dry and wet temperatures for July: hour by hour from 0 to 24h (local solar hour)
h_sol = np.arange(25).astype(np.float32)
#Atmospheric pressure at sea level [Pa]
p_0 = 101325
#Estimation of atmospheric pressure at local height
#Scale height of the Rayleigh atmosphere near the earth surface [m]"
z_h = 8434.5
#Local height above the sea level [m]"
z_local = 100
p_atm = exp(-z_local/z_h)*p_0
np.set_printoptions(edgeitems=25)
phi_deg = 50.8411 # Latitude
lambda_deg = -5.5 # Longitude
n_days_year = 365
pi = np.pi
# Longitude expressed in hours
lambda_h = lambda_deg/15
sin_phi = sin(phi_deg * pi/180)
cos_phi = cos(phi_deg * pi/180)
tan_phi = tan(phi_deg * pi/180)
hour_sol_local = hour_yr
# Equation of time ET in hours
# beta = 2*pi/365 in rad/day, J = hour_sol_local/24, betaJ in rad
betaJ = (2*pi/n_days_year)*(hour_sol_local/24)
ET = (1/60) * (-0.00037+0.43177*np.cos(betaJ) - 3.165*np.cos(2*betaJ) - 0.07272*np.cos(3*betaJ) \
- 7.3764*np.sin(betaJ) - 9.3893*np.sin(2*betaJ) - 0.24498*np.sin(3*betaJ))
hour_sol_local_per = hour_sol_local-24*np.trunc(hour_sol_local/24)
# Assign 24 h to the zero h elements
hour_sol_local_per[hour_sol_local_per == 0] = 24
# day=1+np.trunc(hour_sol_local/24)
time_rad=2*pi*hour_sol_local/(24*365)
cos_time=np.cos(time_rad)
# hour_south_per = periodic hour equal to 0 h when the sun is due South (azimuth gamma = 0)
hour_south_per = hour_sol_local_per - 12
# Hour angle omega in degrees: omega = 0 when the sun is due South (azimuth gamma = 0)
omega_deg = hour_south_per*15
sin_omega = np.sin(omega_deg * pi/180)
cos_omega = np.cos(omega_deg * pi/180)
# Sun declination delta in degrees
time_rad=2*pi*hour_sol_local/(24*n_days_year)
time_lag_rad = 2*pi*(284/n_days_year)
sin_time_decl = np.sin(time_rad+time_lag_rad)
delta_rad=0.40928*sin_time_decl
delta_deg=(180/pi)*delta_rad
sin_delta = np.sin(delta_rad)
cos_delta = np.cos(delta_rad)
tan_delta = np.tan(delta_rad)
# Angle theta_z between sun beams and the vertical
theta_z_rad = np.abs(np.arccos(sin_delta*sin_phi+cos_delta*cos_phi*cos_omega))
cos_theta_z= np.cos(theta_z_rad)
sin_theta_z= np.sin(theta_z_rad)
theta_z_deg= (180/pi)*theta_z_rad
# Compute gamma_s: Sun azimuth
# Azimuth value comprised between -pi and +pi
gamma_s_rad = np.arctan2(sin_omega, cos_omega * sin_phi - tan_delta * cos_phi)
sin_gamma_s = np.sin(gamma_s_rad)
cos_gamma_s = np.cos(gamma_s_rad)
# Azimuth value comprised between -180 and +180
gamma_s_deg = (180/pi)*gamma_s_rad
# Components of the unit vector parallel to sun beams in axes: South, East, Vertical
n_sun_beam_South = cos_gamma_s*sin_theta_z
n_sun_beam_East = sin_gamma_s*sin_theta_z
n_sun_beam_Vert = cos_theta_z
# Direct horizontal irradiance calculation
# Solar altitude angle: only when the sun is over the horizontal plane
h_s_deg = np.choose(theta_z_deg < 90,[0, 90 - theta_z_deg])
h_s_rad = (pi/180) * h_s_deg
# Compute I_th_cs from the solar radiation I_dot_n_0 external to the atmosphere
# Direct normal irradiance calculation
# Solar constant [W/m^2]
I_dot_0 = 1367
# Correction for the variation of sun-earth distance [W/m^2]
delta_I_dot_0 = 45.326
I_dot_n_0 = I_dot_0 + delta_I_dot_0*cos_time
I_th_0= np.where(cos_theta_z > 0,I_dot_n_0*cos_theta_z,0)
# Direct horizontal irradiance calculation
# Solar altitude angle: only when the sun is over the horizontal plane
h_s_deg = np.where(theta_z_deg < 90,90 - theta_z_deg , 0)
h_s_rad = (pi/180) * h_s_deg
# Correction of solar altitude angle for refraction
DELTAh_s_deg = 0.061359*(180/pi)*(0.1594+1.1230*(pi/180)*h_s_deg+0.065656*(pi/180)**2 * h_s_deg**2)/(1+28.9344*(pi/180)*h_s_deg+277.3971*(pi/180)**2 *h_s_deg**2)
h_s_true_deg = h_s_deg + DELTAh_s_deg
h_s_true_rad = (pi/180)* h_s_true_deg
# m_r: relative optical air mass
# m_r: ratio of the optical path length through the atmosphere
# and the optical path length through a standard atmosphere at sea level with the sun at the zenith
# <NAME> (1989)
m_r = p_atm / p_0 /(np.sin(h_s_true_rad) + 0.50572 *((h_s_true_deg + 6.07995)**(-1.6364)))
# delta_R: Integral Rayleigh optical thickness
delta_R = np.where(m_r > 20, 1/(10.4+0.718*m_r), 1/ (6.62960+1.75130*m_r-0.12020*m_r**2+0.00650*m_r**3-0.00013*m_r**4))
# T_L_2: Linke turbidity factor for relative air mass = 2
# Site turbidity beta_site: Country: 0.05 Urban: 0.20
beta_site = 0.05
T_L_summer = 3.302
T_L_winter = 2.455
T_L_avg=(T_L_summer+T_L_winter)/2
DELTAT_L=(T_L_summer-T_L_winter)/2
time_lag_w_deg=360*(-30.5/360-1/4)
time_lag_w_rad=2*pi*(-30.5/360-1/4)
sin_time_w = np.sin(time_rad+time_lag_w_rad)
T_L_2=T_L_avg+DELTAT_L*sin_time_w
# Direct horizontal irradiance
I_bh_cs = I_dot_n_0 * np.sin(h_s_rad) * np.exp(-0.8662*T_L_2*m_r*delta_R)
# Direct normal irradiance
# Not considered if the sun is grazing at an angle < 2°
I_beam_cs = np.where(cos_theta_z > 0.035, I_bh_cs / cos_theta_z, 0)
# Diffuse horizontal irradiance
# T_rd: diffuse transmission for the sun at zenith
T_rd = -1.5843E-2+3.0543E-2*T_L_2+3.797E-4*T_L_2**2
# F_d: diffuse angular function
A_0 = 2.6463E-1-6.1581E-2*T_L_2+3.1408E-3*T_L_2**2
A_1 = 2.0402+1.8945E-2*T_L_2-1.1161E-2*T_L_2**2
A_2 = -1.3025+3.9231E-2*T_L_2+8.5079E-3*T_L_2**2
F_d = A_0+A_1*np.sin(h_s_rad)+A_2*(np.sin(h_s_rad)**2)
# Diffuse horizontal irradiance
I_dh_cs = np.where(h_s_deg > 2, I_dot_n_0*T_rd*F_d, 0)
I_test = I_dot_n_0*T_rd*F_d
# Total horizontal irradiance
I_th_cs= I_bh_cs+I_dh_cs
# # Calibration from data of Printemps site
# I_th_cs= 1.1274044452626812 *I_th_cs_0
I_bh = I_bh_cs
I_dh = I_dh_cs
I_th = I_th_cs
theta_z_rad = theta_z_deg * (pi/180)
cos_theta_z= np.cos(theta_z_rad)
sin_theta_z= np.sin(theta_z_rad)
tan_theta_z= np.tan(theta_z_rad)
return I_bh, I_dh, I_th, theta_z_rad , cos_theta_z, sin_theta_z, tan_theta_z, gamma_s_deg, theta_z_deg, n_sun_beam_South, n_sun_beam_East, n_sun_beam_Vert
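# --- Illustrative call sketch (added for clarity, not part of the original script) ---
# Isol() is self-contained; the unpacking below simply mirrors its return order over the
# 3-day simulation window built inside the function.
def _example_isol_july():
    (I_bh, I_dh, I_th, theta_z_rad, cos_theta_z, sin_theta_z, tan_theta_z,
     gamma_s_deg, theta_z_deg, n_S, n_E, n_V) = Isol(month=7)
    return I_th.max()   # peak clear-sky total horizontal irradiance [W/m^2]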
def Iwd(azimuth_wd_deg, H_B, A1, A2, A3, D_H, month):
slope_wd_deg = 90
angle_lateral_screens_deg = A1
angle_horiz_screen_deg = A2
f_prop_dist_vert_screen = D_H
if f_prop_dist_vert_screen > 0:
iflag_vert_screen= 1
else:
iflag_vert_screen= 0
angle_vert_screen_deg=min(A3,85)
H_B_wd=max(0.001,H_B)
Height_wd_0=H_B_wd*Breadth_wd_0
if Height_wd_0 <= Height_room-0.2:
Height_wd = Height_wd_0
Breadth_wd = Breadth_wd_0
else:
Height_wd = Height_room-0.2
Breadth_wd = (Height_room-0.2) / H_B_wd
if Height_wd <= Height_room-1:
Height_wd_sill = Height_wd_sill_0
else:
Height_wd_sill = Height_room-Height_wd-0.1
# "!Window area"
area_wd=Height_wd*Breadth_wd
SF_wd=(1-f_frame)*SF_gl
# "!Walls areas "
area_wall_0h=max(0,Height_room*Lenght_9h_3h-area_wd)
area_wall_3h=Height_room*Lenght_6h_0h
area_wall_6h=Height_room*Lenght_9h_3h
area_wall_9h=Height_room*Lenght_6h_0h
area_ceiling=Lenght_9h_3h*Lenght_6h_0h
area_floor=Lenght_9h_3h*Lenght_6h_0h
n_walls=6
area_wall = np.array([area_wall_0h, area_wall_3h, area_wall_6h, area_wall_9h, area_ceiling, area_floor])
area_wall_wd=np.sum(area_wall)+area_wd
# Define the gamma_w : wall azimuth
gamma_w_deg = azimuth_wd_deg
gamma_w_rad = gamma_w_deg*pi/180
sin_gamma_w = sin(gamma_w_rad)
cos_gamma_w = cos(gamma_w_rad)
# Define the p : slope
p_deg = slope_wd_deg
p_rad = p_deg *pi/180
sin_p = sin(p_rad)
cos_p = cos(p_rad)
# Ground reflexion: grass 0.2, snow 0.9
rho_ground=0
# Difference of azimuth between sun and wall
# deltagamma_deg comprised between 0° and 180°
# Where True, yield x, otherwise yield y.
deltagamma_deg = np.where(abs(gamma_s_deg - gamma_w_deg) > 180, abs(abs(gamma_s_deg - gamma_w_deg) - 360),
abs(gamma_s_deg - gamma_w_deg))
I_wd = np.zeros(len(gamma_s_deg))
I_wd_no_shading = np.zeros(len(gamma_s_deg))
for i in range(len(gamma_s_deg)):
if ((theta_z_deg[i] < 88) & (I_th[i] > 1) & (area_wd > 0.01)):
# deltagamma_rad comprised between 0 and pi
deltagamma_rad = deltagamma_deg[i] *pi/180
cos_dgamma = np.cos(deltagamma_rad)
sin_dgamma = np.sin(deltagamma_rad)
tan_dgamma = np.tan(deltagamma_rad)
# Compute ratio= cos(theta)/cos(theta_z)
# Cos of angle theta between sun beams and normal direction to the wall"
# Mask effect if the sun grazes the wall at an angle < 2°
cos_theta = cos_dgamma * sin_theta_z[i]
cos_theta_cos_theta_z = cos_theta / cos_theta_z[i]
I_beam = I_bh[i]/ cos_theta_z[i] if (I_bh[i] > 0) else 0
I_b = cos_theta * I_beam if (cos_theta > 0) else 0
# Diffuse and reflected solar gains
I_dr = I_dh[i] * (1+cos_p) / 2 + I_th[i] * rho_ground * (1-cos_p)/2
# Solar irradiance on the facade
I_tv = I_b + I_dr
I_t_wd = I_tv
#Diffuse and reflected radiation on vertical plane
I_d_wd=I_dh[i]/2
# Shading factor
# Lateral vertical screens assumed symmetrical: horizontal angle measured from the center of the window
tan_A1 = tan(angle_lateral_screens_deg*pi/180)
Depth_lateral_screen = tan_A1*Breadth_wd/2
b_wd_shade_lateral_screen = Depth_lateral_screen*abs(tan_dgamma) if(cos_dgamma> 0) else 0
# Horizontal screen above the window: vertical angle measured from the center of the window
tan_A2 = tan(angle_horiz_screen_deg*pi/180)
Depth_horiz_screen = tan_A2*Height_wd/2
h_wd_shade_horiz_screen = Depth_horiz_screen/(tan_theta_z[i]*cos_dgamma) if ((tan_theta_z[i]*cos_dgamma > 0.001)& (cos_dgamma>0)) else 0
#Vertical screen facing the window: vertical angle measured from the center of the window
Dist_vert_screen = f_prop_dist_vert_screen*Height_wd
Hypoth_vert_screen = Dist_vert_screen/cos_dgamma if(cos_dgamma > 0.001) else 0
h_vert_screen_no_shade = Hypoth_vert_screen/tan_theta_z[i] if(tan_theta_z[i] > 0.001) else 0
tan_A3 = tan(angle_vert_screen_deg*pi/180)
h_vert_screen = Height_wd/2+Dist_vert_screen*tan_A3
h_vert_screen_shade = h_vert_screen-h_vert_screen_no_shade if(h_vert_screen > h_vert_screen_no_shade) else 0
h_wd_shade_vert_screen = h_vert_screen_shade if(h_vert_screen_no_shade > 0) else 0
#Shading factor
h_wd_shade = h_wd_shade_vert_screen+h_wd_shade_horiz_screen
dh_shade = Height_wd-h_wd_shade if (Height_wd>h_wd_shade) else 0
b_wd_shade = b_wd_shade_lateral_screen
db_shade = Breadth_wd-b_wd_shade if (Breadth_wd>b_wd_shade) else 0
area_no_shaded_wd=dh_shade *db_shade
f_no_shaded_wd=area_no_shaded_wd/area_wd
f_shading=(1-f_no_shaded_wd)
#! Solar gains through windows taking into account the shading due to external screens, without external blinds
I_wd[i] = (1-f_shading)*I_t_wd+f_shading*I_d_wd
I_wd_no_shading[i] = I_t_wd
return I_wd, I_wd_no_shading
def tout( month):
t_dry_july = np.array([20.6 , 20.3, 20.2 , 20.0, 20.1 , 20.3, 20.7, 21.3, 21.9 , 22.5 , 23.2 , \
23.8 , 26.6 , 29.2, 31.5 , 32.0, 31.5 , 30.3 , 28. , 25.6, 22.7 , 22.1, 21.6 , 21.1 , 20.6 ])
# # Correction month by month - Max daily External dry and wet temperatures
dt_dry_m = np.array([-11. , -10. , -7.8, -5.5, -2.5, -0.5, 0. , 0. , -2.5, -4.1, -8.2, -10.2])
dt_wet_m = np.array([-5.5, -5. , -3.9, -2.7, -2.3, 0. , 0. , 0. , -0.5, -2.3, -3.9, -5. ])
dt_dry = dt_dry_m[month-1]
dt_wet = dt_wet_m[month-1]
# # External dry and wet temperatures for the current month: hour by hour from 0 to 24h (local solar hour)
t_dry_std = t_dry_july + dt_dry
t_dry = t_dry_std
return t_dry
def DTE(alpha_wall, M_A, azimuth_w_deg, slope_w_deg, iflag_shading, month, t_in):
M_per_A_wall=max(10,M_A)
t_init=26 #[°C]
# Daily average external temperature, in °C
t_out_avg = 24
# Daily variation of external temperature, in °C
DELTAt_out = 17
# "Air properties:"
v_a=0.8401 # [m^3/kg] "specific volume of humid air per kg of dry air"
c_p_a=1020 # [J/kg-K] "specific heat capacity of humid air per kg of dry air"
sigma_boltzman=5.67E-8
# "!Boundary layers"
# h_r=5 # [W/m^2-K]
# h_c=3 # [W/m^2-K]
# h_in=h_r + h_c
# h_out=17.5 # [W/m^2-K]
# "!Outside insulation for external wall "
R_out= 2 # [m^2-K/W]
# "!Days of simulation"
n_day_sim=3
# Wall azimuth angle gamma is comprised between -180° and 180°#
if azimuth_w_deg > 180 :
gamma_w_deg = azimuth_w_deg-360
else:
gamma_w_deg = azimuth_w_deg
# concrete bloc
rho_concrete_bloc=1200 #[kg/m^3]
lambda_concrete_bloc=1.273 #[W/m.K]
c_p_concrete_bloc=840 #[J/kg.K]
# "!Total number of finite element layers, with two degree two elements by layer"
n_layers = 2
nl=n_layers
# "! internal vertical wall layers"
thickness_w = M_per_A_wall/rho_concrete_bloc
thickness_wall = thickness_w/n_layers * np.ones(n_layers)
lambda_wall = lambda_concrete_bloc * np.ones(n_layers)
rho_wall = rho_concrete_bloc * np.ones(n_layers)
c_layer_wall = c_p_concrete_bloc * np.ones(n_layers)
# Matrixes of vertical wall layers"
n_elem,R_nobl_wall,L_wall,C_wall = wall_matrix(n_layers,thickness_wall,lambda_wall,rho_wall,c_layer_wall)
R_value_wall=1/h_in+R_nobl_wall+1/h_out
U_value_wall=1/R_value_wall
n_nodes=2*n_elem+1
# Initial conditions"
t_a_in_set = t_in + 0.01 #[°C]
t_a_in_init=t_a_in_set
# Simulation period
n_day_sim=3
hour_start=0
n_hours_sim=24*n_day_sim
hour_stop=hour_start + n_hours_sim
tau_initial=hour_start*3600
tau_final=hour_stop*3600
DELTAtau=600 * 2 #[s]
# Time in s : Create an array of evenly-spaced values
tau = np.arange(tau_initial,tau_final+1,DELTAtau)
# Hour and Day from the start of the simulation
hour = tau/3600
hour_per_0 = hour-24*np.trunc(hour/24)
# np.choose(condition,[action if condition = 0 or false, action if condition = 1 or true])
# np.choose(array, [action if condition = 0, action if condition = 1 , action if condition = 2 ...)])
hour_per=np.choose(hour_per_0 > 0.000001,[24,hour_per_0])
day = hour/24
day_int_0 = np.trunc(hour/24)+1
day_int = day_int_0-1
# Starting hour in the sun data table according to the month of simulation
month_start=max(1,min(12,month))
hour_start = np.zeros(13)
hour_start[1]=1+15*24; hour_start[2]=1+(31+15)*24; hour_start[3]=1+(31+28+15)*24; hour_start[4]=1+(2*31+28+15)*24; hour_start[5]=1+(2*31+28+30+15)*24; hour_start[6]=1+(3*31+28+30+15)*24
hour_start[7]=1+(3*31+28+2*30+15)*24; hour_start[8]=1+(4*31+28+2*30+15)*24; hour_start[9]=1+(5*31+28+2*30+15)*24; hour_start[10]=1+(5*31+28+3*30+15)*24
hour_start[11]=1+(6*31+28+3*30+15)*24; hour_start[12]=1+(6*31+28+4*30+15)*24
# Hour and Day from the start of the year (simulation starts at 15th of the considered month)
hour_yr = hour + float(hour_start[month])
day_yr = hour_yr/24
# External dry and wet temperatures for July: hour by hour from 0 to 24h (local solar hour)
h_sol = np.arange(25).astype(np.float32)
# t_dry_july = np.array([21. , 18.5, 16. , 15.5, 15. , 15.5, 16. , 18.5, 21. , 24. , 27. , \
# 29. , 31. , 31.5, 32. , 31.5, 31. , 30. , 29. , 27.5, 26. , 24.5, 23. , 22. , 21. ])
# t_wet_july = np.array([16.15,15.24,14.3,14.11,13.92,14.11,14.3,15.24,16.15,17.21,18.22, \
# 18.88,19.52,19.67,19.83,19.67,19.52,19.2,18.88,18.39,17.89,17.38,16.86,16.51,16.15])
t_dry_july = np.array([20.6 , 20.3, 20.2 , 20.0, 20.1 , 20.3, 20.7, 21.3, 21.9 , 22.5 , 23.2 , \
23.8 , 26.6 , 29.2, 31.5 , 32.0, 31.5 , 30.3 , 28. , 25.6, 22.7 , 22.1, 21.6 , 21.1 , 20.6 ])
# # Correction month by month - Max daily External dry and wet temperatures
dt_dry_m = np.array([-11. , -10. , -7.8, -5.5, -2.5, -0.5, 0. , 0. , -2.5, -4.1, -8.2, -10.2])
dt_wet_m = np.array([-5.5, -5. , -3.9, -2.7, -2.3, 0. , 0. , 0. , -0.5, -2.3, -3.9, -5. ])
dt_dry = dt_dry_m[month-1]
dt_wet = dt_wet_m[month-1]
# # External dry and wet temperatures for the current month: hour by hour from 0 to 24h (local solar hour)
t_dry_std = t_dry_july + dt_dry
# t_wet_std = t_wet_july - dt_wet
# t_dry_avg_std = np.average(t_dry_std)
# DELTAt_dry_std = np.max(t_dry_std) - np.min(t_dry_std)
# t_wet_avg_std = np.average(t_wet_std)
# DELTAt_wet_std = np.max(t_wet_std) - np.min(t_wet_std)
# # Profile adapted to the data given for t_out_avg and DELTAt_out
# t_dry = t_out_avg + (t_dry_std-t_dry_avg_std) * (DELTAt_out/DELTAt_dry_std)
# t_wet = t_out_avg - (t_dry_avg_std - t_wet_avg_std) + (t_wet_std-t_wet_avg_std) * (DELTAt_out/DELTAt_dry_std)
t_dry = t_dry_std
df = pd.DataFrame(tau, columns=['tau'])
df['hour'] = hour
df['day'] = day
df['day_int'] = day_int
df['hour_yr'] = hour_yr
df['day_yr'] = day_yr
df['hour_per'] = hour_per
df1 = pd.DataFrame(h_sol, columns=['hour_per'])
import datetime
import numpy as np
from numpy import nan
import pandas as pd
import pytz
import pytest
from pytz.exceptions import UnknownTimeZoneError
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pvlib.location import Location
from test_solarposition import expected_solpos
from conftest import requires_scipy
aztz = pytz.timezone('US/Arizona')
def test_location_required():
Location(32.2, -111)
def test_location_all():
Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
@pytest.mark.parametrize('tz', [
aztz, 'America/Phoenix', -7, -7.0,
])
def test_location_tz(tz):
Location(32.2, -111, tz)
def test_location_invalid_tz():
with pytest.raises(UnknownTimeZoneError):
Location(32.2, -111, 'invalid')
def test_location_invalid_tz_type():
with pytest.raises(TypeError):
Location(32.2, -111, [5])
def test_location_print_all():
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
expected_str = '\n'.join([
'Location: ',
' name: Tucson',
' latitude: 32.2',
' longitude: -111',
' altitude: 700',
' tz: US/Arizona'
])
assert tus.__str__() == expected_str
def test_location_print_pytz():
tus = Location(32.2, -111, aztz, 700, 'Tucson')
expected_str = '\n'.join([
'Location: ',
' name: Tucson',
' latitude: 32.2',
' longitude: -111',
' altitude: 700',
' tz: US/Arizona'
])
assert tus.__str__() == expected_str
@requires_scipy
def test_get_clearsky():
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
times = pd.DatetimeIndex(start='20160101T0600-0700',
end='20160101T1800-0700',
freq='3H')
clearsky = tus.get_clearsky(times)
expected = pd.DataFrame(data=np.array([
( 0.0, 0.0, 0.0),
(262.77734276159333, 791.1972825869296, 46.18714900637892),
(616.764693938387, 974.9610353623959, 65.44157429054201),
(419.6512657626518, 901.6234995035793, 54.26016437839348),
( 0.0, 0.0, 0.0)],
dtype=[('ghi', '<f8'), ('dni', '<f8'), ('dhi', '<f8')]), index=times)
assert_frame_equal(expected, clearsky, check_less_precise=2)
def test_get_clearsky_ineichen_supply_linke():
tus = Location(32.2, -111, 'US/Arizona', 700)
times = pd.date_range(start='2014-06-24', end='2014-06-25', freq='3h')
times_localized = times.tz_localize(tus.tz)
expected = pd.DataFrame(np.
array([[ 0. , 0. , 0. ],
[ 0. , 0. , 0. ],
[ 79.73090244, 316.16436502, 40.45759009],
[ 703.43653498, 876.41452667, 95.15798252],
[ 1042.37962396, 939.86391062, 118.44687715],
[ 851.32411813, 909.11186737, 105.36662462],
[ 257.18266827, 646.16644264, 62.02777094],
[ 0. , 0. , 0. ],
[ 0. , 0. , 0. ]]),
columns=['ghi', 'dni', 'dhi'],
index=times_localized)
out = tus.get_clearsky(times_localized, linke_turbidity=3)
assert_frame_equal(expected, out, check_less_precise=2)
def test_get_clearsky_haurwitz():
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
times = pd.DatetimeIndex(start='20160101T0600-0700',
end='20160101T1800-0700',
freq='3H')
clearsky = tus.get_clearsky(times, model='haurwitz')
expected = pd.DataFrame(data=np.array(
[[ 0. ],
[ 242.30085588],
[ 559.38247117],
[ 384.6873791 ],
[ 0. ]]),
columns=['ghi'],
index=times)
assert_frame_equal(expected, clearsky)
def test_get_clearsky_simplified_solis():
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
times = pd.DatetimeIndex(start='20160101T0600-0700',
end='20160101T1800-0700',
freq='3H')
clearsky = tus.get_clearsky(times, model='simplified_solis')
expected = pd.DataFrame(data=np.
array([[ 0. , 0. , 0. ],
[ 70.00146271, 638.01145669, 236.71136245],
[ 101.69729217, 852.51950946, 577.1117803 ],
[ 86.1679965 , 755.98048017, 385.59586091],
[ 0. , 0. , 0. ]]),
columns=['dhi', 'dni', 'ghi'],
index=times)
expected = expected[['ghi', 'dni', 'dhi']]
assert_frame_equal(expected, clearsky, check_less_precise=2)
def test_get_clearsky_simplified_solis_apparent_elevation():
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
times = pd.DatetimeIndex(start='20160101T0600-0700',
end='20160101T1800-0700',
freq='3H')
solar_position = {'apparent_elevation': pd.Series(80, index=times),
'apparent_zenith': pd.Series(10, index=times)}
clearsky = tus.get_clearsky(times, model='simplified_solis',
solar_position=solar_position)
expected = pd.DataFrame(data=np.
array([[ 131.3124497 , 1001.14754036, 1108.14147919],
[ 131.3124497 , 1001.14754036, 1108.14147919],
[ 131.3124497 , 1001.14754036, 1108.14147919],
[ 131.3124497 , 1001.14754036, 1108.14147919],
[ 131.3124497 , 1001.14754036, 1108.14147919]]),
columns=['dhi', 'dni', 'ghi'],
index=times)
expected = expected[['ghi', 'dni', 'dhi']]
assert_frame_equal(expected, clearsky, check_less_precise=2)
def test_get_clearsky_simplified_solis_dni_extra():
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
times = pd.DatetimeIndex(start='20160101T0600-0700',
end='20160101T1800-0700',
freq='3H')
clearsky = tus.get_clearsky(times, model='simplified_solis',
dni_extra=1370)
expected = pd.DataFrame(data=np.
array([[ 0. , 0. , 0. ],
[ 67.82281485, 618.15469596, 229.34422063],
[ 98.53217848, 825.98663808, 559.15039353],
[ 83.48619937, 732.45218243, 373.59500313],
[ 0. , 0. , 0. ]]),
columns=['dhi', 'dni', 'ghi'],
index=times)
expected = expected[['ghi', 'dni', 'dhi']]
assert_frame_equal(expected, clearsky)
#!/usr/bin/env python
# Filename: analyze_dataAug_results
"""
introduction:
authors: <NAME>
email:<EMAIL>
add time: 29 March, 2021
"""
import os, sys
code_dir = os.path.expanduser('~/codes/PycharmProjects/Landuse_DL')
sys.path.insert(0, code_dir)
import basic_src.io_function as io_function
import pandas as pd
def output_max_min_miou(pd_table):
data_aug_options = pd_table['data_augmentation'].tolist()
# train_class_0 train_class_1 val_class_0 val_class_1 class_1 overall time_total_h
train_c0_count = pd_table['train_class_0'].tolist()
train_c1_count = pd_table['train_class_1'].tolist()
val_c0_count = pd_table['val_class_0'].tolist()
val_c1_count = pd_table['val_class_1'].tolist()
mIOU_c1 = pd_table['class_1'].tolist()
mIOU_overall = pd_table['overall'].tolist()
total_time = pd_table['time_total_h'].tolist()
# find max and min mIOU for class_1
max_index = mIOU_c1.index(max(mIOU_c1))
print('class_1::: mIOU: %f, train_c0_num:%d, train_c1_num:%d, val_c0_count:%d, val_c1_count:%d,'
'total_time_h: %f,augmentation option: %s, '%( mIOU_c1[max_index], train_c0_count[max_index], train_c1_count[max_index],
val_c0_count[max_index], val_c1_count[max_index], total_time[max_index],data_aug_options[max_index],))
min_index = mIOU_c1.index(min(mIOU_c1))
print('class_1::: mIOU: %f, train_c0_num:%d, train_c1_num:%d, val_c0_count:%d, val_c1_count:%d,'
'total_time_h: %f, augmentation option: %s '%( mIOU_c1[min_index], train_c0_count[min_index], train_c1_count[min_index],
val_c0_count[min_index], val_c1_count[min_index], total_time[min_index],data_aug_options[min_index]))
# find max and min mIOU for overall
max_index = mIOU_overall.index(max(mIOU_overall))
print('overall::: mIOU: %f, train_c0_num:%d, train_c1_num:%d, val_c0_count:%d, val_c1_count:%d,'
'total_time_h: %f,augmentation option: %s, '%( mIOU_overall[max_index], train_c0_count[max_index], train_c1_count[max_index],
val_c0_count[max_index], val_c1_count[max_index], total_time[max_index],data_aug_options[max_index],))
min_index = mIOU_overall.index(min(mIOU_overall))
print('overall::: mIOU: %f, train_c0_num:%d, train_c1_num:%d, val_c0_count:%d, val_c1_count:%d,'
'total_time_h: %f, augmentation option: %s '%( mIOU_overall[min_index], train_c0_count[min_index], train_c1_count[min_index],
val_c0_count[min_index], val_c1_count[min_index], total_time[min_index],data_aug_options[min_index]))
def output_miou_for_each_dataAug_options(pd_table):
data_aug_options = pd_table['data_augmentation'].tolist()
mIOU_c1 = pd_table['class_1'].tolist()
mIOU_overall = pd_table['overall'].tolist()
aug_options_c1 = {}
aug_options_overall = {}
for opt, miou_c1, miou_o in zip(data_aug_options, mIOU_c1, mIOU_overall):
# print(opt, miou_c1, miou_o)
opt_list = [item.strip() for item in opt.split(',')]
for aug in opt_list:
if aug in aug_options_c1.keys():
aug_options_c1[aug].append(miou_c1)
else:
aug_options_c1[aug] = [miou_c1]
if aug in aug_options_overall.keys():
aug_options_overall[aug].append(miou_o)
else:
aug_options_overall[aug] = [miou_o]
for key in aug_options_c1:
value_list = aug_options_c1[key]
print('class_1: exp count: %d, mean, max, and min miou_c1: %f %f %f, aug option: %s'%
(len(value_list), sum(value_list)/len(value_list), max(value_list), min(value_list),key))
for key in aug_options_overall:
value_list = aug_options_overall[key]
print('overall: exp count: %d, mean, max, and min miou_overall: %f %f %f, aug option: %s'%
(len(value_list), sum(value_list)/len(value_list), max(value_list), min(value_list),key))
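# Hypothetical demo (not part of the original script): the kind of table this function
# expects -- one row per experiment, with comma-separated augmentation options -- and how
# to call it on a small in-memory DataFrame instead of the usual Excel sheet.
def _demo_output_miou_per_option():
    demo = pd.DataFrame({'data_augmentation': ['flip, blur', 'flip', 'blur'],
                         'class_1': [0.71, 0.68, 0.66],
                         'overall': [0.84, 0.82, 0.80]})
    output_miou_for_each_dataAug_options(demo)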
def find_info_related_to_train_dir(train_val_table, train_dir, info_key):
folder_list = train_val_table['folder'].tolist()
info_list = train_val_table[info_key].tolist()
for dir, info in zip(folder_list, info_list):
if dir == train_dir:
return info
return None
def output_mean_max_miou_all_test_data(test_xlsx_list, train_val_table):
mean_miou_c1_each_test = {}
max_miou_c1_each_test = {}
max_miou_c1_test_aug_options = {}
for xlsx in test_xlsx_list:
print(xlsx)
test_pd_table = pd.read_excel(xlsx)
miou_c1_list = test_pd_table['class_1'].tolist()
train_dir_list = test_pd_table['train_dir'].tolist()
key = os.path.splitext(os.path.basename(xlsx))[0]
mean_miou_c1_each_test[key] = sum(miou_c1_list)/len(miou_c1_list)
max_miou_c1_each_test[key] = max(miou_c1_list)
# get training_dir
max_idx = miou_c1_list.index(max_miou_c1_each_test[key])
train_dir = train_dir_list[max_idx]
data_aug_options = find_info_related_to_train_dir(train_val_table,train_dir,'data_augmentation')
max_miou_c1_test_aug_options[key] = data_aug_options
key_list = list(mean_miou_c1_each_test.keys())
key_list.sort()
mean_list = []
max_list = []
aug_option_list = []
for key in key_list:
print('%s mean miou c1: %f, max miou c1: %f'%(key, mean_miou_c1_each_test[key], max_miou_c1_each_test[key]))
mean_list.append(mean_miou_c1_each_test[key])
max_list.append(max_miou_c1_each_test[key])
aug_option_list.append(max_miou_c1_test_aug_options[key])
# data augmentation count:
data_option_count = {}
for key in key_list:
opt_list = [ item.strip() for item in max_miou_c1_test_aug_options[key].split(',')]
for opt in opt_list:
if opt in data_option_count.keys():
data_option_count[opt] += 1
else:
data_option_count[opt] = 1
print(data_option_count)
save_dict = {'test_images':key_list, 'mean_miou_class_1':mean_list,
'max_miou_class_1':max_list, 'max_miou_aug_options':aug_option_list}
save_dict_pd = | pd.DataFrame(save_dict) | pandas.DataFrame |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
data = pd.read_csv('data.csv')
df = | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import numpy as np
from scipy.stats.mstats import gmean
import sys
labels = pd.read_csv("labels.txt",sep= ' ',header=None)
df1 = | pd.read_csv("fmow_imagenet1k-resnext-101-cnn-only-all_8_simplecut_test.txt",header=None) | pandas.read_csv |
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
"""
@author: <NAME>
"""
from tqdm import tqdm, trange
import pandas as pd
import io
import os
import time
import numpy as np
import matplotlib.pyplot as plt
import re
import argparse
from pytorch_transformers import BertTokenizer
from other_func import write_log, preprocess1, preprocessing
from sklearn.model_selection import KFold
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--original_data",
default=None,
type=str,
required=True,
help="The input data file path."
" Should be the .tsv file (or other data file) for the task.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the processed data will be written.")
parser.add_argument("--temp_dir",
default=None,
type=str,
required=True,
help="The output directory where the intermediate processed data will be written.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task.")
parser.add_argument("--log_path",
default=None,
type=str,
required=True,
help="The log file path.")
parser.add_argument("--id_num_neg",
default=None,
type=int,
required=True,
help="The number of admission ids that we want to use for negative category.")
parser.add_argument("--id_num_pos",
default=None,
type=int,
required=True,
help="The number of admission ids that we want to use for positive category.")
parser.add_argument("--random_seed",
default=1,
type=int,
required=True,
help="The random_seed for train/val/test split.")
parser.add_argument("--bert_model",
default="bert-base-uncased",
type=str,
required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
## Other parameters
parser.add_argument("--Kfold",
default=None,
type=int,
required=False,
help="The number of folds that we want ot use for cross validation. "
"Default is not doing cross validation")
args = parser.parse_args()
RANDOM_SEED = args.random_seed
LOG_PATH = args.log_path
TEMP_DIR = args.temp_dir
if os.path.exists(TEMP_DIR) and os.listdir(TEMP_DIR):
raise ValueError("Temp Output directory ({}) already exists and is not empty.".format(TEMP_DIR))
os.makedirs(TEMP_DIR, exist_ok=True)
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
os.makedirs(args.output_dir, exist_ok=True)
original_df = | pd.read_csv(args.original_data, header=None) | pandas.read_csv |
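# Hypothetical command line for the preprocessing script above (the script name, paths and
# counts are placeholders, not taken from the original source):
#   python preprocess_notes.py \
#       --original_data notes.csv --output_dir ./processed --temp_dir ./tmp \
#       --task_name readmission --log_path ./run.log \
#       --id_num_neg 2000 --id_num_pos 2000 --random_seed 1 \
#       --bert_model bert-base-uncased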
# -*- coding: utf-8 -*-
import re
import demjson
import pandas as pd
from spider.setting import col_names
class JsonParse:
'''
Parse the web page information.
'''
def __init__(self, htmlCode):
self.htmlCode = htmlCode
self.json = demjson.decode(htmlCode)
pass
def parseTool(self, content):
'''
Strip HTML tags from the content.
'''
if type(content) != str: return content
sublist = ['<p.*?>', '</p.*?>', '<b.*?>', '</b.*?>', '<div.*?>', '</div.*?>',
'</br>', '<br />', '<ul>', '</ul>', '<li>', '</li>', '<strong>',
'</strong>', '<table.*?>', '<tr.*?>', '</tr>', '<td.*?>', '</td>',
'\r', '\n', '&.*?;', '&', '#.*?;', '<em>', '</em>']
try:
for substring in [re.compile(string, re.S) for string in sublist]:
content = re.sub(substring, "", content).strip()
except:
raise Exception('Error ' + str(substring.pattern))
return content
def parsePage(self):
'''
Parse and compute the number of result pages.
:return: page count
'''
totalCount = self.json['content']['positionResult']['totalCount'] # total number of positions
resultSize = self.json['content']['positionResult']['resultSize'] # number of results shown per page
pageCount = int(totalCount) // int(resultSize) + 1 # number of pages
return pageCount
def parseInfo(self):
'''
Parse the position information.
'''
info = []
for position in self.json['content']['positionResult']['result']:
i = []
i.append(position['positionId'])
i.append(position['positionName'])
i.append(position['salary'])
i.append(position['workYear'])
i.append(position['education'])
i.append(position['jobNature'])
i.append(position['isSchoolJob'])
i.append(position['positionAdvantage'])
i.append(position['firstType'])
i.append(position['secondType'])
i.append(position['companyId'])
i.append(position['companySize'])
i.append(position['financeStage'])
i.append(position['industryField'])
i.append(position['companyShortName'])
i.append(position['companyFullName'])
i.append(position['city'])
i.append(position['district'])
i.append(position['longitude'])
i.append(position['latitude'])
i.append(position['formatCreateTime'])
i.append(position['resumeProcessRate'])
i.append(position['resumeProcessDay'])
info.append(i)
df = | pd.DataFrame(info, columns=col_names) | pandas.DataFrame |
import pandas as pd
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score, recall_score, precision_score, classification_report, confusion_matrix
import numpy as np
from sklearn.model_selection import StratifiedKFold
# This script tunes the SVM hyperparameters for our problem in gradual steps. The test set is then
# imported, the selected model is evaluated on it, and the results are printed.
seed = 7
np.random.seed(seed)
# Loading the Training Set
X_train = pd.read_csv('Train.csv')
y_train = X_train[X_train.columns[-1]]
X_train = X_train.drop(columns=X_train.columns[-1], axis=1)
# Stratified 5-fold split object that will be used
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed)
# SVC: Decide on the best Kernel
svc = SVC(gamma="scale")
parameters = {'kernel': ('poly', 'sigmoid', 'linear', 'rbf'), 'C': [1, 10]}
clf = GridSearchCV(svc, parameters, cv=cv, n_jobs=2, verbose=3)
clf.fit(X_train, y_train)
results = pd.concat([pd.DataFrame(clf.cv_results_["params"]),
pd.DataFrame(clf.cv_results_["mean_test_score"], columns=["Accuracy"])], axis=1)
print('Accuracy Score for Different Kernels')
print(results)
# SVC to find optimal hyperparameters
svc = SVC()
parameters = {'kernel': ['rbf'], 'C': [13, 16], 'gamma': [0.55, 0.58]}
clf = GridSearchCV(svc, parameters, cv=cv, n_jobs=1, pre_dispatch=2, refit=False, verbose=3)
clf.fit(X_train, y_train)
results = pd.concat([pd.DataFrame(clf.cv_results_["params"]),
| pd.DataFrame(clf.cv_results_["mean_test_score"], columns=["Accuracy"]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import operator as op
import seaborn as sns
# http://data8.org/datascience/_modules/datascience/tables.html
#####################
# Frame Manipulation
def relabel(df, OriginalName, NewName):
return df.rename(index=str, columns={OriginalName: NewName})
# https://docs.python.org/3.4/library/operator.html
def where(df, column, value, operation=op.eq):
return pd.DataFrame( df.loc[operation(df.loc[:,column], value) ,:] )
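# Hypothetical usage (not in the original): keep only the rows whose 'age' column is >= 18.
# adults = where(people_df, 'age', 18, op.ge)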
def select(df, *column_or_columns):
table = | pd.DataFrame() | pandas.DataFrame |
"""
It is observed that if the last trade is profitable, the next trade is more likely to be a loss.
So why not run a ghost trader on the same strategy, and take a real trade only after the ghost trader's trade is a loss.
Elements: two moving averages; RSI; Donchian channel
conditions: 1. long if short MA > long MA, RSI below the overbought level (70), and price makes a new high
2. short if short MA < long MA, RSI above the oversold level (30), and price makes a new low
exit: 1. exit a long if price falls below the Donchian lower band
2. exit a short if price rises above the Donchian upper band
"""
import os
import numpy as np
import pandas as pd
import pytz
from datetime import datetime, timezone
import multiprocessing
import talib
import quanttrader as qt
import matplotlib.pyplot as plt
import empyrical as ep
import pyfolio as pf
# set browser full width
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
class GhostTrader(qt.StrategyBase):
def __init__(self,
ma_short=3, ma_long=21, rsi_n = 9, rsi_oversold=30, rsi_overbought=70, donchian_n = 21
):
super(GhostTrader, self).__init__()
self.ma_short = ma_short
self.ma_long = ma_long
self.rsi_n = rsi_n
self.rsi_oversold = rsi_oversold
self.rsi_overbought = rsi_overbought
self.donchian_n = donchian_n
self.lookback = max(ma_long, rsi_n, donchian_n)
self.long_ghost_virtual = False
self.long_ghost_virtual_price = 0.0
self.short_ghost_virtual = False
self.short_ghost_virtual_price = 0.0
self.current_time = None
def on_tick(self, tick_event):
self.current_time = tick_event.timestamp
# print('Processing {}'.format(self.current_time))
symbol = self.symbols[0]
df_hist = self._data_board.get_hist_price(symbol, tick_event.timestamp)
# wait for enough bars
if df_hist.shape[0] < self.lookback:
return
current_price = df_hist.iloc[-1].Close
current_size = self._position_manager.get_position_size(symbol)
npv = self._position_manager.current_total_capital
ema_short = talib.EMA(df_hist['Close'], self.ma_short).iloc[-1]
ema_long = talib.EMA(df_hist['Close'], self.ma_long).iloc[-1]
rsi = talib.RSI(df_hist['Close'], self.rsi_n).iloc[-1]
long_stop = min(df_hist.Low.iloc[-self.donchian_n:])
short_stop = max(df_hist.High.iloc[-self.donchian_n:])
# fast ma > slow ma, rsi < 70, new high
if current_size == 0 and ema_short > ema_long and rsi < self.rsi_overbought and \
df_hist.High.iloc[-1] > df_hist.High.iloc[-2]:
# ghost long
if self.long_ghost_virtual == False:
print('Ghost long, Pre-Price: %.2f, Long Price: %.2f' %
(df_hist['Close'].iloc[-2],
df_hist['Close'].iloc[-1]
))
self.long_ghost_virtual_price = df_hist['Close'].iloc[-1]
self.long_ghost_virtual = True
# actual long; after ghost loss
if self.long_ghost_virtual == True and self.long_ghost_virtual_price > df_hist['Close'].iloc[-1]:
self.long_ghost_virtual = False
target_size = (int)(npv / current_price)
self.adjust_position(symbol, size_from=current_size, size_to=target_size, timestamp=self.current_time)
print('BUY ORDER SENT, Pre-Price: %.2f, Price: %.2f, ghost price %.2f, Size: %.2f' %
(df_hist['Close'].iloc[-2],
df_hist['Close'].iloc[-1],
self.long_ghost_virtual_price,
target_size))
# close long if below Donchian lower band
elif current_size > 0 and df_hist['Close'].iloc[-1] <= long_stop:
target_size = 0
self.adjust_position(symbol, size_from=current_size, size_to=target_size, timestamp=self.current_time)
print('CLOSE LONG ORDER SENT, Pre-Price: %.2f, Price: %.2f, Low: %.2f, Stop: %.2f, Size: %.2f' %
(df_hist['Close'].iloc[-2],
df_hist['Close'].iloc[-1],
df_hist['Low'].iloc[-1],
long_stop,
target_size))
# fast ma < slow ma, rsi > 30, new low
if current_size == 0 and ema_short < ema_long and rsi > self.rsi_oversold and \
df_hist['Low'].iloc[-1] < df_hist['Low'].iloc[-2]:
# ghost short
if self.short_ghost_virtual == False:
print('Ghost short, Pre-Price: %.2f, Long Price: %.2f' %
(df_hist['Close'].iloc[-2],
df_hist['Close'].iloc[-1]
))
self.short_ghost_virtual_price = df_hist['Close'].iloc[-1]
self.short_ghost_virtual = True
# actual short; after ghost loss
if self.short_ghost_virtual == True and self.short_ghost_virtual_price < df_hist['Close'].iloc[-1]:
self.short_ghost_virtual = False
target_size = -(int)(npv / current_price)
self.adjust_position(symbol, size_from=current_size, size_to=target_size, timestamp=self.current_time)
print('SELL ORDER SENT, Pre-Price: %.2f, Price: %.2f, ghost price %.2f, Size: %.2f' %
(df_hist['Close'].iloc[-2],
df_hist['Close'].iloc[-1],
self.short_ghost_virtual_price,
target_size))
# close short if above Donchian upper band
elif current_size < 0 and df_hist['High'].iloc[-1] >= short_stop:
target_size = 0
self.adjust_position(symbol, size_from=current_size, size_to=target_size, timestamp=self.current_time)
print('CLOSE SHORT ORDER SENT, Pre-Price: %.2f, Price: %.2f, Low: %.2f, Stop: %.2f, Size: %.2f' %
(df_hist['Close'].iloc[-2],
df_hist['Close'].iloc[-1],
df_hist['High'].iloc[-1],
short_stop,
0))
def parameter_search(engine, tag, target_name, return_dict):
"""
This function should be the same for all strategies.
The only reason it is not included in quanttrader is its dependency on pyfolio (used to get perf_stats).
"""
ds_equity, _, _ = engine.run()
try:
strat_ret = ds_equity.pct_change().dropna()
perf_stats_strat = pf.timeseries.perf_stats(strat_ret)
target_value = perf_stats_strat.loc[target_name] # first table in tuple
except KeyError:
target_value = 0
return_dict[tag] = target_value
if __name__ == '__main__':
do_optimize = False
run_in_jupyter = False
symbol = 'SPX'
benchmark = 'SPX'
datapath = os.path.join('../data/', f'{symbol}.csv')
data = qt.util.read_ohlcv_csv(datapath)
init_capital = 100_000.0
test_start_date = datetime(2010,1,1, 8, 30, 0, 0, pytz.timezone('America/New_York'))
test_end_date = datetime(2019,12,31, 6, 0, 0, 0, pytz.timezone('America/New_York'))
if do_optimize: # parallel parameter search
params_list = [{'ma_short': 3, 'ma_long': 21, 'rsi_n': 9, 'rsi_oversold': 30, 'rsi_overbought': 70, 'donchian_n': 21},
{'ma_short': 5, 'ma_long': 21, 'rsi_n': 9, 'rsi_oversold': 20, 'rsi_overbought': 80, 'donchian_n': 21}]
target_name = 'Sharpe ratio'
manager = multiprocessing.Manager()
return_dict = manager.dict()
jobs = []
for params in params_list:
strategy = GhostTrader()
strategy.set_capital(init_capital)
strategy.set_symbols([symbol])
backtest_engine = qt.BacktestEngine(test_start_date, test_end_date)
backtest_engine.set_capital(init_capital) # capital or portfolio >= capital for one strategy
backtest_engine.add_data(symbol, data)
strategy.set_params({'ma_short': params['ma_short'], 'rsi_oversold': params['rsi_oversold'], 'rsi_overbought': params['rsi_overbought']})
backtest_engine.set_strategy(strategy)
tag = (params['ma_short'], params['rsi_oversold'])
p = multiprocessing.Process(target=parameter_search, args=(backtest_engine, tag, target_name, return_dict))
jobs.append(p)
p.start()
for proc in jobs:
proc.join()
for k,v in return_dict.items():
print(k, v)
else:
strategy = GhostTrader()
strategy.set_capital(init_capital)
strategy.set_symbols([symbol])
strategy.set_params(None)
# Create a Data Feed
backtest_engine = qt.BacktestEngine(test_start_date, test_end_date)
backtest_engine.set_capital(init_capital) # capital or portfolio >= capital for one strategy
backtest_engine.add_data(symbol, data)
backtest_engine.set_strategy(strategy)
ds_equity, df_positions, df_trades = backtest_engine.run()
# save to excel
qt.util.save_one_run_results('./output', ds_equity, df_positions, df_trades)
# ------------------------- Evaluation and Plotting -------------------------------------- #
strat_ret = ds_equity.pct_change().dropna()
strat_ret.name = 'strat'
bm = qt.util.read_ohlcv_csv(os.path.join('../data/', f'{benchmark}.csv'))
bm_ret = bm['Close'].pct_change().dropna()
bm_ret.index = | pd.to_datetime(bm_ret.index) | pandas.to_datetime |
"""LogToDataFrame: Converts a Zeek log to a Pandas DataFrame"""
# Third Party
import pandas as pd
# Local
from zat import zeek_log_reader
class LogToDataFrame(object):
"""LogToDataFrame: Converts a Zeek log to a Pandas DataFrame
Notes:
This class has recently been overhauled from a simple loader to a more
complex class that should in theory:
- Select better types for each column
- Should be faster
- Produce smaller memory footprint dataframes
If you have any issues/problems with this class please submit a GitHub issue.
More Info: https://supercowpowers.github.io/zat/large_dataframes.html
"""
def __init__(self):
"""Initialize the LogToDataFrame class"""
# First Level Type Mapping
# This map defines the types used when first reading in the Zeek log into a 'chunk' dataframes.
# Types (like time and interval) will be defined as one type at first but then
# will undergo further processing to produce correct types with correct values.
# See: https://stackoverflow.com/questions/29245848/what-are-all-the-dtypes-that-pandas-recognizes
# for more info on supported types.
self.type_map = {'bool': 'category', # Can't hold NaN values in 'bool', so we're going to use category
'count': 'UInt64',
'int': 'Int32',
'double': 'float',
'time': 'float', # Secondary Processing into datetime
'interval': 'float', # Secondary processing into timedelta
'port': 'UInt16'
}
def _get_field_info(self, log_filename):
"""Internal Method: Use ZAT log reader to read header for names and types"""
_zeek_reader = zeek_log_reader.ZeekLogReader(log_filename)
_, field_names, field_types, _ = _zeek_reader._parse_zeek_header(log_filename)
return field_names, field_types
def _create_initial_df(self, log_filename, all_fields, usecols, dtypes):
"""Internal Method: Create the initial dataframes by using Pandas read CSV (primary types correct)"""
return pd.read_csv(log_filename, sep='\t', names=all_fields, usecols=usecols, dtype=dtypes, comment="#", na_values='-')
def create_dataframe(self, log_filename, ts_index=True, aggressive_category=True, usecols=None):
""" Create a Pandas dataframe from a Bro/Zeek log file
Args:
log_filename (string): The full path to the Zeek log
ts_index (bool): Set the index to the 'ts' field (default = True)
aggressive_category (bool): convert unknown columns to category (default = True)
usecols (list): A subset of columns to read in (minimizes memory usage) (default = None)
"""
# Grab the field information
field_names, field_types = self._get_field_info(log_filename)
all_fields = field_names # We need ALL the fields for later
# If usecols is set then we'll subset the fields and types
if usecols:
# Usecols needs to include ts
if 'ts' not in usecols:
usecols.append('ts')
field_types = [t for t, field in zip(field_types, field_names) if field in usecols]
field_names = [field for field in field_names if field in usecols]
# Get the appropriate types for the Pandas Dataframe
pandas_types = self.pd_column_types(field_names, field_types, aggressive_category)
# Now actually read in the initial dataframe
self._df = self._create_initial_df(log_filename, all_fields, usecols, pandas_types)
# Now we convert 'time' and 'interval' fields to datetime and timedelta respectively
for name, zeek_type in zip(field_names, field_types):
if zeek_type == 'time':
self._df[name] = pd.to_datetime(self._df[name], unit='s')
if zeek_type == 'interval':
self._df[name] = | pd.to_timedelta(self._df[name], unit='s') | pandas.to_timedelta |
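# Illustrative usage of the LogToDataFrame class above (a sketch; the log path is a placeholder
# and the call only exercises the documented create_dataframe() signature):
if __name__ == '__main__':
    log_to_df = LogToDataFrame()
    conn_df = log_to_df.create_dataframe('/path/to/conn.log', ts_index=True)
    print(conn_df.dtypes)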
from dateutil.parser import parse
import pandas as pd
from pandas import ExcelWriter
import numpy as np
import csv
twitter_raw_filename = '/Nike_tweets.csv'
# reading the scraped twitter data file
tweets = pd.read_csv(twitter_raw_filename)
# setting the columns of the tweets dataframe
tweets.columns = ["Twitter_ID","Tweet_ID","Timestamp","Tweet_Content"]
tweets = tweets[pd.notnull(tweets["Tweet_Content"])]
# Cleaning the Twitter data to normalize the tweet text and timestamp format and make the data consistent
for index, row in tweets.iterrows():
row["Tweet_Content"] = row["Tweet_Content"].strip('b\'')
row["Timestamp"] = parse(row["Timestamp"]).strftime('%d/%m/%y')
row["Timestamp"] = pd.to_datetime(row["Timestamp"])
tweets["Timestamp"] = | pd.to_datetime(tweets['Timestamp']) | pandas.to_datetime |
"""
A warehouse for constant values required to initilize the PUDL Database.
This constants module stores and organizes a bunch of constant values which are
used throughout PUDL to populate static lists within the data packages or for
data cleaning purposes.
"""
import importlib.resources
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
'PE': 'Prince Edward Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
# Construct a dictionary mapping a canonical fuel name to a list of strings
# which are used to represent that fuel in the FERC Form 1 Reporting. Case is
# ignored, as all fuel strings can be converted to a lower case in the data
# set.
# Previous categories of ferc1_biomass_strings and ferc1_stream_strings have
# been deleted and their contents redistributed to ferc1_waste_strings and
# ferc1_other_strings
ferc1_coal_strings = [
'coal', 'coal-subbit', 'lignite', 'coal(sb)', 'coal (sb)', 'coal-lignite',
'coke', 'coa', 'lignite/coal', 'coal - subbit', 'coal-subb', 'coal-sub',
'coal-lig', 'coal-sub bit', 'coals', 'ciak', 'petcoke', 'coal.oil',
'coal/gas', 'bit coal', 'coal-unit #3', 'coal-subbitum', 'coal tons',
'coal mcf', 'coal unit #3', 'pet. coke', 'coal-u3', 'coal&coke', 'tons'
]
"""
list: A list of strings which are used to represent coal fuel in FERC Form 1
reporting.
"""
ferc1_oil_strings = [
'oil', '#6 oil', '#2 oil', 'fuel oil', 'jet', 'no. 2 oil', 'no.2 oil',
'no.6& used', 'used oil', 'oil-2', 'oil (#2)', 'diesel oil',
'residual oil', '# 2 oil', 'resid. oil', 'tall oil', 'oil/gas',
'no.6 oil', 'oil-fuel', 'oil-diesel', 'oil / gas', 'oil bbls', 'oil bls',
'no. 6 oil', '#1 kerosene', 'diesel', 'no. 2 oils', 'blend oil',
'#2oil diesel', '#2 oil-diesel', '# 2 oil', 'light oil', 'heavy oil',
'gas.oil', '#2', '2', '6', 'bbl', 'no 2 oil', 'no 6 oil', '#1 oil', '#6',
'oil-kero', 'oil bbl', 'biofuel', 'no 2', 'kero', '#1 fuel oil',
'no. 2 oil', 'blended oil', 'no 2. oil', '# 6 oil', 'nno. 2 oil',
'#2 fuel', 'oill', 'oils', 'gas/oil', 'no.2 oil gas', '#2 fuel oil',
'oli', 'oil (#6)', 'oil/diesel', '2 oil', '#6 hvy oil', 'jet fuel',
'diesel/compos', 'oil-8', 'oil {6}', 'oil-unit #1', 'bbl.', 'oil.',
'oil #6', 'oil (6)', 'oil(#2)', 'oil-unit1&2', 'oil-6', '#2 fue oil',
'dielel oil', 'dielsel oil', '#6 & used', 'barrels', 'oil un 1 & 2',
'jet oil', 'oil-u1&2', 'oiul', 'pil', 'oil - 2', '#6 & used', 'oial'
]
"""
list: A list of strings which are used to represent oil fuel in FERC Form 1
reporting.
"""
ferc1_gas_strings = [
'gas', 'gass', 'methane', 'natural gas', 'blast gas', 'gas mcf',
'propane', 'prop', 'natural gas', 'nat.gas', 'nat gas',
'nat. gas', 'natl gas', 'ga', 'gas`', 'syngas', 'ng', 'mcf',
'blast gaa', 'nat gas', 'gac', 'syngass', 'prop.', 'natural', 'coal.gas',
'n. gas', 'lp gas', 'natuaral gas', 'coke gas', 'gas #2016', 'propane**',
'* propane', 'propane **', 'gas expander', 'gas ct', '# 6 gas', '#6 gas',
'coke oven gas'
]
"""
list: A list of strings which are used to represent gas fuel in FERC Form 1
reporting.
"""
ferc1_solar_strings = []
ferc1_wind_strings = []
ferc1_hydro_strings = []
ferc1_nuke_strings = [
'nuclear', 'grams of uran', 'grams of', 'grams of ura',
'grams', 'nucleur', 'nulear', 'nucl', 'nucleart', 'nucelar',
'gr.uranium', 'grams of urm', 'nuclear (9)', 'nulcear', 'nuc',
'gr. uranium', 'nuclear mw da', 'grams of ura'
]
"""
list: A list of strings which are used to represent nuclear fuel in FERC Form
1 reporting.
"""
ferc1_waste_strings = [
'tires', 'tire', 'refuse', 'switchgrass', 'wood waste', 'woodchips',
'biomass', 'wood', 'wood chips', 'rdf', 'tires/refuse', 'tire refuse',
'waste oil', 'waste', 'woodships', 'tire chips'
]
"""
list: A list of strings which are used to represent waste fuel in FERC Form 1
reporting.
"""
ferc1_other_strings = [
'steam', 'purch steam', 'all', 'tdf', 'n/a', 'purch. steam', 'other',
'composite', 'composit', 'mbtus', 'total', 'avg', 'avg.', 'blo',
'all fuel', 'comb.', 'alt. fuels', 'na', 'comb', '/#=2\x80â\x91?',
'kã\xadgv¸\x9d?', "mbtu's", 'gas, oil', 'rrm', '3\x9c', 'average',
'furfural', '0', 'watson bng', 'toal', 'bng', '# 6 & used', 'combined',
'blo bls', 'compsite', '*', 'compos.', 'gas / oil', 'mw days', 'g', 'c',
'lime', 'all fuels', 'at right', '20', '1', 'comp oil/gas', 'all fuels to',
'the right are', 'c omposite', 'all fuels are', 'total pr crk',
'all fuels =', 'total pc', 'comp', 'alternative', 'alt. fuel', 'bio fuel',
'total prairie', ''
]
"""list: A list of strings which are used to represent other fuels in FERC Form
1 reporting.
"""
# There are also a bunch of other weird and hard to categorize strings
# that I don't know what to do with... hopefully they constitute only a
# small fraction of the overall generation.
ferc1_fuel_strings = {"coal": ferc1_coal_strings,
"oil": ferc1_oil_strings,
"gas": ferc1_gas_strings,
"solar": ferc1_solar_strings,
"wind": ferc1_wind_strings,
"hydro": ferc1_hydro_strings,
"nuclear": ferc1_nuke_strings,
"waste": ferc1_waste_strings,
"other": ferc1_other_strings
}
"""dict: A dictionary linking fuel types (keys) to lists of various strings
representing that fuel (values)
"""
# Similarly, dictionary for cleaning up fuel unit strings
ferc1_ton_strings = ['toms', 'taons', 'tones', 'col-tons', 'toncoaleq', 'coal',
'tons coal eq', 'coal-tons', 'ton', 'tons', 'tons coal',
'coal-ton', 'tires-tons', 'coal tons -2 ',
'coal tons 200', 'ton-2000', 'coal tons -2', 'coal tons',
'coal-tone', 'tire-ton', 'tire-tons', 'ton coal eqv']
"""list: A list of fuel unit strings for tons."""
ferc1_mcf_strings = \
['mcf', "mcf's", 'mcfs', 'mcf.', 'gas mcf', '"gas" mcf', 'gas-mcf',
'mfc', 'mct', ' mcf', 'msfs', 'mlf', 'mscf', 'mci', 'mcl', 'mcg',
'm.cu.ft.', 'kcf', '(mcf)', 'mcf *(4)', 'mcf00', 'm.cu.ft..']
"""list: A list of fuel unit strings for thousand cubic feet."""
ferc1_bbl_strings = \
['barrel', 'bbls', 'bbl', 'barrels', 'bbrl', 'bbl.', 'bbls.',
'oil 42 gal', 'oil-barrels', 'barrrels', 'bbl-42 gal',
'oil-barrel', 'bb.', 'barrells', 'bar', 'bbld', 'oil- barrel',
'barrels .', 'bbl .', 'barels', 'barrell', 'berrels', 'bb',
'bbl.s', 'oil-bbl', 'bls', 'bbl:', 'barrles', 'blb', 'propane-bbl',
'barriel', 'berriel', 'barrile', '(bbl.)', 'barrel *(4)', '(4) barrel',
'bbf', 'blb.', '(bbl)', 'bb1', 'bbsl', 'barrrel', 'barrels 100%',
'bsrrels', "bbl's", '*barrels', 'oil - barrels', 'oil 42 gal ba', 'bll',
'boiler barrel', 'gas barrel', '"boiler" barr', '"gas" barrel',
'"boiler"barre', '"boiler barre', 'barrels .']
"""list: A list of fuel unit strings for barrels."""
ferc1_gal_strings = ['gallons', 'gal.', 'gals', 'gals.', 'gallon', 'gal',
'galllons']
"""list: A list of fuel unit strings for gallons."""
ferc1_1kgal_strings = ['oil(1000 gal)', 'oil(1000)', 'oil (1000)', 'oil(1000',
'oil(1000ga)']
"""list: A list of fuel unit strings for thousand gallons."""
ferc1_gramsU_strings = [ # noqa: N816 (U-ranium is capitalized...)
'gram', 'grams', 'gm u', 'grams u235', 'grams u-235', 'grams of uran',
'grams: u-235', 'grams:u-235', 'grams:u235', 'grams u308', 'grams: u235',
'grams of', 'grams - n/a', 'gms uran', 's e uo2 grams', 'gms uranium',
'grams of urm', 'gms. of uran', 'grams (100%)', 'grams v-235',
'se uo2 grams'
]
"""list: A list of fuel unit strings for grams."""
ferc1_kgU_strings = [ # noqa: N816 (U-ranium is capitalized...)
'kg of uranium', 'kg uranium', 'kilg. u-235', 'kg u-235', 'kilograms-u23',
'kg', 'kilograms u-2', 'kilograms', 'kg of', 'kg-u-235', 'kilgrams',
'kilogr. u235', 'uranium kg', 'kg uranium25', 'kilogr. u-235',
'kg uranium 25', 'kilgr. u-235', 'kguranium 25', 'kg-u235'
]
"""list: A list of fuel unit strings for thousand grams."""
ferc1_mmbtu_strings = ['mmbtu', 'mmbtus', 'mbtus', '(mmbtu)',
"mmbtu's", 'nuclear-mmbtu', 'nuclear-mmbt']
"""list: A list of fuel unit strings for million British Thermal Units."""
ferc1_mwdth_strings = \
['mwd therman', 'mw days-therm', 'mwd thrml', 'mwd thermal',
'mwd/mtu', 'mw days', 'mwdth', 'mwd', 'mw day', 'dth', 'mwdaysthermal',
'mw day therml', 'mw days thrml', 'nuclear mwd', 'mmwd', 'mw day/therml',
'mw days/therm', 'mw days (th', 'ermal)']
"""list: A list of fuel unit strings for megawatt days thermal."""
ferc1_mwhth_strings = ['mwh them', 'mwh threm', 'nwh therm', 'mwhth',
'mwh therm', 'mwh', 'mwh therms.', 'mwh term.uts',
'mwh thermal', 'mwh thermals', 'mw hr therm',
'mwh therma', 'mwh therm.uts']
"""list: A list of fuel unit strings for megawatt hours thermal."""
ferc1_fuel_unit_strings = {'ton': ferc1_ton_strings,
'mcf': ferc1_mcf_strings,
'bbl': ferc1_bbl_strings,
'gal': ferc1_gal_strings,
'1kgal': ferc1_1kgal_strings,
'gramsU': ferc1_gramsU_strings,
'kgU': ferc1_kgU_strings,
'mmbtu': ferc1_mmbtu_strings,
'mwdth': ferc1_mwdth_strings,
'mwhth': ferc1_mwhth_strings
}
"""
dict: A dictionary linking fuel units (keys) to lists of various strings
representing those fuel units (values)
"""
# Categorizing the strings from the FERC Form 1 Plant Kind (plant_kind) field
# into lists. There are many strings that weren't categorized,
# Solar and Solar Project were not classified as these do not indicate if they
# are solar thermal or photovoltaic. Variants on Steam (e.g. "steam 72" and
# "steam and gas") were classified based on additional research of the plants
# on the Internet.
ferc1_plant_kind_steam_turbine = [
'coal', 'steam', 'steam units 1 2 3', 'steam units 4 5',
'steam fossil', 'steam turbine', 'steam a', 'steam 100',
'steam units 1 2 3', 'steams', 'steam 1', 'steam retired 2013', 'stream',
'steam units 1,2,3', 'steam units 4&5', 'steam units 4&6',
'steam conventional', 'unit total-steam', 'unit total steam',
'*resp. share steam', 'resp. share steam', 'steam (see note 1,',
'steam (see note 3)', 'mpc 50%share steam', '40% share steam',
'steam (2)', 'steam (3)', 'steam (4)', 'steam (5)', 'steam (6)',
'steam (7)', 'steam (8)', 'steam units 1 and 2', 'steam units 3 and 4',
'steam (note 1)', 'steam (retired)', 'steam (leased)', 'coal-fired steam',
'oil-fired steam', 'steam/fossil', 'steam (a,b)', 'steam (a)', 'stean',
'steam-internal comb', 'steam (see notes)', 'steam units 4 & 6',
'resp share stm note3', 'mpc50% share steam', 'mpc40%share steam',
'steam - 64%', 'steam - 100%', 'steam (1) & (2)', 'resp share st note3',
'mpc 50% shares steam', 'steam-64%', 'steam-100%', 'steam (see note 1)',
'mpc 50% share steam', 'steam units 1, 2, 3', 'steam units 4, 5',
'steam (2)', 'steam (1)', 'steam 4, 5', 'steam - 72%', 'steam (incl i.c.)',
'steam- 72%', 'steam;retired - 2013', "respondent's sh.-st.",
"respondent's sh-st", '40% share steam', 'resp share stm note3',
'mpc50% share steam', 'resp share st note 3', '\x02steam (1)',
]
"""
list: A list of strings from FERC Form 1 for the steam turbine plant kind.
"""
ferc1_plant_kind_combustion_turbine = [
'combustion turbine', 'gt', 'gas turbine',
'gas turbine # 1', 'gas turbine', 'gas turbine (note 1)',
'gas turbines', 'simple cycle', 'combustion turbine',
'comb.turb.peak.units', 'gas turbine', 'combustion turbine',
'com turbine peaking', 'gas turbine peaking', 'comb turb peaking',
'combustine turbine', 'comb. turine', 'conbustion turbine',
'combustine turbine', 'gas turbine (leased)', 'combustion tubine',
'gas turb', 'gas turbine peaker', 'gtg/gas', 'simple cycle turbine',
'gas-turbine', 'gas turbine-simple', 'gas turbine - note 1',
'gas turbine #1', 'simple cycle', 'gasturbine', 'combustionturbine',
'gas turbine (2)', 'comb turb peak units', 'jet engine',
'jet powered turbine', '*gas turbine', 'gas turb.(see note5)',
'gas turb. (see note', 'combutsion turbine', 'combustion turbin',
'gas turbine-unit 2', 'gas - turbine', 'comb turbine peaking',
'gas expander turbine', 'jet turbine', 'gas turbin (lease',
'gas turbine (leased', 'gas turbine/int. cm', 'comb.turb-gas oper.',
'comb.turb.gas/oil op', 'comb.turb.oil oper.', 'jet', 'comb. turbine (a)',
'gas turb.(see notes)', 'gas turb(see notes)', 'comb. turb-gas oper',
'comb.turb.oil oper', 'gas turbin (leasd)', 'gas turbne/int comb',
'gas turbine (note1)', 'combution turbin', '* gas turbine',
'add to gas turbine', 'gas turbine (a)', 'gas turbinint comb',
'gas turbine (note 3)', 'resp share gas note3', 'gas trubine',
'*gas turbine(note3)', 'gas turbine note 3,6', 'gas turbine note 4,6',
'gas turbine peakload', 'combusition turbine', 'gas turbine (lease)',
'comb. turb-gas oper.', 'combution turbine', 'combusion turbine',
'comb. turb. oil oper', 'combustion burbine', 'combustion and gas',
'comb. turb.', 'gas turbine (lease', 'gas turbine (leasd)',
'gas turbine/int comb', '*gas turbine(note 3)', 'gas turbine (see nos',
'i.c.e./gas turbine', 'gas turbine/intcomb', 'cumbustion turbine',
'gas turb, int. comb.', 'gas turb, diesel', 'gas turb, int. comb',
'i.c.e/gas turbine', 'diesel turbine', 'comubstion turbine',
'i.c.e. /gas turbine', 'i.c.e/ gas turbine', 'i.c.e./gas tubine',
]
"""list: A list of strings from FERC Form 1 for the combustion turbine plant
kind.
"""
ferc1_plant_kind_combined_cycle = [
'Combined cycle', 'combined cycle', 'combined', 'gas & steam turbine',
'gas turb. & heat rec', 'combined cycle', 'com. cyc', 'com. cycle',
'gas turb-combined cy', 'combined cycle ctg', 'combined cycle - 40%',
'com cycle gas turb', 'combined cycle oper', 'gas turb/comb. cyc',
'combine cycle', 'cc', 'comb. cycle', 'gas turb-combined cy',
'steam and cc', 'steam cc', 'gas steam', 'ctg steam gas',
'steam comb cycle', 'gas/steam comb. cycl', 'steam (comb. cycle)',
'gas turbine/steam', 'steam & gas turbine', 'gas trb & heat rec',
'steam & combined ce', 'st/gas turb comb cyc', 'gas tur & comb cycl',
'combined cycle (a,b)', 'gas turbine/ steam', 'steam/gas turb.',
'steam & comb cycle', 'gas/steam comb cycle', 'comb cycle (a,b)', 'igcc',
'steam/gas turbine', 'gas turbine / steam', 'gas tur & comb cyc',
'comb cyc (a) (b)', 'comb cycle', 'comb cyc', 'combined turbine',
'combine cycle oper', 'comb cycle/steam tur', 'cc / gas turb',
'steam (comb. cycle)', 'steam & cc', 'gas turbine/steam',
'gas turb/cumbus cycl', 'gas turb/comb cycle', 'gasturb/comb cycle',
'gas turb/cumb. cyc', 'igcc/gas turbine', 'gas / steam', 'ctg/steam-gas',
'ctg/steam -gas'
]
"""
list: A list of strings from FERC Form 1 for the combined cycle plant kind.
"""
ferc1_plant_kind_nuke = [
'nuclear', 'nuclear (3)', 'steam(nuclear)', 'nuclear(see note4)',
'nuclear steam', 'nuclear turbine', 'nuclear - steam',
'nuclear (a)(b)(c)', 'nuclear (b)(c)', '* nuclear', 'nuclear (b) (c)',
'nuclear (see notes)', 'steam (nuclear)', '* nuclear (note 2)',
'nuclear (note 2)', 'nuclear (see note 2)', 'nuclear(see note4)',
'nuclear steam', 'nuclear(see notes)', 'nuclear-steam',
'nuclear (see note 3)'
]
"""list: A list of strings from FERC Form 1 for the nuclear plant kind."""
ferc1_plant_kind_geothermal = [
'steam - geothermal', 'steam_geothermal', 'geothermal'
]
"""list: A list of strings from FERC Form 1 for the geothermal plant kind."""
ferc_1_plant_kind_internal_combustion = [
'ic', 'internal combustion', 'internal comb.', 'internl combustion',
'diesel turbine', 'int combust (note 1)', 'int. combust (note1)',
'int.combustine', 'comb. cyc', 'internal comb', 'diesel', 'diesel engine',
'internal combustion', 'int combust - note 1', 'int. combust - note1',
'internal comb recip', 'reciprocating engine', 'comb. turbine',
'internal combust.', 'int. combustion (1)', '*int combustion (1)',
"*internal combust'n", 'internal', 'internal comb.', 'steam internal comb',
'combustion', 'int. combustion', 'int combust (note1)', 'int. combustine',
'internl combustion', '*int. combustion (1)'
]
"""
list: A list of strings from FERC Form 1 for the internal combustion plant
kind.
"""
ferc1_plant_kind_wind = [
'wind', 'wind energy', 'wind turbine', 'wind - turbine', 'wind generation'
]
"""list: A list of strings from FERC Form 1 for the wind plant kind."""
ferc1_plant_kind_photovoltaic = [
'solar photovoltaic', 'photovoltaic', 'solar', 'solar project'
]
"""list: A list of strings from FERC Form 1 for the photovoltaic plant kind."""
ferc1_plant_kind_solar_thermal = ['solar thermal']
"""
list: A list of strings from FERC Form 1 for the solar thermal plant kind.
"""
# Making a dictionary of lists from the lists of plant_fuel strings to create
# a dictionary of plant fuel lists.
ferc1_plant_kind_strings = {
'steam': ferc1_plant_kind_steam_turbine,
'combustion_turbine': ferc1_plant_kind_combustion_turbine,
'combined_cycle': ferc1_plant_kind_combined_cycle,
'nuclear': ferc1_plant_kind_nuke,
'geothermal': ferc1_plant_kind_geothermal,
'internal_combustion': ferc_1_plant_kind_internal_combustion,
'wind': ferc1_plant_kind_wind,
'photovoltaic': ferc1_plant_kind_photovoltaic,
'solar_thermal': ferc1_plant_kind_solar_thermal
}
"""
dict: A dictionary of plant kinds (keys) and associated lists of plant_fuel
strings (values).
"""
# This is an alternative set of strings for simplifying the plant kind field
# from Uday & Laura at CPI. For the moment we have reverted to using our own
# categorizations which are more detailed, but these are preserved here for
# comparison and testing, if need be.
cpi_diesel_strings = ['DIESEL', 'Diesel Engine', 'Diesel Turbine', ]
"""
list: A list of strings for fuel type diesel compiled by Climate Policy
Initiative.
"""
cpi_geothermal_strings = ['Steam - Geothermal', ]
"""
list: A list of strings for fuel type geothermal compiled by Climate Policy
Initiative.
"""
cpi_natural_gas_strings = [
'Combined Cycle', 'Combustion Turbine', 'GT',
'GAS TURBINE', 'Comb. Turbine', 'Gas Turbine #1', 'Combine Cycle Oper',
'Combustion', 'Combined', 'Gas Turbine/Steam', 'Gas Turbine Peaker',
'Gas Turbine - Note 1', 'Resp Share Gas Note3', 'Gas Turbines',
'Simple Cycle', 'Gas / Steam', 'GasTurbine', 'Combine Cycle',
'CTG/Steam-Gas', 'GTG/Gas', 'CTG/Steam -Gas', 'Steam/Gas Turbine',
'CombustionTurbine', 'Gas Turbine-Simple', 'STEAM & GAS TURBINE',
'Gas & Steam Turbine', 'Gas', 'Gas Turbine (2)', 'COMBUSTION AND GAS',
'Com Turbine Peaking', 'Gas Turbine Peaking', 'Comb Turb Peaking',
'JET ENGINE', 'Comb. Cyc', 'Com. Cyc', 'Com. Cycle',
'GAS TURB-COMBINED CY', 'Gas Turb', 'Combined Cycle - 40%',
'IGCC/Gas Turbine', 'CC', 'Combined Cycle Oper', 'Simple Cycle Turbine',
'Steam and CC', 'Com Cycle Gas Turb', 'I.C.E/ Gas Turbine',
'Combined Cycle CTG', 'GAS-TURBINE', 'Gas Expander Turbine',
'Gas Turbine (Leased)', 'Gas Turbine # 1', 'Gas Turbine (Note 1)',
'COMBUSTINE TURBINE', 'Gas Turb, Int. Comb.', 'Combined Turbine',
'Comb Turb Peak Units', 'Combustion Tubine', 'Comb. Cycle',
'COMB.TURB.PEAK.UNITS', 'Steam and CC', 'I.C.E. /Gas Turbine',
'Conbustion Turbine', 'Gas Turbine/Int Comb', 'Steam & CC',
'GAS TURB. & HEAT REC', 'Gas Turb/Comb. Cyc', 'Comb. Turine',
]
"""list: A list of strings for fuel type gas compiled by Climate Policy
Initiative.
"""
cpi_nuclear_strings = ['Nuclear', 'Nuclear (3)', ]
"""list: A list of strings for fuel type nuclear compiled by Climate Policy
Initiative.
"""
cpi_other_strings = [
'IC', 'Internal Combustion', 'Int Combust - Note 1',
'Resp. Share - Note 2', 'Int. Combust - Note1', 'Resp. Share - Note 4',
'Resp Share - Note 5', 'Resp. Share - Note 7', 'Internal Comb Recip',
'Reciprocating Engine', 'Internal Comb', 'Resp. Share - Note 8',
'Resp. Share - Note 9', 'Resp Share - Note 11', 'Resp. Share - Note 6',
'INT.COMBUSTINE', 'Steam (Incl I.C.)', 'Other', 'Int Combust (Note 1)',
'Resp. Share (Note 2)', 'Int. Combust (Note1)', 'Resp. Share (Note 8)',
'Resp. Share (Note 9)', 'Resp Share (Note 11)', 'Resp. Share (Note 4)',
'Resp. Share (Note 6)', 'Plant retired- 2013', 'Retired - 2013',
]
"""list: A list of strings for fuel type other compiled by Climate Policy
Initiative.
"""
cpi_steam_strings = [
'Steam', 'Steam Units 1, 2, 3', 'Resp Share St Note 3',
'Steam Turbine', 'Steam-Internal Comb', 'IGCC', 'Steam- 72%', 'Steam (1)',
'Steam (1)', 'Steam Units 1,2,3', 'Steam/Fossil', 'Steams', 'Steam - 72%',
'Steam - 100%', 'Stream', 'Steam Units 4, 5', 'Steam - 64%', 'Common',
'Steam (A)', 'Coal', 'Steam;Retired - 2013', 'Steam Units 4 & 6',
]
"""list: A list of strings for fuel type steam compiled by Climate Policy
Initiative.
"""
cpi_wind_strings = ['Wind', 'Wind Turbine', 'Wind - Turbine', 'Wind Energy', ]
"""list: A list of strings for fuel type wind compiled by Climate Policy
Initiative.
"""
cpi_solar_strings = [
'Solar Photovoltaic', 'Solar Thermal', 'SOLAR PROJECT', 'Solar',
'Photovoltaic',
]
"""list: A list of strings for fuel type photovoltaic compiled by Climate
Policy Initiative.
"""
cpi_plant_kind_strings = {
'natural_gas': cpi_natural_gas_strings,
'diesel': cpi_diesel_strings,
'geothermal': cpi_geothermal_strings,
'nuclear': cpi_nuclear_strings,
'steam': cpi_steam_strings,
'wind': cpi_wind_strings,
'solar': cpi_solar_strings,
'other': cpi_other_strings,
}
"""dict: A dictionary linking fuel types (keys) to lists of strings associated
by Climate Policy Institute with those fuel types (values).
"""
# Categorizing the strings from the FERC Form 1 Type of Plant Construction
# (construction_type) field into lists.
# There are many strings that weren't categorized, including crosses between
# conventional and outdoor, PV, wind, combined cycle, and internal combustion.
# The lists are broken out into the two types specified in Form 1:
# conventional and outdoor. These lists are inclusive so that variants of
# conventional (e.g. "conventional full") and outdoor (e.g. "outdoor full"
# and "outdoor hrsg") are included.
ferc1_const_type_outdoor = [
'outdoor', 'outdoor boiler', 'full outdoor', 'outdoor boiler',
'outdoor boilers', 'outboilers', 'fuel outdoor', 'full outdoor',
'outdoors', 'outdoor', 'boiler outdoor& full', 'boiler outdoor&full',
'outdoor boiler& full', 'full -outdoor', 'outdoor steam',
'outdoor boiler', 'ob', 'outdoor automatic', 'outdoor repower',
'full outdoor boiler', 'fo', 'outdoor boiler & ful', 'full-outdoor',
'fuel outdoor', 'outoor', 'outdoor', 'outdoor boiler&full',
'boiler outdoor &full', 'outdoor boiler &full', 'boiler outdoor & ful',
'outdoor-boiler', 'outdoor - boiler', 'outdoor const.',
'4 outdoor boilers', '3 outdoor boilers', 'full outdoor', 'full outdoors',
'full oudoors', 'outdoor (auto oper)', 'outside boiler',
'outdoor boiler&full', 'outdoor hrsg', 'outdoor hrsg',
'outdoor-steel encl.', 'boiler-outdr & full',
'con.& full outdoor', 'partial outdoor', 'outdoor (auto. oper)',
'outdoor (auto.oper)', 'outdoor construction', '1 outdoor boiler',
'2 outdoor boilers', 'outdoor enclosure', '2 outoor boilers',
'boiler outdr.& full', 'boiler outdr. & full', 'ful outdoor',
'outdoor-steel enclos', 'outdoor (auto oper.)', 'con. & full outdoor',
'outdore', 'boiler & full outdor', 'full & outdr boilers',
'outodoor (auto oper)', 'outdoor steel encl.', 'full outoor',
'boiler & outdoor ful', 'otdr. blr. & f. otdr', 'f.otdr & otdr.blr.',
'oudoor (auto oper)', 'outdoor constructin', 'f. otdr. & otdr. blr',
]
"""list: A list of strings from FERC Form 1 associated with the outdoor
construction type.
"""
ferc1_const_type_semioutdoor = [
'more than 50% outdoo', 'more than 50% outdos', 'over 50% outdoor',
'over 50% outdoors', 'semi-outdoor', 'semi - outdoor', 'semi outdoor',
'semi-enclosed', 'semi-outdoor boiler', 'semi outdoor boiler',
'semi- outdoor', 'semi - outdoors', 'semi -outdoor',
'conven & semi-outdr', 'conv & semi-outdoor', 'conv & semi- outdoor',
'convent. semi-outdr', 'conv. semi outdoor', 'conv(u1)/semiod(u2)',
'conv u1/semi-od u2', 'conv-one blr-semi-od', 'convent semioutdoor',
'conv. u1/semi-od u2', 'conv - 1 blr semi od', 'conv. ui/semi-od u2',
'conv-1 blr semi-od', 'conven. semi-outdoor', 'conv semi-outdoor',
'u1-conv./u2-semi-od', 'u1-conv./u2-semi -od', 'convent. semi-outdoo',
'u1-conv. / u2-semi', 'conven & semi-outdr', 'semi -outdoor',
'outdr & conventnl', 'conven. full outdoor', 'conv. & outdoor blr',
'conv. & outdoor blr.', 'conv. & outdoor boil', 'conv. & outdr boiler',
'conv. & out. boiler', 'convntl,outdoor blr', 'outdoor & conv.',
'2 conv., 1 out. boil', 'outdoor/conventional', 'conv. boiler outdoor',
'conv-one boiler-outd', 'conventional outdoor', 'conventional outdor',
'conv. outdoor boiler', 'conv.outdoor boiler', 'conventional outdr.',
'conven,outdoorboiler', 'conven full outdoor', 'conven,full outdoor',
'1 out boil, 2 conv', 'conv. & full outdoor', 'conv. & outdr. boilr',
'conv outdoor boiler', 'convention. outdoor', 'conv. sem. outdoor',
'convntl, outdoor blr', 'conv & outdoor boil', 'conv & outdoor boil.',
'outdoor & conv', 'conv. broiler outdor', '1 out boilr, 2 conv',
'conv.& outdoor boil.', 'conven,outdr.boiler', 'conven,outdr boiler',
'outdoor & conventil', '1 out boilr 2 conv', 'conv & outdr. boilr',
'conven, full outdoor', 'conven full outdr.', 'conven, full outdr.',
'conv/outdoor boiler', "convnt'l outdr boilr", '1 out boil 2 conv',
'conv full outdoor', 'conven, outdr boiler', 'conventional/outdoor',
'conv&outdoor boiler', 'outdoor & convention', 'conv & outdoor boilr',
'conv & full outdoor', 'convntl. outdoor blr', 'conv - ob',
"1conv'l/2odboilers", "2conv'l/1odboiler", 'conv-ob', 'conv.-ob',
'1 conv/ 2odboilers', '2 conv /1 odboilers', 'conv- ob', 'conv -ob',
'con sem outdoor', 'cnvntl, outdr, boilr', 'less than 50% outdoo',
'under 50% outdoor', 'under 50% outdoors', '1cnvntnl/2odboilers',
'2cnvntnl1/1odboiler', 'con & ob', 'combination (b)', 'indoor & outdoor',
'conven. blr. & full', 'conv. & otdr. blr.', 'combination',
'indoor and outdoor', 'conven boiler & full', "2conv'l/10dboiler",
'4 indor/outdr boiler', '4 indr/outdr boilerr', '4 indr/outdr boiler',
'indoor & outdoof',
]
"""list: A list of strings from FERC Form 1 associated with the semi - outdoor
construction type, or a mix of conventional and outdoor construction.
"""
ferc1_const_type_conventional = [
'conventional', 'conventional', 'conventional boiler', 'conv-b',
'conventionall', 'convention', 'conventional', 'coventional',
'conven full boiler', 'c0nventional', 'conventtional', 'convential',
'underground', 'conventional bulb', 'conventrional',
'*conventional', 'convential', 'convetional', 'conventioanl',
'conventioinal', 'conventaional', 'indoor construction', 'convenional',
'conventional steam', 'conventinal', 'convntional', 'conventionl',
'conventionsl', 'conventiional', 'convntl steam plants', 'indoor const.',
'full indoor', 'indoor', 'indoor automatic', 'indoor boiler',
'(peak load) indoor', 'conventionl,indoor', 'conventionl, indoor',
'conventional, indoor', 'comb. cycle indoor', '3 indoor boiler',
'2 indoor boilers', '1 indoor boiler', '2 indoor boiler',
'3 indoor boilers', 'fully contained', 'conv - b', 'conventional/boiler',
'cnventional', 'comb. cycle indooor', 'sonventional',
]
"""list: A list of strings from FERC Form 1 associated with the conventional
construction type.
"""
# Making a dictionary of lists from the lists of construction_type strings to
# create a dictionary of construction type lists.
ferc1_const_type_strings = {
'outdoor': ferc1_const_type_outdoor,
'semioutdoor': ferc1_const_type_semioutdoor,
'conventional': ferc1_const_type_conventional,
}
"""dict: A dictionary of construction types (keys) and lists of construction
type strings associated with each type (values) from FERC Form 1.
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files(w / o .DBF file extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types left as 'XXX' which should obviously make an error...
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
ferc714_pudl_tables = (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
)
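"""tuple: A tuple containing the FERC Form 714 tables that can be pulled into
PUDL.
"""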
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data.
"""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
# patterns for matching columns to months:
month_dict_eia923 = {1: '_january$',
2: '_february$',
3: '_march$',
4: '_april$',
5: '_may$',
6: '_june$',
7: '_july$',
8: '_august$',
9: '_september$',
10: '_october$',
11: '_november$',
12: '_december$'}
"""dict: A dictionary mapping column numbers (keys) to months (values).
"""
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple containing the list of EIA 860 tables that can be
successfully pulled into PUDL.
"""
eia861_pudl_tables = (
"service_territory_eia861",
)
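"""tuple: A tuple containing the EIA 861 tables that can be pulled into PUDL.
"""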
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production\
plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major\
only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major\
only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment\
(Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear\
production plant (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic\
production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production\
Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production\
plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission\
plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission\
and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional\
Transmission and Market Operation\
Plant'),
(84, 'subtotal_regional_transmission', 'Subtotal: Transmission and Market\
Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA Form 923 used within the init.py module
######################################################################
# From Page 7 of EIA Form 923: the Census Region in which a US state is located
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form 923
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used to indicate where a plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923: EIA’s internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
    # are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA. Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
    'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA923 AER fuel type strings into same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER). Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in short tons for solids, thousands of cubic
# feet for gases, or barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
    'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into fuel groups that are located in the
# Electric Power Monthly: Coal, Natural Gas, Petroleum, Petroleum Coke.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OC': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# We need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters: we need to harvest the plant
# locations before harvesting the locations of the utilities, for example.
entities = {
'plants': [ # base cols
['plant_id_eia'],
# static cols
['balancing_authority_code', 'balancing_authority_name',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude',
'nerc_region', 'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'net_metering', 'pipeline_notes',
'regulatory_status_code', 'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
# {'plant_id_eia': 'int64',
# 'grid_voltage_2_kv': 'float64',
# 'grid_voltage_3_kv': 'float64',
# 'grid_voltage_kv': 'float64',
# 'longitude': 'float64',
# 'latitude': 'float64',
# 'primary_purpose_naics_id': 'float64',
# 'sector_id': 'float64',
# 'zip_code': 'float64',
# 'utility_id_eia': 'float64'},
],
'generators': [ # base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'deliver_power_transgrid', 'summer_capacity_mw',
'winter_capacity_mw', 'minimum_load_mw', 'technology_description',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date', 'utility_id_eia'],
# need type fixing
{}
# {'plant_id_eia': 'int64',
# 'generator_id': 'str'},
],
# utilities must come after plants. plant location needs to be
# removed before the utility locations are compiled
'utilities': [ # base cols
['utility_id_eia'],
# static cols
['utility_name_eia',
'entity_type'],
# annual cols
['street_address', 'city', 'state', 'zip_code',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [ # base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{}, ]}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
# EPA CEMS constants #####
epacems_rename_dict = {
"STATE": "state",
# "FACILITY_NAME": "plant_name", # Not reading from CSV
"ORISPL_CODE": "plant_id_eia",
"UNITID": "unitid",
# These op_date, op_hour, and op_time variables get converted to
# operating_date, operating_datetime and operating_time_interval in
# transform/epacems.py
"OP_DATE": "op_date",
"OP_HOUR": "op_hour",
"OP_TIME": "operating_time_hours",
"GLOAD (MW)": "gross_load_mw",
"GLOAD": "gross_load_mw",
"SLOAD (1000 lbs)": "steam_load_1000_lbs",
"SLOAD (1000lb/hr)": "steam_load_1000_lbs",
"SLOAD": "steam_load_1000_lbs",
"SO2_MASS (lbs)": "so2_mass_lbs",
"SO2_MASS": "so2_mass_lbs",
"SO2_MASS_MEASURE_FLG": "so2_mass_measurement_code",
# "SO2_RATE (lbs/mmBtu)": "so2_rate_lbs_mmbtu", # Not reading from CSV
# "SO2_RATE": "so2_rate_lbs_mmbtu", # Not reading from CSV
# "SO2_RATE_MEASURE_FLG": "so2_rate_measure_flg", # Not reading from CSV
"NOX_RATE (lbs/mmBtu)": "nox_rate_lbs_mmbtu",
"NOX_RATE": "nox_rate_lbs_mmbtu",
"NOX_RATE_MEASURE_FLG": "nox_rate_measurement_code",
"NOX_MASS (lbs)": "nox_mass_lbs",
"NOX_MASS": "nox_mass_lbs",
"NOX_MASS_MEASURE_FLG": "nox_mass_measurement_code",
"CO2_MASS (tons)": "co2_mass_tons",
"CO2_MASS": "co2_mass_tons",
"CO2_MASS_MEASURE_FLG": "co2_mass_measurement_code",
# "CO2_RATE (tons/mmBtu)": "co2_rate_tons_mmbtu", # Not reading from CSV
# "CO2_RATE": "co2_rate_tons_mmbtu", # Not reading from CSV
# "CO2_RATE_MEASURE_FLG": "co2_rate_measure_flg", # Not reading from CSV
"HEAT_INPUT (mmBtu)": "heat_content_mmbtu",
"HEAT_INPUT": "heat_content_mmbtu",
"FAC_ID": "facility_id",
"UNIT_ID": "unit_id_epa",
}
"""dict: A dictionary containing EPA CEMS column names (keys) and replacement
names to use when reading those columns into PUDL (values).
"""
# Any column that exactly matches one of these won't be read
epacems_columns_to_ignore = {
"FACILITY_NAME",
"SO2_RATE (lbs/mmBtu)",
"SO2_RATE",
"SO2_RATE_MEASURE_FLG",
"CO2_RATE (tons/mmBtu)",
"CO2_RATE",
"CO2_RATE_MEASURE_FLG",
}
"""set: The set of EPA CEMS columns to ignore when reading data.
"""
# Specify dtypes for reading the CEMS CSVs
epacems_csv_dtypes = {
"STATE": pd.StringDtype(),
# "FACILITY_NAME": str, # Not reading from CSV
"ORISPL_CODE": pd.Int64Dtype(),
"UNITID": pd.StringDtype(),
# These op_date, op_hour, and op_time variables get converted to
# operating_date, operating_datetime and operating_time_interval in
# transform/epacems.py
"OP_DATE": pd.StringDtype(),
"OP_HOUR": pd.Int64Dtype(),
"OP_TIME": float,
"GLOAD (MW)": float,
"GLOAD": float,
"SLOAD (1000 lbs)": float,
"SLOAD (1000lb/hr)": float,
"SLOAD": float,
"SO2_MASS (lbs)": float,
"SO2_MASS": float,
"SO2_MASS_MEASURE_FLG": pd.StringDtype(),
# "SO2_RATE (lbs/mmBtu)": float, # Not reading from CSV
# "SO2_RATE": float, # Not reading from CSV
# "SO2_RATE_MEASURE_FLG": str, # Not reading from CSV
"NOX_RATE (lbs/mmBtu)": float,
"NOX_RATE": float,
"NOX_RATE_MEASURE_FLG": pd.StringDtype(),
"NOX_MASS (lbs)": float,
"NOX_MASS": float,
"NOX_MASS_MEASURE_FLG": pd.StringDtype(),
"CO2_MASS (tons)": float,
"CO2_MASS": float,
"CO2_MASS_MEASURE_FLG": pd.StringDtype(),
# "CO2_RATE (tons/mmBtu)": float, # Not reading from CSV
# "CO2_RATE": float, # Not reading from CSV
# "CO2_RATE_MEASURE_FLG": str, # Not reading from CSV
"HEAT_INPUT (mmBtu)": float,
"HEAT_INPUT": float,
"FAC_ID": pd.Int64Dtype(),
"UNIT_ID": pd.Int64Dtype(),
}
"""dict: A dictionary containing column names (keys) and data types (values)
for EPA CEMS.
"""
epacems_tables = ("hourly_emissions_epacems")
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
epacems_additional_plant_info_file = importlib.resources.open_text(
'pudl.package_data.epa.cems', 'plant_info_for_additional_cems_plants.csv')
"""typing.TextIO:
Todo:
Return to
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
read_excel_epaipm_dict = {
'transmission_single_epaipm': dict(
skiprows=3,
usecols='B:F',
index_col=[0, 1],
),
'transmission_joint_epaipm': {},
'load_curves_epaipm': dict(
skiprows=3,
usecols='B:AB',
),
'plant_region_map_epaipm_active': dict(
sheet_name='NEEDS v6_Active',
usecols='C,I',
),
'plant_region_map_epaipm_retired': dict(
sheet_name='NEEDS v6_Retired_Through2021',
usecols='C,I',
),
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables and associated
information for reading those tables into PUDL (values).
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary containing EPA IPM regions (keys) and lists of their
associated abbreviations (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and items
    for each table to be renamed along with the replacement name (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
"""
tuple: A tuple of the glue tables that link plant and utility entities across
    the EIA and FERC Form 1 data in PUDL.
"""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2019)),
'eia861': tuple(range(1990, 2019)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2019)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2019)),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of years we currently expect to be able to ingest, per source:
working_years = {
'eia860': tuple(range(2009, 2019)),
'eia861': tuple(range(1999, 2019)),
'eia923': tuple(range(2009, 2019)),
'epacems': tuple(range(1995, 2019)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2019)),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years for
each data source that are able to be ingested into PUDL.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': eia861_pudl_tables,
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': ferc714_pudl_tables,
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the list of associated
tables from that datasource that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary mapping table names (keys) to tuples of integer-type
    column names (values) whose null values need fixing.
"""
contributors = {
"catalyst-cooperative": {
"title": "C<NAME>ooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing contributor identifiers (keys) and
their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing datasources (keys) and
associated attributes (values)
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
'notebook',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
        'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
'utility_id_ferc1': pd.Int64Dtype(),
'plant_id_pudl': pd.Int64Dtype(),
'plant_id_ferc1': pd.Int64Dtype(),
'utility_id_pudl': pd.Int64Dtype(),
'report_year': pd.Int64Dtype(),
'report_date': 'datetime64[ns]',
},
"ferc714": { # INCOMPLETE
"report_year": pd.Int64Dtype(),
"utility_id_ferc714": pd.Int64Dtype(),
"utility_id_eia": pd.Int64Dtype(),
"utility_name_ferc714": pd.StringDtype(),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
'nox_mass_measurement_code': pd.StringDtype(),
'co2_mass_tons': float,
'co2_mass_measurement_code': pd.StringDtype(),
'heat_content_mmbtu': float,
'facility_id': pd.Int64Dtype(), # Nullable Integer
'unit_id_epa': pd.Int64Dtype(), # Nullable Integer
},
"eia": {
'ash_content_pct': float,
'ash_impoundment': pd.BooleanDtype(),
'ash_impoundment_lined': pd.BooleanDtype(),
# TODO: convert this field to more descriptive words
'ash_impoundment_status': pd.StringDtype(),
'associated_combined_heat_power': pd.BooleanDtype(),
'balancing_authority_code': pd.StringDtype(),
'balancing_authority_id_eia': pd.Int64Dtype(),
'balancing_authority_name': pd.StringDtype(),
'bga_source': pd.StringDtype(),
'boiler_id': pd.StringDtype(),
'bypass_heat_recovery': pd.BooleanDtype(),
'capacity_mw': float,
'carbon_capture': pd.BooleanDtype(),
'chlorine_content_ppm': float,
'city': pd.StringDtype(),
'cofire_fuels': pd.BooleanDtype(),
'contact_firstname': pd.StringDtype(),
'contact_firstname2': pd.StringDtype(),
'contact_lastname': pd.StringDtype(),
'contact_lastname2': pd.StringDtype(),
'contact_title': pd.StringDtype(),
'contact_title2': pd.StringDtype(),
'contract_expiration_date': 'datetime64[ns]',
'contract_type_code': pd.StringDtype(),
'county': pd.StringDtype(),
'county_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'current_planned_operating_date': 'datetime64[ns]',
'deliver_power_transgrid': pd.BooleanDtype(),
'duct_burners': pd.BooleanDtype(),
'energy_source_code': pd.StringDtype(),
'energy_source_code_1': pd.StringDtype(),
'energy_source_code_2': pd.StringDtype(),
'energy_source_code_3': pd.StringDtype(),
'energy_source_code_4': pd.StringDtype(),
'energy_source_code_5': pd.StringDtype(),
'energy_source_code_6': pd.StringDtype(),
'energy_storage': pd.BooleanDtype(),
'entity_type': pd.StringDtype(),
'ferc_cogen_docket_no': pd.StringDtype(),
'ferc_cogen_status': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator_docket_no': pd.StringDtype(),
'ferc_small_power_producer': pd.BooleanDtype(),
'ferc_small_power_producer_docket_no': pd.StringDtype(),
'fluidized_bed_tech': pd.BooleanDtype(),
'fraction_owned': float,
'fuel_consumed_for_electricity_mmbtu': float,
'fuel_consumed_for_electricity_units': float,
'fuel_consumed_mmbtu': float,
'fuel_consumed_units': float,
'fuel_cost_per_mmbtu': float,
'fuel_group_code': pd.StringDtype(),
'fuel_group_code_simple': pd.StringDtype(),
'fuel_mmbtu_per_unit': float,
'fuel_qty_units': float,
# are fuel_type and fuel_type_code the same??
# fuel_type includes 40 code-like things.. WAT, SUN, NUC, etc.
'fuel_type': pd.StringDtype(),
# from the boiler_fuel_eia923 table, there are 30 code-like things, like NG, BIT, LIG
'fuel_type_code': pd.StringDtype(),
'fuel_type_code_aer': pd.StringDtype(),
'fuel_type_code_pudl': pd.StringDtype(),
# this is a mix of integer-like values (2 or 5) and strings like AUGSF
'generator_id': pd.StringDtype(),
'grid_voltage_2_kv': float,
'grid_voltage_3_kv': float,
'grid_voltage_kv': float,
'heat_content_mmbtu_per_unit': float,
'iso_rto_code': pd.StringDtype(),
'latitude': float,
'liquefied_natural_gas_storage': pd.BooleanDtype(),
'longitude': float,
'mercury_content_ppm': float,
'mine_id_msha': pd.Int64Dtype(),
'mine_id_pudl': pd.Int64Dtype(),
'mine_name': pd.StringDtype(),
'mine_type_code': pd.StringDtype(),
'minimum_load_mw': float,
'moisture_content_pct': float,
'multiple_fuels': pd.BooleanDtype(),
'nameplate_power_factor': float,
'natural_gas_delivery_contract_type_code': pd.StringDtype(),
'natural_gas_local_distribution_company': pd.StringDtype(),
'natural_gas_pipeline_name_1': pd.StringDtype(),
'natural_gas_pipeline_name_2': pd.StringDtype(),
'natural_gas_pipeline_name_3': pd.StringDtype(),
'natural_gas_storage': pd.BooleanDtype(),
'natural_gas_transport_code': pd.StringDtype(),
'nerc_region': pd.StringDtype(),
'net_generation_mwh': float,
'net_metering': pd.BooleanDtype(),
'nuclear_unit_id': pd.Int64Dtype(),
'original_planned_operating_date': 'datetime64[ns]',
'operating_date': 'datetime64[ns]',
'operating_switch': pd.StringDtype(),
# TODO: double check this for early 860 years
'operational_status': pd.StringDtype(),
'operational_status_code': pd.StringDtype(),
'other_combustion_tech': pd.BooleanDtype(),
'other_modifications_date': 'datetime64[ns]',
'other_planned_modifications': pd.BooleanDtype(),
'owner_city': pd.StringDtype(),
'owner_name': pd.StringDtype(),
'owner_state': pd.StringDtype(),
'owner_street_address': pd.StringDtype(),
'owner_utility_id_eia': pd.Int64Dtype(),
'owner_zip_code': pd.StringDtype(), # Must preserve leading zeroes.
# we should transition these into readable codes, not a one letter thing
'ownership_code': pd.StringDtype(),
'pipeline_notes': pd.StringDtype(),
'planned_derate_date': 'datetime64[ns]',
'planned_energy_source_code_1': pd.StringDtype(),
'planned_modifications': pd.BooleanDtype(),
'planned_net_summer_capacity_derate_mw': float,
'planned_net_summer_capacity_uprate_mw': float,
'planned_net_winter_capacity_derate_mw': float,
'planned_net_winter_capacity_uprate_mw': float,
'planned_new_capacity_mw': float,
'planned_new_prime_mover_code': pd.StringDtype(),
'planned_repower_date': 'datetime64[ns]',
'planned_retirement_date': 'datetime64[ns]',
'planned_uprate_date': 'datetime64[ns]',
'plant_id_eia': pd.Int64Dtype(),
'plant_id_pudl': pd.Int64Dtype(),
'plant_name_eia': pd.StringDtype(),
'plants_reported_asset_manager': pd.BooleanDtype(),
'plants_reported_operator': pd.BooleanDtype(),
'plants_reported_other_relationship': pd.BooleanDtype(),
'plants_reported_owner': pd.BooleanDtype(),
'pulverized_coal_tech': pd.BooleanDtype(),
'previously_canceled': pd.BooleanDtype(),
'primary_transportation_mode_code': pd.StringDtype(),
'primary_purpose_naics_id': pd.Int64Dtype(),
'prime_mover_code': pd.StringDtype(),
'regulatory_status_code': pd.StringDtype(),
'report_date': 'datetime64[ns]',
'rto_iso_lmp_node_id': pd.StringDtype(),
'rto_iso_location_wholesale_reporting_id': pd.StringDtype(),
'retirement_date': 'datetime64[ns]',
'secondary_transportation_mode_code': pd.StringDtype(),
'sector_id': pd.Int64Dtype(),
'sector_name': pd.StringDtype(),
'solid_fuel_gasification': pd.BooleanDtype(),
'startup_source_code_1': pd.StringDtype(),
'startup_source_code_2': pd.StringDtype(),
'startup_source_code_3': pd.StringDtype(),
'startup_source_code_4': pd.StringDtype(),
'state': pd.StringDtype(),
'state_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'street_address': pd.StringDtype(),
'stoker_tech': pd.BooleanDtype(),
'subcritical_tech': pd.BooleanDtype(),
'sulfur_content_pct': float,
'summer_capacity_mw': float,
# TODO: check if there is any data pre-2016
'summer_estimated_capability_mw': float,
'supercritical_tech': pd.BooleanDtype(),
'supplier_name': pd.StringDtype(),
'switch_oil_gas': | pd.BooleanDtype() | pandas.BooleanDtype |
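# A minimal sketch of how a dtype mapping such as `column_dtypes["eia"]` above
# is meant to be applied: coerce a DataFrame's columns to the configured dtypes,
# skipping any column that is not present. The DataFrame here is illustrative.
def apply_column_dtypes_example(df, dtype_map):
    """Cast the columns of df that appear in dtype_map to the mapped dtypes."""
    usable = {col: dtype for col, dtype in dtype_map.items() if col in df.columns}
    return df.astype(usable)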
"""
Created on Wed Oct 9 14:10:17 2019
@author: <NAME>meters
Builds the graph of the Athens road network using the osmnx package.
"""
from pneumapackage.settings import *
import osmnx as ox
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.collections import LineCollection
import networkx as nx
import pandas as pd
import geopandas as gpd
from collections import Counter, OrderedDict
from shapely.geometry import Point, LineString, Polygon
import math
import pyproj
import itertools
from operator import itemgetter
from statistics import mean
import numpy as np
from pylab import *
import pickle
import json
def create_bbox(gdf, latlon=True):
"""
    Create a bounding box whose values are ordered for later use as input.
    :param gdf: geodataframe with coordinate columns
    :param latlon: if True, expect 'lat' and 'lon' columns in gdf
    :return: bbox: ordered North, South, East, West (lat, lat, lon, lon)
"""
c1, c2 = None, None
if latlon:
assert {'lat', 'lon'}.issubset(set(gdf.columns))
c1, c2 = 'lat', 'lon'
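# A standalone sketch of the bounding-box assembly described in the docstring
# above, assuming latitude/longitude columns; this is an illustration, not
# necessarily the package's own implementation.
def bbox_from_latlon_columns(gdf):
    """Return [North, South, East, West] from a geodataframe's lat/lon columns."""
    return [gdf['lat'].max(), gdf['lat'].min(), gdf['lon'].max(), gdf['lon'].min()]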
class Box:
def __init__(self, bbox, epsg_proj=None):
"""
:param bbox: ordered North, South, East, West (lat, lat, lon, lon)
"""
self.bounding_box = bbox
self.north = bbox[0]
self.south = bbox[1]
self.east = bbox[2]
self.west = bbox[3]
self.crs_proj = epsg_proj
self.corners_lonlat = self.get_corners_lonlat()
self.corners_proj = self.get_corners_proj()
self.crs_lonlat = crs_pneuma
def get_lat(self):
return [self.north, self.south]
def get_lon(self):
return [self.east, self.west]
def get_x(self):
xs = [i[0] for i in self.corners_proj]
return xs
def get_y(self):
ys = [i[1] for i in self.corners_proj]
return ys
def get_corners_lonlat(self):
pts = [(r[0], r[1]) for r in itertools.product(self.get_lon(), self.get_lat())]
pts = [pts[0], pts[1], pts[3], pts[2]]
return pts
def get_corners_proj(self):
pts, proj = project_point(self.corners_lonlat, epsg_proj=self.crs_proj, return_proj=True)
self.crs_proj = proj
return pts
def get_polygon(self, lonlat=False):
pts = self.corners_proj
if lonlat:
pts = self.corners_lonlat
bb_polygon = Polygon(pts)
return bb_polygon
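# A brief usage sketch for Box, with illustrative coordinates for central Athens;
# note that instantiating it calls project_point from this package to fill in
# the projected corners.
# bbox_example = [37.99, 37.97, 23.74, 23.72]   # North, South, East, West
# box_example = Box(bbox_example)
# polygon_lonlat = box_example.get_polygon(lonlat=True)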
class CreateNetwork:
def __init__(self, bounding_box, network_type='drive_service', crs='epsg:4326', tags_nodes=None, tags_edges=None,
simplify_strict=False, custom_filter=None, truncate_by_edge=False):
# researched area (bounding box)
self.bounding_box = bounding_box
self.network_type = network_type
self.custom_filter = custom_filter
self.strict = simplify_strict
self.tags_nodes = tags_nodes
self.tags_edges = tags_edges
self.crs = crs
if tags_edges is None:
self.tags_edges = ['bridge', 'tunnel', 'oneway', 'lanes', 'name',
'highway', 'busway', 'busway:both', 'busway:left', 'busway:right',
'maxspeed', 'service', 'access', 'area',
'landuse', 'width', 'est_width', 'junction', 'surface', 'turn']
if tags_nodes is None:
self.tags_nodes = ['highway', 'public_transport', 'traffic_signals', 'crossing']
# download the road network from OSM
ox.config(useful_tags_way=self.tags_edges, useful_tags_node=self.tags_nodes)
self.graph_latlon = ox.graph_from_bbox(self.bounding_box[0], self.bounding_box[1], self.bounding_box[2],
self.bounding_box[3], network_type=self.network_type,
custom_filter=self.custom_filter, simplify=self.strict,
truncate_by_edge=truncate_by_edge)
self.graph_xy = ox.project_graph(self.graph_latlon)
self.graph_raw = self.graph_latlon
self.network_edges = pd.DataFrame()
self.network_nodes = pd.DataFrame()
self.used_network = pd.DataFrame()
self.mm_id = {}
self.node_tags = node_tags(self.graph_raw, tag='highway')
def network_dfs(self):
g = self.graph_latlon
if not self.strict:
g = ox.simplify_graph(g, strict=self.strict)
g = ox.add_edge_bearings(g, precision=1)
n, e = ox.graph_to_gdfs(g)
e = e.reset_index() # Method graph_to_gdfs changed to multiindex df
network_edges, network_nodes_small = dbl_cleaning(ndf=n, edf=e)
network_edges = network_edges.join(network_nodes_small, on='u')
network_edges = network_edges.rename(columns={'u': 'n1', 'y': 'lat1', 'x': 'lon1'})
network_edges = network_edges.join(network_nodes_small, on='v')
network_edges = network_edges.rename(columns={'v': 'n2', 'y': 'lat2', 'x': 'lon2'})
x1, y1 = zip(*project_point(list(zip(network_edges.lon1, network_edges.lat1))))
x2, y2 = zip(*project_point(list(zip(network_edges.lon2, network_edges.lat2))))
network_edges = network_edges.assign(x1=x1, y1=y1, x2=x2, y2=y2)
network_edges['edge'] = list(zip(network_edges['n1'].values, network_edges['n2'].values))
network_edges.reset_index(inplace=True) # From hereon the unique index of an edge is just its position in df
network_edges = network_edges.rename(columns={'index': '_id'})
self.graph_latlon = g
self.graph_xy = ox.project_graph(self.graph_latlon)
self.network_edges = network_edges
self._get_network_nodes(network_edges)
# link node_tags to specific edge, osmid not unique over edges after simplification
nearest = ox.get_nearest_edges(self.graph_xy, self.node_tags.x.to_list(), self.node_tags.y.to_list(),
method='kdtree', dist=1)
n1, n2, _ = zip(*nearest)
test_b1 = network_edges[['_id', 'edge', 'bearing']][network_edges.edge.isin(list(zip(n1, n2)))].values
test_b2 = network_edges[['_id', 'edge', 'bearing']][network_edges.edge.isin(list(zip(n2, n1)))].values
self.node_tags['edge'] = [ij for ij in zip(n1, n2)]
self.node_tags.reset_index(inplace=True)
self.node_tags = self.node_tags.merge(self.network_edges[['edge', 'bearing']], on='edge',
suffixes=('', '_edge'))
diff_b = abs(self.node_tags['bearing'] - self.node_tags['bearing_edge'])
for i, j in diff_b.iteritems():
if (j > 45) and not self.node_tags.junction[i]:
self.node_tags.at[i, 'edge'] = (self.node_tags.at[i, 'edge'][1], self.node_tags.at[i, 'edge'][0])
self.node_tags.drop('bearing_edge', axis=1, inplace=True)
self.node_tags = self.node_tags.merge(self.network_edges[['_id', 'edge', 'bearing']], on='edge',
suffixes=('', '_edge'))
diff_b2 = abs(self.node_tags['bearing'] - self.node_tags['bearing_edge'])
# check if nearest edge is in right direction, problem with two way streets
self.node_tags.set_index('index', inplace=True)
self.node_tags.sort_index(inplace=True)
def plot_dbl(self, new_added=False):
network_matrix = self.network_edges
fig, ax = plt.subplots()
network_matrix.plot(ax=ax, edgecolor='lightgrey')
network_matrix[network_matrix['dbl_left']].plot(ax=ax, edgecolor='r', linewidth=3, label='DBL: Contra flow')
network_matrix[network_matrix['dbl_right']].plot(ax=ax, edgecolor='g', linewidth=3, label='DBL: With flow')
network_matrix[np.logical_and(network_matrix['dbl_right'], network_matrix['dbl_left'])].plot(
ax=ax, edgecolor='purple', linewidth=3, label='DBL: Both directions')
if new_added:
str_new = 'new_edge'
network_matrix[network_matrix['osmid'] == str_new].plot(ax=ax, edgecolor='y', linewidth=3,
label='Newly Added')
ax.legend(loc='upper left')
fig.suptitle('Dedicated bus lanes in Athens research area')
plt.show()
def plot_network_lanes(self):
# Plot graph with number of lanes, colours for categorisation of roads
G = self.graph_latlon
edge_lanes = list(G.edges.data('lanes', default='0.5'))
n_lanes = [x[2] for x in edge_lanes]
for num, i in enumerate(n_lanes):
t = type(i)
if t is list:
n_lanes[num] = [float(y) for y in n_lanes[num]]
n_lanes[num] = mean(n_lanes[num])
print(num)
else:
n_lanes[num] = float(n_lanes[num])
n_lanes = [float(x) for x in n_lanes]
## Creating a pos_list based on longitude and latitude
labels = nx.get_edge_attributes(G, 'lanes')
colors = ['lightgrey', 'r', 'orange', 'y', 'blue', 'g', 'm', 'c', 'pink', 'darkred']
keys = list(Counter(n_lanes).keys())
keys.sort()
col_dict = OrderedDict(zip(keys, colors))
print(col_dict)
lane_colors = [col_dict[x] for x in n_lanes]
fig, ax = ox.plot_graph(G, edge_linewidth=n_lanes, edge_color=lane_colors,
show=False, close=False, node_size=1)
markersize = 6
legend_elements = [0] * len(keys)
for k, v in col_dict.items():
idx = keys.index(k)
if float(k) < 1:
label = 'NaN'
idx = 0
elif float(k) == 1:
label = ' 1 lane'
idx = 1
elif float(k) > int(k):
label = f'{int(k)} to {int(k) + 1} lanes (list)'
else:
label = f'{int(k)} lanes'
legend_elements[idx] = Line2D([0], [0], marker='s', color="#061529", label=label,
markerfacecolor=col_dict[k], markersize=markersize)
ax.legend(handles=legend_elements, frameon=True, framealpha=0.7, loc='lower left',
fontsize=6)
fig.suptitle('Athens network with colors and width of edges wrt lanes')
plt.show()
def _get_network_nodes(self, network_edges):
n1 = network_edges[['n1', 'lat1', 'lon1', 'x1', 'y1']]
n2 = network_edges[['n2', 'lat2', 'lon2', 'x2', 'y2']]
n2 = n2.rename(columns={'n2': 'n1', 'lat2': 'lat1', 'lon2': 'lon1', 'x2': 'x1', 'y2': 'y1'})
n = | pd.concat([n1, n2], axis=0) | pandas.concat |
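# The concat step above (inside _get_network_nodes) stacks both edge endpoints;
# a compact sketch of the typical follow-up (keeping each node only once) is
# shown below. The drop_duplicates step is an assumption, not the package's code.
def unique_nodes_example(n1, n2):
    """Stack two endpoint DataFrames and keep one row per node id ('n1')."""
    nodes = pd.concat([n1, n2], axis=0)
    return nodes.drop_duplicates(subset='n1').reset_index(drop=True)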
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import os
from scipy.stats import pearsonr, linregress
from statsmodels.stats.multitest import multipletests
np.seterr(divide='ignore') # Hide Runtime warning regarding log(0) = -inf
import process_files
import Laplacian
import fitfunctions
import Robustness_Stability
# Plot settings
plt.rc('xtick', labelsize=18)
plt.rc('ytick', labelsize=18)
class DataManager(object):
# Initialization
def __init__(self, exp_data, synuclein, timepoints, seed, output_path, use_expression_values=None, file_format='png', display_plots=False):
self.seed = seed
self.output_path = output_path
# Connectivity tables
self.connectivity_ipsi = pd.read_csv("./Data83018/connectivity_ipsi.csv", index_col=0)
self.connectivity_contra = | pd.read_csv("./Data83018/connectivity_contra.csv", index_col=0) | pandas.read_csv |
"""
Imputation
https://scikit-learn.org/stable/modules/generated/sklearn.impute.SimpleImputer.html
fill in missing values
1. Execute the code
(in Jupyter, split it into multiple cells)
2. Understand what is happening
QUESTION: What other imputation strategies exist (check out the "strategy" parameter in the documentation)?
3. Explain to the rest of the group what you did
"""
import pandas as pd
import numpy as np
from sklearn.impute import SimpleImputer
df = pd.read_csv('penguins_simple.csv', sep=';')
df.iloc[3:10, 4] = np.NaN
imputer = SimpleImputer(strategy='most_frequent')
cols = df[['Body Mass (g)']]
# count the number of missing values
print(cols['Body Mass (g)'].isna().sum())
imputer.fit(cols) # learn the most frequent value
t = imputer.transform(cols) # result is a numpy array
print(t.shape)
print()
# format output as a DataFame
cols_imputed = | pd.DataFrame(t, columns=cols.columns) | pandas.DataFrame |
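# Regarding the QUESTION above: SimpleImputer also supports strategy='mean',
# strategy='median' and strategy='constant' (the latter combined with
# fill_value); 'mean' and 'median' only work on numeric columns. A short
# sketch with the median strategy, reusing the same file and column as above:
df2 = pd.read_csv('penguins_simple.csv', sep=';')
df2.iloc[3:10, 4] = np.NaN
median_imputer = SimpleImputer(strategy='median')
filled = median_imputer.fit_transform(df2[['Body Mass (g)']])
print(pd.DataFrame(filled, columns=['Body Mass (g)']).isna().sum())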
import numpy as np
import pandas as pd
def auto_pate(method):
"""自动添加括号"""
method = str.strip(method)
if method[-1] != ')':
if '(' not in method:
method = method + '()'
else:
method = method + ')'
return method
def back_args_str(*args, **kwargs):
largs = [f"'{str(a)}'" if isinstance(a, str) else str(a) for a in args]
kw = [str(k) + '=' + ("'" + str(v) + "'" if isinstance(v, str) else str(v)) for k, v in kwargs.items()]
largs.extend(kw)
return ','.join(largs)
def mad_based_outlier(points, thresh=3.5):
points = np.array(points)
if len(points.shape) == 1:
points = points[:, None]
median = np.median(points, axis=0)
# diff = np.sum((points - median) ** 2, axis=-1)
# diff = np.sqrt(diff)
diff = np.abs(points - median)
med_abs_deviation = np.median(diff, axis=0)
modified_z_score = 0.6745 * diff / med_abs_deviation
result = modified_z_score > thresh
return result.squeeze().tolist()
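# A quick usage sketch for mad_based_outlier on a tiny illustrative sample:
# only the extreme value should be flagged.
if __name__ == '__main__':
    print(mad_based_outlier([1.1, 0.9, 1.0, 1.2, 50.0]))
    # -> [False, False, False, False, True]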
"""
Bayesian blocks binning
=======================
An automatic binning algorithm based on the Bayesian blocks method, adapted from
https://jakevdp.github.io/blog/2012/09/12/dynamic-programming-in-python/
"""
import numpy as np
from sklearn.utils.multiclass import type_of_target
def bayesian_blocks(t):
# copy and sort the array
t = [x[0] for x in t]
print(t)
t = np.sort(t)
N = t.size
# create length-(N + 1) array of cell edges
edges = np.concatenate([t[:1],
0.5 * (t[1:] + t[:-1]),
t[-1:]])
block_length = t[-1] - edges
# arrays needed for the iteration
nn_vec = np.ones(N)
best = np.zeros(N, dtype=float)
last = np.zeros(N, dtype=int)
for K in range(N):
width = block_length[:K + 1] - block_length[K + 1]
count_vec = np.cumsum(nn_vec[:K + 1][::-1])[::-1]
fit_vec = count_vec * (np.log(count_vec) - np.log(width))
fit_vec -= 4
fit_vec[1:] += best[:K]
i_max = np.argmax(fit_vec)
last[K] = i_max
best[K] = fit_vec[i_max]
change_points = np.zeros(N, dtype=int)
i_cp = N
ind = N
while True:
i_cp -= 1
change_points[i_cp] = ind
if ind == 0:
break
ind = last[ind - 1]
change_points = change_points[i_cp:]
return edges[change_points]
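# A usage sketch for bayesian_blocks: it expects an iterable of 1-element rows
# (note the x[0] unpacking above) and returns adaptive bin edges. Illustrative
# random data, so the edges vary from run to run.
if __name__ == '__main__':
    sample = [[v] for v in np.random.normal(size=100)]
    print(bayesian_blocks(sample))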
def get_bins(y, x, drop_ratio=1.0, n=3):
df1 = pd.DataFrame({'x': x, 'y': y})
justmiss = df1[['x', 'y']][df1.x.isnull()]
notmiss = df1[['x', 'y']][df1.x.notnull()]
bin_values = []
if n is None:
d1 = pd.DataFrame({'x': notmiss.x, 'y': notmiss.y, 'Bucket': notmiss.x})
else:
x_uniq = notmiss.x.drop_duplicates().to_numpy()
if len(x_uniq) <= n:
bin_values = list(x_uniq)
bin_values.sort()
else:
x_series = sorted(notmiss.x.to_numpy())
x_cnt = len(x_series)
bin_ration = np.linspace(1.0 / n, 1, n)
bin_values = list(set([x_series[int(ratio * x_cnt) - 1] for ratio in bin_ration]))
bin_values.sort()
if x_series[0] < bin_values[0]:
bin_values.insert(0, x_series[0])
if len(bin_values) == 1:
bin_values.insert(0, bin_values[0] - 1)
d1 = pd.DataFrame(
{'x': notmiss.x, 'y': notmiss.y, 'Bucket': | pd.cut(notmiss.x, bin_values, precision=8, include_lowest=True) | pandas.cut |
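# The binning above relies on pd.cut; a compact illustration of that call shape
# (illustrative data, explicit edges, lowest edge included) is:
example_values = pd.Series([1, 2, 3, 4, 5, 6])
example_buckets = pd.cut(example_values, [1, 3, 5, 6], precision=8, include_lowest=True)
print(example_buckets.value_counts())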
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(timedelta_range('1 day', periods=3))
expected = Series(pd.date_range('2012-01-02', periods=3))
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
tm.assert_equal(ts + tdser, expected)
tm.assert_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31',
periods=3, freq='-1D'))
expected2 = tm.box_expected(expected2, box)
tm.assert_equal(ts - tdser, expected2)
tm.assert_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
def test_tdi_sub_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_intlike(self, box):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box)
err = TypeError if box is not pd.Index else NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
if box is pd.DataFrame and isinstance(scalar, np.ndarray):
# raises ValueError
pytest.xfail(reason="DataFrame to broadcast incorrectly")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser):
if type(vec) is Series and not dtype.startswith('float'):
pytest.xfail(reason='GH#19123 integer interpreted as nanos')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
# TODO: parametrize over these four ops?
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
def test_td64arr_add_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly leading "
"to alignment error",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, delta, box):
# only test adding/sub offsets as + is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + delta
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, delta, box):
# only test adding/sub offsets as - is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="Index fails to return "
"NotImplemented on "
"reverse op",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box_df_fail):
# GH#18849
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box_df_fail):
# GH#18824, GH#19744
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_df_fail):
# GH#18824
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="object dtype Series "
"fails to return "
"NotImplemented",
strict=True, raises=TypeError)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box):
# GH#18849
box2 = Series if box is pd.Index else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_df_fail):
# GH#18824
box = box_df_fail # DataFrame tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps(object):
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_df_fail):
box = box_df_fail # DataFrame op returns object instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, delta, box):
if box is pd.DataFrame and not isinstance(delta, pd.DateOffset):
pytest.xfail(reason="returns m8[ns] instead of raising")
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng * delta
def test_tdi_mul_int_array_zerodim(self, box_df_fail):
box = box_df_fail # DataFrame op returns object dtype
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
idx = tm.box_expected(idx, box)
box2 = pd.Series if box is pd.Index else box
expected = tm.box_expected(expected, box2)
result = idx * pd.Series(np.arange(5, dtype='int64'))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype='float64')
expected = TimedeltaIndex(rng5f * (rng5f + 0.1))
box2 = pd.Series if box is pd.Index else box
expected = tm.box_expected(expected, box2)
result = idx * Series(rng5f + 0.1)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize('other', [
np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)
], ids=lambda x: type(x).__name__)
def test_tdi_rmul_arraylike(self, other, box_df_fail):
# RangeIndex fails to return NotImplemented, for others
# DataFrame tries to broadcast incorrectly
box = box_df_fail
tdi = TimedeltaIndex(['1 Day'] * 10)
expected = timedelta_range('1 days', '10 days')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__
def test_td64arr_div_nat_invalid(self, box_df_fail):
# don't allow division by NaT (maybe could in the future)
box = box_df_fail # DataFrame returns all-NaT instead of raising
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng / pd.NaT
def test_td64arr_div_int(self, box_df_fail):
box = box_df_fail # DataFrame returns object dtype instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx / 1
tm.assert_equal(result, idx)
def test_tdi_div_tdlike_scalar(self, delta, box_df_fail):
box = box_df_fail # DataFrame op returns m8[ns] instead of float64
rng = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Float64Index((np.arange(10) + 1) * 12, name='foo')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng / delta
tm.assert_equal(result, expected)
def test_tdi_div_tdlike_scalar_with_nat(self, delta, box_df_fail):
box = box_df_fail # DataFrame op returns m8[ns] instead of float64
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = pd.Float64Index([12, np.nan, 24], name='foo')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng / delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly returns "
"m8[ns] instead of f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_floordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = td1 // scalar_td
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly casts to f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_rfloordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = scalar_td // td1
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns m8[ns] dtype "
"instead of f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_rfloordiv_tdscalar_explicit(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
def test_td64arr_floordiv_int(self, box_df_fail):
box = box_df_fail # DataFrame returns object dtype
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx // 1
tm.assert_equal(result, idx)
def test_td64arr_floordiv_tdlike_scalar(self, delta, box_df_fail):
box = box_df_fail # DataFrame returns m8[ns] instead of int64 dtype
tdi = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Int64Index((np.arange(10) + 1) * 12, name='foo')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi // delta
tm.assert_equal(result, expected)
# TODO: Is this redundant with test_td64arr_floordiv_tdlike_scalar?
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=10, seconds=7),
Timedelta('10m7s'),
Timedelta('10m7s').to_timedelta64()
], ids=lambda x: type(x).__name__)
def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_df_fail):
# GH#19125
box = box_df_fail # DataFrame op returns m8[ns] instead of f8 dtype
tdi = TimedeltaIndex(['00:05:03', '00:05:03', pd.NaT], freq=None)
expected = pd.Index([2.0, 2.0, np.nan])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
res = tdi.__rfloordiv__(scalar_td)
tm.assert_equal(res, expected)
expected = pd.Index([0.0, 0.0, np.nan])
expected = tm.box_expected(expected, box)
res = tdi // (scalar_td)
tm.assert_equal(res, expected)
# ------------------------------------------------------------------
# Operations with invalid others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="__mul__ op treats "
"timedelta other as i8; "
"rmul OK",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_mul_tdscalar_invalid(self, box, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = 'operate|unsupported|cannot|not supported'
with tm.assert_raises_regex(TypeError, pattern):
td1 * scalar_td
with tm.assert_raises_regex(TypeError, pattern):
scalar_td * td1
def test_td64arr_mul_too_short_raises(self, box):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx * idx[:3]
with pytest.raises(ValueError):
idx * np.array([1, 2])
def test_td64arr_mul_td64arr_raises(self, box):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx * idx
# ------------------------------------------------------------------
# Operations with numeric others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object-dtype",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('one', [1, np.array(1), 1.0, np.array(1.0)])
def test_td64arr_mul_numeric_scalar(self, box, one, tdser):
# GH#4521
# divide/multiply by integers
expected = Series(['-59 Days', '-59 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
result = tdser * (-one)
tm.assert_equal(result, expected)
result = (-one) * tdser
tm.assert_equal(result, expected)
expected = Series(['118 Days', '118 Days', 'NaT'],
dtype='timedelta64[ns]')
expected = tm.box_expected(expected, box)
result = tdser * (2 * one)
tm.assert_equal(result, expected)
result = (2 * one) * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object-dtype",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('two', [2, 2.0, np.array(2), np.array(2.0)])
def test_td64arr_div_numeric_scalar(self, box, two, tdser):
# GH#4521
# divide/multiply by integers
expected = Series(['29.5D', '29.5D', 'NaT'], dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
result = tdser / two
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('op', [operator.mul, ops.rmul])
def test_td64arr_rmul_numeric_array(self, op, box, vector, dtype, tdser):
# GH#4521
# divide/multiply by integers
vector = vector.astype(dtype)
expected = Series(['1180 Days', '1770 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
# TODO: Make this up-casting more systematic?
box = Series if (box is pd.Index and type(vector) is Series) else box
expected = tm.box_expected(expected, box)
result = op(vector, tdser)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])],
ids=lambda x: type(x).__name__)
def test_td64arr_div_numeric_array(self, box, vector, dtype, tdser):
# GH#4521
# divide/multiply by integers
vector = vector.astype(dtype)
expected = Series(['2.95D', '1D 23H 12m', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
box = Series if (box is pd.Index and type(vector) is Series) else box
expected = tm.box_expected(expected, box)
result = tdser / vector
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
vector / tdser
# TODO: Should we be parametrizing over types for `ser` too?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_mul_int_series(self, box, names):
# GH#19042 test for correct name attachment
tdi = TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'],
name=names[0])
ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1])
expected = Series(['0days', '1day', '4days', '9days', '16days'],
dtype='timedelta64[ns]',
name=names[2])
tdi = tm.box_expected(tdi, box)
box = Series if (box is pd.Index and type(ser) is Series) else box
expected = tm.box_expected(expected, box)
result = ser * tdi
tm.assert_equal(result, expected)
# The direct operation tdi * ser still needs to be fixed.
result = ser.__rmul__(tdi)
tm.assert_equal(result, expected)
# TODO: Should we be parametrizing over types for `ser` too?
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_float_series_rdiv_td64arr(self, box, names):
# GH#19042 test for correct name attachment
# TODO: the direct operation TimedeltaIndex / Series still
# needs to be fixed.
tdi = TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'],
name=names[0])
ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1])
expected = Series([tdi[n] / ser[n] for n in range(len(ser))],
dtype='timedelta64[ns]',
name=names[2])
tdi = tm.box_expected(tdi, box)
box = Series if (box is pd.Index and type(ser) is Series) else box
expected = tm.box_expected(expected, box)
result = ser.__rdiv__(tdi)
if box is pd.DataFrame:
# TODO: Should we skip this case sooner or test something else?
assert result is NotImplemented
else:
tm.assert_equal(result, expected)
class TestTimedeltaArraylikeInvalidArithmeticOps(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="raises ValueError "
"instead of TypeError",
strict=True))
])
def test_td64arr_pow_invalid(self, scalar_td, box):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = 'operate|unsupported|cannot|not supported'
with tm.assert_raises_regex(TypeError, pattern):
scalar_td ** td1
with tm.assert_raises_regex(TypeError, pattern):
td1 ** scalar_td
# ------------------------------------------------------------------
@pytest.fixture(params=[pd.Float64Index(np.arange(5, dtype='float64')),
pd.Int64Index(np.arange(5, dtype='int64')),
pd.UInt64Index(np.arange(5, dtype='uint64'))],
ids=lambda x: type(x).__name__)
def idx(request):
return request.param
zeros = [box_cls([0] * 5, dtype=dtype)
for box_cls in [pd.Index, np.array]
for dtype in [np.int64, np.uint64, np.float64]]
zeros.extend([np.array(0, dtype=dtype)
for dtype in [np.int64, np.uint64, np.float64]])
zeros.extend([0, 0.0, long(0)])
@pytest.fixture(params=zeros)
def zero(request):
# For testing division by (or of) zero for Index with length 5, this
# gives several scalar-zeros and length-5 vector-zeros
return request.param
class TestDivisionByZero(object):
def test_div_zero(self, zero, idx):
expected = pd.Index([np.nan, np.inf, np.inf, np.inf, np.inf],
dtype=np.float64)
result = idx / zero
tm.assert_index_equal(result, expected)
ser_compat = Series(idx).astype('i8') / np.array(zero).astype('i8')
tm.assert_series_equal(ser_compat, Series(result))
def test_floordiv_zero(self, zero, idx):
expected = pd.Index([np.nan, np.inf, np.inf, np.inf, np.inf],
dtype=np.float64)
result = idx // zero
tm.assert_index_equal(result, expected)
ser_compat = Series(idx).astype('i8') // np.array(zero).astype('i8')
        tm.assert_series_equal(ser_compat, Series(result))
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 30 14:50:32 2018
@author: <NAME>
"""
import pandas as pd
import numpy as np
from eotg import eotg
#%%
quotes = pd.read_csv('quotes.csv')
# -*- coding: utf-8 -*-
"""
# IMPORTING THE LIBRARIES
"""
import pandas as pd
import gc #--> free memory
from datetime import date, datetime
from pytz import timezone
fuso_horario = timezone('America/Sao_Paulo')
data_e_hora_Manaus = datetime.today().astimezone(fuso_horario)
"""# 0. INPUTS DO USUÁRIO
## 0.1 Qual caminho principal percorrer no código:
"""
caminho = int(input("""Digite uma das opções abaixo para qual tipo de dados você quer trabalhar:
Digite 1 para Relatórios Analíticos
Digite 2 para Gerar Bases Diárias sem atualização dos Consultores
Digite 3 para Atualizar Bases Diárias com base de Consultores
Digite sua Resposta: """))
while (caminho >3 or caminho<1):
caminho = int(input("""Digite uma das opções abaixo para qual tipo de dados você quer trabalhar:
Digite 1 para Relatórios Analíticos
Digite 2 para Gerar Bases Diárias sem atualização dos Consultores
Digite 3 para Atualizar Bases Diárias com base de Consultores
Digite sua Resposta: """))
"""## 0.2. Número de Base de Consultores para Importar"""
n_consultores = int(input('Digite o número de Consultores que irá atualizar as Bases: '))
while (n_consultores > 7 or n_consultores < 1):
print("Digite um número de 1 a 7")
n_consultores = int(input('Digite o número de Consultores que irá atualizar as Bases: '))
"""## 0.3. Maneira que vai importar a Base Geral
"""
tipo_baseGeral = int(input("""Digite uma das opções abaixo para importação da Base Geral:
Digite 1 para inserir Base Geral Única
Digite 2 para inserir 2 ou mais bases)
Digite sua Resposta: """))
while (tipo_baseGeral not in [1,2]):
tipo_baseGeral = int(input("""Digite uma Resposta válida!, suas opções são:
Digite 1 para inserir Base Geral Única
Digite 2 para inserir 2 ou mais bases)
Digite sua Resposta: """))
"""# 1. CRIANDO FUNÇÕES
##1.1. Função upper
Serve Para tornar todos os elementos de uma coluna MAIÚSCULOS.
"""
def upper(df, col):
return df.assign(**{col : df[col].str.upper()})
#Receives the dataframe and the name of the dataframe column
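# Illustrative usage of upper (a hedged sketch; the column name below is only an example):
#   >>> exemplo = pd.DataFrame({'NOME': ['ana silva', 'bruno costa']})
#   >>> upper(exemplo, 'NOME')['NOME'].tolist()
#   ['ANA SILVA', 'BRUNO COSTA']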
"""## 1.2. Função FiltrarBase
Serve para verificar as linhas da coluna1 ("CONSULTOR_x") da base geral, criada no merge para verificar pelo nome do candidato, quais desses candidatos pertecem a qual consultor ( da base geral de consultores).
E também para fazer a mesma coisa com a coluna2 ("CONSULTOR_y") que foi criada através do merge para verificar pelo número de candidato ("INSCRICAO) quais desses candidatos pertecem a qual consultor.
"""
def FiltrarBase(coluna1,coluna2):
if (coluna1 != " "): #verifica se os itens da coluna1 são diferentes de " ", sendo diferente retorna o proprio valor da coluna1, se igual a " " vai para o próximo if
#if (coluna2 == " "): #linha opcional
return coluna1
#else: #linha opcional
#return coluna2 #linha opcional
elif (coluna2 != " "): #veridica se os itens da coluna2 são diferentes de " ", sendo diferente retorna o prorpio valor da coluna2 se igual a " " vai para o else
return coluna2
else: #caso nehuma das condições anteriores sejam atendidas, retorna 0
return 0
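# Illustrative usage of FiltrarBase (a hedged sketch; the merged frame is made up, with the
# CONSULTOR_x / CONSULTOR_y columns produced by the name and INSCRICAO merges described above):
#   >>> merged = pd.DataFrame({'CONSULTOR_x': ['MARIA', ' ', ' '],
#   ...                        'CONSULTOR_y': [' ', 'JOAO', ' ']})
#   >>> merged.apply(lambda r: FiltrarBase(r['CONSULTOR_x'], r['CONSULTOR_y']), axis=1).tolist()
#   ['MARIA', 'JOAO', 0]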
"""# 2. IMPORTANDO DADOS
## 2.1. Importando Bases de Dados dos Consultores
"""
for n in range(n_consultores):
try:
globals()['df_Consultor' + str(n+1)] = pd.read_excel(f'consultor{n+1}.xlsx',sheet_name = 0, skiprows = 0)
except:
print('Nenhum arquivo encontrado')
else:
print(f'Arquivo de Consultor{n+1} carregado')
"""## 2.2. Importando Base de Dados Geral do SIA"""
lista = ['academicas','financeiras','classificados','te','desistentes']
i=0
if tipo_baseGeral == 1:
df_basegeral = pd.read_excel(f'{date.today().strftime("%d.%m.%Y")}-basegeral.xlsx',sheet_name = 0, skiprows = 0)
if tipo_baseGeral == 2:
for n in (lista):
try:
      if n in ['financeiras','classificados']: #creates global variables for spreadsheets that have 3 tabs
for g in range(3):
globals()[f'df_{n}{g}'] = pd.read_excel(f'{datetime.today().astimezone(fuso_horario).strftime("%d.%m.%Y")}-{n}.xlsx',sheet_name = g, skiprows = 0)
i+=1
#if n == "financeiras":
#globals()[f'df_Inter{n}'] = pd.concat([df_financeiras0, df_financeiras1,df_financeiras2])
#else:
#globals()[f'df_Inter{n}'] = pd.concat([df_classificados0, df_classificados1,df_classificados2])
      else: #creates global variables for spreadsheets that have only one tab
globals()[f'df_{n}'] = pd.read_excel(f'{datetime.today().astimezone(fuso_horario).strftime("%d.%m.%Y")}-{n}.xlsx',sheet_name = 0, skiprows = 0)
i+=1
except:
print(f'A base {datetime.today().astimezone(fuso_horario).strftime("%d.%m.%Y")}-{n} não existe!')
else:
if n in ['financeiras','classificados']:
print(f'Todas as 3 Abas da base {datetime.today().astimezone(fuso_horario).strftime("%d.%m.%Y")}-{n} foram registradas com sucesso!')
else:
print(f'A base {datetime.today().astimezone(fuso_horario).strftime("%d.%m.%Y")}-{n} foi registrada com sucesso!')
print(f'O total de bases registradas foram: {i}')
"""## 3. Concatenando Tabelas do SIA"""
df_basegeral= pd.DataFrame()
if 'df_academicas' in globals():
df_basegeral = df_basegeral.append(df_academicas,ignore_index=True)
print('O DataFrame df_basegeral foi concatenado')
if'df_financeiras0' in globals():
df_basegeral = df_basegeral.append(df_financeiras0,ignore_index=True)
print('O DataFrame df_financeira0 foi concatenado')
if'df_financeiras1' in globals():
df_basegeral = df_basegeral.append(df_financeiras1,ignore_index=True)
print('O DataFrame df_financeira1 foi concatenado')
if'df_financeiras2' in globals():
df_basegeral = df_basegeral.append(df_financeiras2,ignore_index=True)
print('O DataFrame df_financeira2 foi concatenado')
if'df_te' in globals():
df_basegeral = df_basegeral.append(df_te,ignore_index=True)
print('O DataFrame df_te foi concatenado')
if'df_desistentes' in globals():
df_basegeral = df_basegeral.append(df_desistentes,ignore_index=True)
print('O DataFrame df_desistentes foi concatenado')
if'df_classificados0' in globals():
df_basegeral = df_basegeral.append(df_classificados0,ignore_index=True)
print('O DataFrame df_classificados0 concatenado')
if'df_classificados1' in globals():
df_basegeral = df_basegeral.append(df_classificados1,ignore_index=True)
print('O DataFrame df_classificados1 foi concatenado')
if'df_classificados2' in globals():
df_basegeral = df_basegeral.append(df_classificados2,ignore_index=True)
print('O DataFrame df_classificados2 foi concatenado')
"""# CONCATENANDO TABELAS DE CONSULTORES"""
# Concatenando Tabelas dentro do condicional
if n_consultores ==1:
df_Consultores = pd.concat([df_Consultor1])
elif n_consultores ==2:
df_Consultores = pd.concat([df_Consultor1,df_Consultor2])
elif n_consultores ==3:
df_Consultores = pd.concat([df_Consultor1,df_Consultor2,df_Consultor3])
elif n_consultores ==4:
  df_Consultores = pd.concat([df_Consultor1,df_Consultor2,df_Consultor3,df_Consultor4])
#!/bin/env python
#
# Script name: IDP_html_gen.py
#
# Description: Script to generate IDP page of QC html report.
#
## Author: <NAME>
import pandas as pd
import numpy as np
import sys
import os
from ast import literal_eval
def formatter(x):
try:
return "{:e}".format(float(x))
except:
return x
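# Illustrative behaviour of formatter (a hedged sketch): numeric-looking values are rendered in
# scientific notation, anything else is passed through unchanged.
#   >>> formatter('0.00123')
#   '1.230000e-03'
#   >>> formatter('n/a')
#   'n/a'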
def generate_full_IDPoi_data(df, IDP_dir):
"""Function that adds IDP values to an existing IDP dataframe, using the
relevant IDP txt from the subject's IDP directory. Each IDP txt file
corresponds with a IDP category.
Parameters
----------
df : pd.DataFrame
Dataframe containing details about IDPs, no values present.
IDP_dir : string
Full path to the directory containing the subject's IDP output
txt files.
Returns
----------
output : pd.DataFrame
Dataframe containing details about IDPs, with values included
"""
flag = False
#output df placeholder
output = pd.DataFrame(
columns=[
"num","short","category","num_in_cat","long","unit","dtype","description","value"
],
)
#for each IDP category, access its corresponding IDP value file
for category in df["category"].unique():
#sub-df containing only IDPs for this category
df_sub = df[df["category"] == category]
        #open the category's IDP value txt file, clean whitespaces, and split into a df
cat_data = []
try:
with open(IDP_dir + category + ".txt") as my_file:
for line in my_file:
line = line.strip()
line = line.split(" ")
cat_data.append(line)
            cat_data = pd.DataFrame(cat_data)
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
# In[2]:
train = pd.read_csv("D:/ML/Dataset/MedicalInsurance/Train-1542865627584.csv")
beneficiary = pd.read_csv("D:/ML/Dataset/MedicalInsurance/Train_Beneficiarydata-1542865627584.csv")
inpatient = pd.read_csv("D:/ML/Dataset/MedicalInsurance/Train_Inpatientdata-1542865627584.csv")
outpatient = pd.read_csv("D:/ML/Dataset/MedicalInsurance/Train_Outpatientdata-1542865627584.csv")
tt = pd.read_csv("D:/ML/Dataset/MedicalInsurance/Test-1542969243754.csv")
tb = pd.read_csv("D:/ML/Dataset/MedicalInsurance/Test_Beneficiarydata-1542969243754.csv")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Analyses
@author: boyangzhao
"""
import os
import numpy as np
import pandas as pd
import re
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import logging
from ceres_infer.utils import *
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
######################################################################
# Supporting methods
######################################################################
def plotCountsPie(df_counts, titleTxt, filename, outdir_sub='./',
autopct='%0.1f%%', colors=None):
# plot pie chart
labels = ['%s (%d)' % (x, y) for x, y in zip(df_counts.index, df_counts.values)]
plt.figure()
plt.pie(df_counts.values, labels=labels, autopct=autopct, colors=colors)
if titleTxt is not None:
plt.title(titleTxt)
plt.axis("image")
plt.tight_layout()
if outdir_sub is not None:
plt.savefig("%s/%s_pie.pdf" % (outdir_sub, filename))
plt.close()
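# Illustrative usage (a hedged sketch; the counts and output directory below are made up). Any pandas
# Series of counts works, e.g. the result of a value_counts()/groupby().count() call:
#   df_counts = pd.Series({'CERES': 120, 'RNA-seq': 45, 'CN': 10})
#   plotCountsPie(df_counts, 'Data source summary', 'example_sources', outdir_sub='./out/')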
def getFeatN(feat_summary):
# get maximum number of features from feat_summary
s = np.concatenate([re.findall('feat_sources?(.*)$', n) for n in feat_summary.columns.values])
return max([int(n) for n in s if n.isdigit()])
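# Illustrative behaviour (a hedged sketch): with columns such as
# ['target', 'feat_sources', 'feat_source1', 'feat_source2', 'feat_source3'],
# the regex captures '', '1', '2', '3' and getFeatN returns 3.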
def plotImpSource(impRank, df, outdir_sub='./',
autopct='%0.1f%%', src_colors_dict=None):
source_col = 'feat_source%d' % impRank
df_counts = df[df[source_col] != ''].groupby(source_col)[source_col].count()
src_colors = None if src_colors_dict is None else [src_colors_dict[s] for s in df_counts.index]
plotCountsPie(df_counts,
'Data source summary (for %s important feature)' % int2ordinal(impRank),
'global_imprank-%d' % impRank,
outdir_sub,
autopct=autopct,
colors=src_colors)
def parseGenesets(fname):
genesets = dict()
f = open(fname)
for gs in f:
gs_name = re.sub('\\t\\t.*\\n', '', gs)
genes = re.sub('.*\\t\\t', '', gs).replace('\t\n', '').split(sep='\t')
genes = np.hstack(genes)
genesets[gs_name] = genes
f.close()
return genesets
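# Expected input format (a hedged sketch inferred from the parsing above): one gene set per line,
# '<geneset name>\t\t<gene1>\t<gene2>...\t\n', e.g. an Enrichr-style export, so the line
#   'Apoptosis\t\tBAX\tBCL2\tCASP3\t\n'
# yields {'Apoptosis': array(['BAX', 'BCL2', 'CASP3'], ...)}.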
def isInSameGS(target, features, genesets):
# check if both target and feature is in the same geneset
# target is one value; features can be an array
# requires: genesets
if not isinstance(features, list):
features = [features]
isInBools = [(len(set(features).intersection(gs)) > 0) and (target not in features) and (target in gs) for _, gs in
genesets.items()]
return sum(isInBools) > 0
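# Illustrative usage (a hedged sketch; the gene set below is made up):
#   >>> gs = {'MAPK signaling': ['KRAS', 'BRAF', 'MAP2K1']}
#   >>> isInSameGS('KRAS', ['BRAF'], gs)
#   True
#   >>> isInSameGS('KRAS', ['KRAS'], gs)  # a feature list containing the target itself is excluded
#   False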
def isInSameGS_sources(target, features, sources, genesets):
# check if both target and feature is in the same geneset
# return concatenated sources
# requires: genesets
if not isinstance(features, list):
features = [features]
if not isinstance(sources, list):
sources = [sources]
idx = [isInSameGS(target, f, genesets) for f in features]
return '_'.join(pd.Series(sources)[idx].sort_values().unique())
def isInSameGene(target, features, func_args=''):
# check if target (gene) is in features list
# func_args is a placeholder argument
if not isinstance(features, list):
features = [features]
return (target in features)
def isInSameGene_sources(target, features, sources, func_args=''): # gene in features list of sources
# func_args is a placeholder argument
if not isinstance(features, list):
features = [features]
if not isinstance(sources, list):
sources = [sources]
idx = [isInSameGene(target, f) for f in features]
return '_'.join(pd.Series(sources)[idx].sort_values().unique())
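# Illustrative usage (a hedged sketch; the feature and source values are made up):
#   >>> isInSameGene('TP53', ['TP53', 'MDM2'])
#   True
#   >>> isInSameGene_sources('TP53', ['TP53', 'MDM2', 'TP53'], ['CERES', 'RNA-seq', 'Mut'])
#   'CERES_Mut'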
# get contribution by importance rank and by source
def getGrpCounts(isInSame_func, isInSame_sources_func, feat_summary, func_args=None):
# tally counts for if feat and target are in the same group, with the same
# assessed by the isInSame_func function
topN = getFeatN(feat_summary)
sameGs_counts = pd.DataFrame()
sameGs_src_counts = pd.DataFrame()
feat_summary_annot = feat_summary.copy()
for i in range(1, topN + 1):
gene_col = 'feat_gene%d' % i
source_col = 'feat_source%d' % i
# get counts for same gene between target and given i-th important feature
sameGs_bool = [isInSame_func(g, f, func_args) for g, f in zip(feat_summary.target, feat_summary[gene_col])]
sameGs_counts = sameGs_counts.append(pd.DataFrame({'importanceRank': [str(i)],
'count': [sum(sameGs_bool)]}))
# break down by source, for the same gene (between target and i-th important feature)
df = feat_summary.loc[sameGs_bool,]
src_count = df[df[source_col] != ''].groupby(source_col)[source_col].count()
c = pd.DataFrame({'source': src_count.index.values,
'count': src_count.values,
'importanceRank': str(i)})
sameGs_src_counts = sameGs_src_counts.append(c)
feat_summary_annot['inSame_%d' % i] = sameGs_bool
# add in for the top N combined, if any gene in top N features are the same gene as the target
sameGs_bool = [isInSame_func(g, f, func_args) for g, f in zip(feat_summary.target, feat_summary.feat_genes)]
sameGs_counts = sameGs_counts.append(pd.DataFrame({'importanceRank': ['top%d' % topN],
'count': [sum(sameGs_bool)]}))
feat_summary_annot['inSame_top%d' % topN] = sameGs_bool
# add in for breakdown by source
sameGs_src = [isInSame_sources_func(g, f, s, func_args) for g, f, s in
zip(feat_summary.target, feat_summary.feat_genes, feat_summary.feat_sources)]
df = pd.DataFrame({'source': sameGs_src})
src_count = df[df.source != ''].groupby('source')['source'].count()
c = pd.DataFrame({'source': src_count.index.values,
'count': src_count.values,
'importanceRank': 'top%d' % topN})
sameGs_src_counts = sameGs_src_counts.append(c)
# calc percentages
sameGs_counts['percent'] = sameGs_counts['count'] * 100 / feat_summary.shape[0]
sameGs_src_counts['percent'] = sameGs_src_counts['count'] * 100 / feat_summary.shape[0]
return sameGs_counts, sameGs_src_counts, feat_summary_annot
def getGrpCounts_fromFeatSummaryAnnot(feat_summary_annot, remove_zero=True):
# get group counts, based on the feat_summary_annot file,
# useful when reading in just the feat_summary_annot and need to recreate the sameGs_counts and sameGs_src_counts
# NOTE, for the topx, here it is calculated differently (as the sum, grouped by source)
# whereas in the original sameGs_counts/sameGs_src_counts, we can try concentate the sources
# different calculations for different goals
df1 = feat_summary_annot.loc[:, feat_summary_annot.columns.str.startswith('inSame')].apply(sum, axis=0)
df1 = df1.to_frame(name='count')
df1['percent'] = df1['count'] * 100 / feat_summary_annot.shape[0]
df1['importanceRank'] = df1.index.str.extract('_(.*)').values
topx_name = [re.findall('top.*', n) for n in df1['importanceRank'].unique() if re.match('top.*', n)][0][0]
df2 = pd.DataFrame()
for n in df1['importanceRank'].unique():
if n != topx_name:
df_tmp = feat_summary_annot.groupby('feat_source%s' % n)['inSame_%s' % n].apply(sum)
df_tmp = df_tmp.to_frame(name='count')
df_tmp['percent'] = df_tmp['count'] * 100 / feat_summary_annot.shape[0]
df_tmp['importanceRank'] = n
df_tmp['source'] = df_tmp.index
df2 = pd.concat([df2, df_tmp], ignore_index=True, sort=False)
df_tmp = df2.groupby('source')['count'].apply(sum)
df_tmp = df_tmp.to_frame(name='count')
df_tmp['percent'] = df_tmp['count'] * 100 / feat_summary_annot.shape[0]
df_tmp['importanceRank'] = topx_name
df_tmp['source'] = df_tmp.index
df2 = pd.concat([df2, df_tmp], ignore_index=True, sort=False)
sameGrp_counts = df1.loc[df1['count'] > 0, :].copy()
sameGrp_src_counts = df2.loc[df2['count'] > 0, :].copy()
return sameGrp_counts, sameGrp_src_counts
def plotGrpCounts(sameGrp_counts, sameGrp_src_counts, feat_summary_annot, pfx, outdir_sub='./'):
if (np.logical_not(os.path.exists(outdir_sub))): os.mkdir(outdir_sub)
# get prefix and create new subfolder, where outputs go
pfx_cat = pfx.replace(' ', '')
outdir_sub = '%s/%s/' % (outdir_sub, pfx_cat)
if np.logical_not(os.path.exists(outdir_sub)): os.mkdir(outdir_sub)
topN = getFeatN(feat_summary_annot)
# -- csv
feat_summary_annot.to_csv('%s/feat_summary_annot.csv' % (outdir_sub))
if sameGrp_counts['count'].sum() < 1:
# no matches to group (count=0), nothing more to do
return True
# -- plots
# bar plot
plt.figure()
ax = sns.barplot('importanceRank', 'percent', data=sameGrp_counts, color='royalblue')
ax.set(xlabel='Feature rank', ylabel='%s (percent of total targets)' % pfx)
plt.tight_layout()
plt.savefig("%s/impRank_bar_pct.pdf" % (outdir_sub))
plt.close()
plt.figure()
ax = sns.barplot('importanceRank', 'count', data=sameGrp_counts, color='royalblue')
ax.set(xlabel='Feature rank', ylabel='%s (count)' % pfx)
plt.tight_layout()
plt.savefig("%s/impRank_bar_n.pdf" % (outdir_sub))
plt.close()
plt.figure()
df = sameGrp_src_counts.pivot('importanceRank', 'source')['percent']
df = df.reindex(sameGrp_src_counts.importanceRank.unique())
ax = df.plot(kind='bar', stacked=True)
ax.set(xlabel='Feature rank', ylabel='%s (percent of total targets)' % pfx)
plt.tight_layout()
plt.savefig("%s/impRank_source_bar_pct.pdf" % (outdir_sub))
plt.close()
plt.figure()
df = sameGrp_src_counts.pivot('importanceRank', 'source')['count']
df = df.reindex(sameGrp_src_counts.importanceRank.unique())
ax = df.plot(kind='bar', stacked=True)
ax.set(xlabel='Feature rank', ylabel='%s (count)' % pfx)
plt.tight_layout()
plt.savefig("%s/impRank_source_bar_n.pdf" % (outdir_sub))
plt.close()
# pie charts
def plotGrpPies(impRankTxt, pfx):
if not any(sameGrp_counts.importanceRank.str.contains(impRankTxt)):
return None
pfx_cat = pfx.replace(' ', '')
c = sameGrp_counts.loc[sameGrp_counts.importanceRank == impRankTxt, 'count'][0]
df_counts = pd.Series({pfx: c,
'not %s' % pfx: feat_summary_annot.shape[0] - c})
plotCountsPie(df_counts,
'Of the %s important feature' % int2ordinal(impRankTxt),
'imprank-%s' % (impRankTxt),
outdir_sub)
# check the data source, of the ones where same gene as the target
c = sameGrp_src_counts.loc[sameGrp_src_counts.importanceRank == impRankTxt,]
df_counts = pd.Series(c['count'].values, index=c['source'])
plotCountsPie(df_counts,
'Of the %s important feature, feat/target %s' % (int2ordinal(impRankTxt), pfx),
'imprank-%s_source' % (impRankTxt),
outdir_sub)
plotGrpPies('1', pfx) # proportion of genes where the top feature is the same gene as the target
plotGrpPies('2', pfx) # proportion of genes where the top 2nd feature is the same gene as the target
plotGrpPies('top%d' % topN, pfx)
# Score ranked
# plt.figure(figsize=(50,7))
# ax = sns.barplot('target','score_test',
# data=feat_summary_annot.sort_values('score_test', ascending=False),
# hue='inSame_top%d'%topN)
# ax.set(xticklabels=[], xlabel='Target gene', ylabel='Score test')
# plt.title(pfx)
# plt.savefig("%s/score_test_rank.pdf" % (outdir_sub))
# plt.close()
def generate_featSummary(varExp, outdir_sub='./'):
topN = max(varExp.feat_idx) # max number of features in reduced model
varExp_noNeg = varExp.loc[varExp.score_ind > 0, :]
feature_cat = varExp_noNeg.groupby('target')['feature'].apply(lambda x: ','.join(x))
score_ind_cat = varExp_noNeg.groupby('target')['score_ind'].apply(lambda x: ','.join(round(x, 3).map(str)))
feat_summary = varExp_noNeg.groupby('target')[['target', 'score_rd', 'score_full']].first()
feat_summary = feat_summary.merge(feature_cat, left_index=True, right_index=True)
feat_summary = feat_summary.merge(score_ind_cat, left_index=True, right_index=True)
feat_summary['feat_sources'] = feat_summary.apply(lambda x: getFeatSource(x['feature']), axis=1)
feat_summary['feat_genes'] = feat_summary.apply(lambda x: getFeatGene(x['feature']), axis=1)
for i in range(1, topN + 1):
feat_summary['feat_gene%d' % i] = feat_summary.apply(
lambda x: x.feat_genes[i - 1] if len(x.feat_genes) > (i - 1) else '',
axis=1) # get the nth most important feature, per gene
feat_summary['feat_source%d' % i] = feat_summary.apply(
lambda x: x.feat_sources[i - 1] if len(x.feat_sources) > (i - 1) else '',
axis=1) # get the nth most important feature, per gene
feat_summary['feats_n'] = feat_summary.feat_genes.apply(lambda x: len(x))
feat_summary.to_csv('%s/feat_summary.csv' % outdir_sub, index=False)
return feat_summary
def plotFeatSrcCounts(feat_summary, outdir_sub='./'):
if not os.path.exists(outdir_sub): os.mkdir(outdir_sub)
# analyze feat_summary
topN = getFeatN(feat_summary)
for n in range(1, topN + 1):
plotImpSource(n, feat_summary, outdir_sub)
df_counts = pd.Series([y for x in feat_summary.feat_sources for y in x]).value_counts()
plotCountsPie(df_counts,
'Data source summary (top %d features)' % topN,
'imprank-top%d' % topN,
outdir_sub)
# number of top features per gene distribution
# plt.figure()
# ax = sns.countplot(feat_summary.feats_n, color='royalblue')
# ax.set(xlabel='Number of features in model', ylabel='Number of genes (predicted)')
# plt.title('Size of reduced model')
# plt.savefig("%s/model_size_bar.pdf" % (outdir_sub))
# plt.close()
######################################################################
# Feature analyses
######################################################################
def anlyz_varExp(varExp, suffix='', outdir_sub='./'):
# summarize the scores; given _varExp data
if not os.path.exists(outdir_sub): os.mkdir(outdir_sub)
# Score grouped by
plt.figure()
ax = sns.boxplot('feat_idx', 'score_ind', data=varExp.loc[varExp.score_ind > 0,], color='royalblue')
ax.set(xlabel='Feature rank', ylabel='Score (univariate)', yscale='log')
plt.title('Score (univariate)\nnegative score excluded; %s' % suffix)
plt.savefig("%s/grp_score_uni_by_featRank_log.pdf" % (outdir_sub))
plt.close()
plt.figure()
ax = sns.violinplot('feat_idx', 'score_ind', data=varExp.loc[varExp.score_ind > 0,], color='royalblue')
ax.set(xlabel='Feature rank', ylabel='Score (univariate)')
plt.title('Score (univariate)\nnegative score excluded; %s' % suffix)
plt.savefig("%s/grp_score_uni_by_featRank.pdf" % (outdir_sub))
plt.close()
plt.figure()
ax = sns.violinplot('feat_source', 'score_ind', data=varExp.loc[varExp.score_ind > 0, :], alpha=0.1, jitter=True)
ax.set(xlabel='Feature source', ylabel='Score (univariate)')
plt.title('Score (univariate), grouped by source\nnegative score excluded; %s' % suffix)
plt.savefig("%s/grp_score_uni_by_featSource.pdf" % (outdir_sub))
plt.close()
df = varExp.groupby('feat_idx')['score_ind'].apply(np.nanmedian)
plt.figure()
ax = sns.barplot(df.index, df, color='royalblue')
ax.set(xlabel='Feature rank', ylabel='median score (univariate)')
plt.title('median score (univariate), grouped by feature rank; %s' % suffix)
plt.savefig("%s/grp_score_uni_by_featRank_med_raw.pdf" % (outdir_sub))
plt.close()
df = varExp.loc[varExp.score_ind > 0,].groupby('feat_idx')['score_ind'].apply(np.nanmedian)
plt.figure()
ax = sns.barplot(df.index, df, color='royalblue')
ax.set(xlabel='Feature rank', ylabel='median score (univariate)')
plt.title('median score (univariate), grouped by feature rank\nnegative score excluded; %s' % suffix)
plt.savefig("%s/grp_score_uni_by_featRank_med.pdf" % (outdir_sub))
plt.close()
df = varExp.loc[varExp.score_ind > 0,].groupby('feat_source')['score_ind'].apply(np.nanmedian)
plt.figure()
ax = sns.barplot(df.index, df, color='royalblue')
ax.set(xlabel='Feature source', ylabel='median score (univariate)')
plt.title('median score (univariate), grouped by source\nnegative score excluded; %s' % suffix)
plt.savefig("%s/grp_score_uni_by_featSource_med.pdf" % (outdir_sub))
plt.close()
# Score distributions
score_vals_all = varExp.loc[varExp.score_full > 0,].groupby('target')['score_full'].apply(lambda x: x.iloc[0])
plt.figure()
ax = sns.distplot(score_vals_all)
ax.set(xlabel='Score of full model', ylabel='Count')
plt.title('Distribution of score (full model)\nnegative score excluded; %s' % suffix)
plt.savefig("%s/score_dist_all.pdf" % (outdir_sub))
plt.close()
score_vals_rd = varExp.loc[varExp.score_rd > 0,].groupby('target')['score_rd'].apply(lambda x: x.iloc[0])
plt.figure()
ax = sns.distplot(score_vals_rd)
ax.set(xlabel='Score of reduced model', ylabel='Count')
plt.title('Distribution of score (reduced model)\nnegative score excluded; %s' % suffix)
plt.savefig("%s/score_dist_rd.pdf" % (outdir_sub))
plt.close()
score_vals_uni = varExp.score_ind[varExp.score_ind > 0]
plt.figure()
ax = sns.distplot(score_vals_uni)
ax.set(xlabel='Score of univariate', ylabel='Count')
plt.title('Distribution of score (univariate model)\nnegative score excluded; %s' % suffix)
plt.savefig("%s/score_dist_uni.pdf" % (outdir_sub))
plt.close()
score_stats = pd.DataFrame({'univariate': score_vals_uni.describe(),
'reduced': score_vals_rd.describe(),
'full': score_vals_all.describe()})
score_stats.to_csv("%s/stats_score.csv" % (outdir_sub))
# Score compares
df_fullrd = varExp.groupby('target')[['score_full',
'score_rd']].first() # for full/rd, keep first, the rest are redundant (row is unique by univariate)
df = pd.concat([pd.DataFrame({'score': df_fullrd.score_full, 'label': 'full model'}),
pd.DataFrame({'score': df_fullrd.score_rd, 'label': 'reduced model'}),
pd.DataFrame({'score': varExp.score_ind, 'label': 'univariate'})])
plt.figure()
ax = sns.boxplot(x='label', y='score', data=df.loc[df.score > 0, :], color='royalblue')
ax.set(xlabel='Model', ylabel='Score')
plt.title('Score\nnegative score excluded; %s' % suffix)
plt.savefig("%s/compr_score_boxplot.pdf" % (outdir_sub))
plt.close()
df = varExp.loc[varExp.score_full > 0, :]
ax = sns.scatterplot(df.score_rd, df.score_full, s=40, alpha=0.03, color='steelblue')
ax.plot([0, 0.9], [0, 0.9], ls="--", c=".3")
ax.set(xlabel='Score reduced model', ylabel='Score full model')
plt.title('Score\nnegative score (full) excluded; %s' % suffix)
plt.savefig("%s/compr_score_scatter.pdf" % (outdir_sub))
plt.close()
df = varExp
ax = sns.scatterplot(df.score_rd, df.score_full, s=40, alpha=0.03, color='steelblue')
ax.plot([0, 0.9], [0, 0.9], ls="--", c=".3")
ax.set(xlabel='Score reduced model', ylabel='Score full model')
plt.title('Score\n%s' % suffix)
plt.savefig("%s/compr_score_scatter_all.pdf" % (outdir_sub))
plt.close()
# Score ratios
plt.figure()
ax = sns.boxplot('feat_idx', 'varExp_ofFull',
data=varExp.loc[(varExp.varExp_ofFull > 0) & (np.abs(varExp.varExp_ofFull) != np.inf),],
color='royalblue')
ax.set(xlabel='Feature rank', ylabel='Score of univariate / score of full model',
yscale='log')
plt.title('Proportion of score (univariate vs full model)\nnegative score excluded; %s' % suffix)
plt.savefig("%s/ratio_score_UniVsFull.pdf" % (outdir_sub))
plt.close()
plt.figure()
ax = sns.boxplot('feat_idx', 'varExp_ofRd',
data=varExp.loc[(varExp.varExp_ofRd > 0) & (np.abs(varExp.varExp_ofRd) != np.inf),],
color='royalblue')
ax.set(xlabel='Feature rank', ylabel='Score of univariate / score of reduced model',
yscale='log')
plt.title('Proportion of score (univariate vs reduced model)\nnegative score excluded; %s' % suffix)
plt.savefig("%s/ratio_score_UniVsRd.pdf" % (outdir_sub))
plt.close()
df = varExp.loc[varExp.varExp_ofFull > 0,].groupby('feat_idx')['varExp_ofFull'].apply(np.nanmedian)
plt.figure()
ax = sns.barplot(df.index, df, color='royalblue')
ax.set(xlabel='Feature rank', ylabel='Score of univariate / score of full model')
plt.title('Proportion of score (univariate vs full model)\nnegative score excluded; %s' % suffix)
plt.savefig("%s/ratio_score_UniVsFull_med.pdf" % (outdir_sub))
plt.close()
df = varExp.loc[varExp.varExp_ofRd > 0,].groupby('feat_idx')['varExp_ofRd'].apply(np.nanmedian)
plt.figure()
ax = sns.barplot(df.index, df, color='royalblue')
ax.set(xlabel='Feature rank', ylabel='Score of univariate / score of reduced model')
plt.title('Proportion of score (univariate vs reduced model)\nnegative score excluded; %s' % suffix)
plt.savefig("%s/ratio_score_UniVsRd_med.pdf" % (outdir_sub))
plt.close()
plt.figure()
ax = sns.distplot(varExp.varExp_ofRd[(varExp.varExp_ofRd > 0) & (np.abs(varExp.varExp_ofRd) != np.inf)])
ax.set(xlabel='Score of univariate / score of reduced model', ylabel='Count')
plt.title('Distribution of score univariate / score reduced\nnegative score excluded; %s' % suffix)
plt.savefig("%s/ratio_score_dist_UniVsRd.pdf" % (outdir_sub))
plt.close()
# number of features
feats_n = varExp.groupby('target')[['target', 'score_rd', 'score_full']].apply(lambda x: x.iloc[0, :]).copy()
n = varExp.loc[varExp.score_ind > 0, :].groupby('target')['target'].count()
feats_n['N'] = 0
feats_n.loc[n.index, 'N'] = n
plt.figure()
ax = sns.boxplot('N', 'score_rd', data=feats_n.loc[feats_n.N > 0, :], color='royalblue')
ax.set(xlabel='No of features', ylabel='Score (reduced model)')
plt.title('Score of target gene, stratified by number of features\nnegative score excluded; %s' % suffix)
plt.savefig("%s/nFeat_score_rd.pdf" % (outdir_sub))
plt.close()
plt.figure()
ax = sns.countplot(feats_n.N, color='royalblue')
ax.set(xlabel='No of features', ylabel='Count (target genes)')
plt.title('Number of features, per target gene\nnegative score excluded; %s' % suffix)
plt.savefig("%s/nFeat.pdf" % (outdir_sub))
plt.close()
# statistics
f = open("%s/stats_score_median.txt" % (outdir_sub), "w")
f.write('Median score (full model): %0.2f\n' % np.nanmedian(score_vals_all))
f.write('Median score (reduced model, top %d): %0.2f\n' % (max(varExp.feat_idx), np.nanmedian(score_vals_rd)))
for n in range(1, max(varExp.feat_idx) + 1):
f.write('Median score (univariate, %s feature): %0.2f\n' % (
int2ordinal(n), np.nanmedian(varExp.loc[varExp.feat_idx == n, 'score_ind'].astype(float))))
for n in range(1, max(varExp.feat_idx) + 1):
f.write('Median score (univariate, %s feature, non-neg score): %0.2f\n' % (int2ordinal(n), np.nanmedian(
varExp.loc[varExp.score_ind > 0,].loc[varExp.feat_idx == n, 'score_ind'].astype(float))))
f.close()
def anlyz_varExp_wSource(varExp, dm_data=None, suffix='', outdir_sub='./', ):
# analyze the model results, based on merge with raw source data
if not os.path.exists(outdir_sub): os.mkdir(outdir_sub)
if dm_data is None:
# if dm_data is not given, then try to retrieve it
from src.ceres_infer.data import depmap_data
dm_data = depmap_data()
dm_data.dir_datasets = '../datasets/DepMap/'
dm_data.load_data()
dm_data.preprocess_data()
# merge with source CERES
crispr = dm_data.df_crispr.copy()
crispr.columns = pd.Series(crispr.columns).apply(getFeatGene, firstOnly=True)
crispr_stats = crispr.describe()
feat_withceres = varExp.groupby('target').first().reset_index(drop=False).loc[:,
['target', 'score_rd', 'score_full']]
feat_withceres = pd.merge(feat_withceres, crispr_stats.T, how='left', left_on='target', right_index=True)
feat_withceres.to_csv('%s/merge_ceres_score_merge.csv' % outdir_sub)
plt.figure()
ax = sns.scatterplot('mean', 'score_rd', data=feat_withceres, s=60, alpha=0.5)
ax.set(ylabel='Score (reduced model)', xlabel='CERES (mean)')
plt.title('Score of reduced model vs mean CERES; %s' % suffix)
plt.savefig("%s/merge_ceres_scoreVsMean.pdf" % (outdir_sub))
plt.close()
plt.figure()
ax = sns.scatterplot('std', 'score_rd', data=feat_withceres, s=60, alpha=0.5)
ax.set(ylabel='Score (reduced model)', xlabel='CERES (standard deviation)')
plt.title('Score of reduced model vs std CERES; %s' % suffix)
plt.savefig("%s/merge_ceres_scoreVsSD.pdf" % (outdir_sub))
plt.close()
plt.figure()
ax = sns.scatterplot('mean', 'std', data=feat_withceres, s=60, alpha=0.5)
ax.set(xlabel='CERES (mean)', ylabel='CERES (standard deviation)')
plt.title('mean CERES vs SD CERES ; %s' % suffix)
plt.savefig("%s/merge_ceres_meanVsSD.pdf" % (outdir_sub))
plt.close()
df = feat_withceres.copy()
df.dropna(subset=['score_rd', 'mean', 'std'], inplace=True)
df1 = df.loc[df.score_rd <= 0.2, :]
df2 = df.loc[df.score_rd > 0.2, :]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(df1['mean'], df1['std'], df1['score_rd'], s=50, alpha=0.05, color='darkgray')
ax.scatter(df2['mean'], df2['std'], df2['score_rd'], s=50, alpha=0.1, color='darkred')
ax.set(xlabel='CERES (mean)', ylabel='CERES (SD)', zlabel='Score (reduced model)')
ax.view_init(azim=-120, elev=30)
plt.savefig("%s/merge_ceres_3d_meanSD.pdf" % (outdir_sub))
plt.close()
def anlyz_varExp_feats(varExp, gs_dir='../datasets/gene_sets/', outdir_sub='./'):
# analyze features
if not os.path.exists(outdir_sub): os.mkdir(outdir_sub)
feat_summary = generate_featSummary(varExp, outdir_sub)
plotFeatSrcCounts(feat_summary, '%s/featSrcCounts/' % outdir_sub)
# analyze overlay with gene sets
genesets_combined = dict()
# in same gene sets (KEGG)
genesets = parseGenesets('%s/KEGG_2019_Human.txt' % gs_dir)
genesets_combined.update(genesets)
sameGs_counts, sameGs_src_counts, feat_summary_annot = getGrpCounts(isInSameGS, isInSameGS_sources, feat_summary,
genesets)
plotGrpCounts(sameGs_counts, sameGs_src_counts, feat_summary_annot, 'in same gene set KEGG', outdir_sub)
# in same gene sets (Reactome)
genesets = parseGenesets('%s/Reactome_2016.txt' % gs_dir)
genesets_combined.update(genesets)
sameGs_counts, sameGs_src_counts, feat_summary_annot = getGrpCounts(isInSameGS, isInSameGS_sources, feat_summary,
genesets)
plotGrpCounts(sameGs_counts, sameGs_src_counts, feat_summary_annot, 'in same gene set Reactome', outdir_sub)
# in same gene sets (Panther)
genesets = parseGenesets('%s/Panther_2016.txt' % gs_dir)
genesets_combined.update(genesets)
sameGs_counts, sameGs_src_counts, feat_summary_annot = getGrpCounts(isInSameGS, isInSameGS_sources, feat_summary,
genesets)
plotGrpCounts(sameGs_counts, sameGs_src_counts, feat_summary_annot, 'in same gene set Panther', outdir_sub)
# in same gene sets (Panther/KEGG/Reactome)
genesets = genesets_combined
sameGs_counts, sameGs_src_counts, feat_summary_annot = getGrpCounts(isInSameGS, isInSameGS_sources, feat_summary,
genesets)
plotGrpCounts(sameGs_counts, sameGs_src_counts, feat_summary_annot, 'in same gene set KEGG-Panther-Reactome',
outdir_sub)
# in same gene
sameGene_counts, sameGene_src_counts, feat_summary_annot = getGrpCounts(isInSameGene, isInSameGene_sources,
feat_summary)
plotGrpCounts(sameGene_counts, sameGene_src_counts, feat_summary_annot, 'on same gene', outdir_sub)
# in same paralog
genesets = parseGenesets('%s/paralogs.txt' % gs_dir)
genesets_combined.update(genesets)
sameGs_counts, sameGs_src_counts, feat_summary_annot = getGrpCounts(isInSameGS, isInSameGS_sources, feat_summary,
genesets)
plotGrpCounts(sameGs_counts, sameGs_src_counts, feat_summary_annot, 'in same paralog', outdir_sub)
def anlyz_scoresGap(varExp, useGene_dependency, outdir_sub='./'):
# 'score' is used in the var names here, but since for AUC metrics, we
# will look at the gain (score - 0.5), the plots and outputs we will call it 'gain'
if useGene_dependency:
# the score would be AUC, just focus on feats with AUC>0.5
# and will assess based on deviation from 0.5
df = varExp.loc[varExp.score_ind > 0.5, :].copy()
df.score_full = df.score_full - 0.5
df.score_rd = df.score_rd - 0.5
df.score_ind = df.score_ind - 0.5
else:
# the score would be R2, just focus on feats with R2>0
df = varExp.loc[varExp.score_ind > 0, :].copy()
score_fullrd = df.groupby('target').first().loc[:, ['score_full', 'score_rd']]
featsN = df.groupby('target')['target'].count()
featsN.name = 'featsN'
sum_score_ind = df.groupby('target')['score_ind'].apply(sum)
sum_score_ind.name = 'sum_score_ind'
scoreVals = pd.concat([featsN, sum_score_ind], axis=1)
scoreVals = scoreVals.merge(score_fullrd, left_index=True, right_index=True)
scoreVals.reset_index(drop=False, inplace=True)
scoreVals['score_gap'] = scoreVals.score_rd - scoreVals.sum_score_ind
scoreVals['score_gap_frac'] = scoreVals.sum_score_ind / scoreVals.score_rd
# plots and stats
plt.figure()
ax = sns.distplot(scoreVals.score_gap)
ax.set(xlabel='Gain (reduced model) - gain (sum of score (univariate))', ylabel='Count')
plt.savefig('%s/gain_gap.pdf' % outdir_sub)
plt.close()
plt.figure()
ax = sns.distplot(scoreVals.score_gap_frac[np.abs(scoreVals.score_gap_frac) != np.inf])
ax.set(xlabel='Gain (sum of score (univariate))/gain (reduced model)', ylabel='Fraction')
plt.savefig('%s/gain_gap_frac.pdf' % outdir_sub)
plt.close()
totalN = scoreVals.shape[0]
f = open('%s/gain_gap_stats.txt' % outdir_sub, 'w')
f.write('Fraction of data with (rd - sum(ind)) > 0: %0.3f\n' % (sum(scoreVals.score_gap > 0) / totalN))
f.write('Fraction of data with (sum(ind)/rd) < 80%% and positive: %0.3f\n' % (
sum(scoreVals.score_gap_frac < 0.8) / totalN))
f.close()
scoreVals.to_csv('%s/gain_gap.csv' % outdir_sub, index=False)
def genBarPlotGene(model_results, gene, score_name, lineVal=None, outdir_sub='./'):
# generate bar plot, given the model results and gene
# if lineVal is not None, then will try a dotted line at the given value
df = model_results.copy()
df = df[df.target == gene]
if df.shape[0] < 1:
logging.warning('Gene %s not found in results' % gene)
return None
df['feature'][df.model == 'topfeat'] = 'topfeat'
plt.figure()
ax = sns.barplot('feature', score_name, data=df, color='royalblue')
if lineVal is not None:
ax.plot([-0.5, max(ax.get_xticks()) + 0.5], [lineVal, lineVal], 'r--', alpha=.75)
ax.set(ylabel='Score', xlabel='')
ax.set(ylim=[-0.3, 0.9])
plt.title('Target gene: %s' % (gene))
plt.xticks(rotation=-30, horizontalalignment="left")
plt.gcf().subplots_adjust(bottom=0.2)
plt.savefig("%s/%s_score_bar.pdf" % (outdir_sub, gene))
plt.close()
######################################################################
# Aggregate summary
######################################################################
def anlyz_aggRes(aggRes, params, suffix='', outdir_sub='./'):
# summarize the scores; given _varExp data
if not os.path.exists(outdir_sub): os.mkdir(outdir_sub)
if aggRes.empty: return None
score_vals_full = aggRes.groupby('target')['score_full'].apply(lambda x: x.iloc[0])
plt.figure()
ax = sns.distplot(score_vals_full)
ax.set(xlabel='Score of full model', ylabel='Count')
plt.title('Distribution of score (full model); %s' % suffix)
plt.savefig("%s/score_dist_full.pdf" % (outdir_sub))
plt.close()
score_vals_rd = aggRes.groupby('target')['score_rd'].apply(lambda x: x.iloc[0])
plt.figure()
ax = sns.distplot(score_vals_rd)
ax.set(xlabel='Score of reduced model', ylabel='Count')
plt.title('Distribution of score (reduced model); %s' % suffix)
plt.savefig("%s/score_dist_rd.pdf" % (outdir_sub))
plt.close()
score_vals_rd10 = aggRes.groupby('target')['score_rd10'].apply(lambda x: x.iloc[0])
plt.figure()
ax = sns.distplot(score_vals_rd10)
ax.set(xlabel='Score of reduced model (top 10 feat)', ylabel='Count')
plt.title('Distribution of score (reduced model top10 feat); %s' % suffix)
plt.savefig("%s/score_dist_rd10.pdf" % (outdir_sub))
plt.close()
score_stats = pd.DataFrame({'full': score_vals_full.describe(),
'reduced': score_vals_rd.describe(),
'reduced10feat': score_vals_rd10.describe()})
score_stats.to_csv("%s/stats_score.csv" % (outdir_sub))
# Score compares
df = pd.concat([pd.DataFrame({'score': aggRes.score_full, 'label': 'full model'}),
pd.DataFrame({'score': aggRes.score_rd, 'label': 'reduced model'}),
pd.DataFrame({'score': aggRes.score_rd10, 'label': 'reduced model top10 feat'})])
plt.figure()
ax = sns.boxplot(x='label', y='score', data=df.loc[df.score > 0, :], color='royalblue')
ax.set(xlabel='Model', ylabel='Score')
plt.title('Score; %s' % suffix)
plt.savefig("%s/compr_score_boxplot.pdf" % (outdir_sub))
plt.close()
plt.figure()
ax = sns.scatterplot(aggRes.score_rd, aggRes.score_full, s=60, alpha=0.1, color='steelblue')
ax.plot([0, 0.9], [0, 0.9], ls="--", c=".3")
ax.set(xlabel='Score reduced model', ylabel='Score full model')
plt.title('Score; %s' % suffix)
plt.savefig("%s/compr_score_scatter.pdf" % (outdir_sub))
plt.close()
plt.figure()
ax = sns.scatterplot(aggRes.score_rd, aggRes.score_rd10, s=60, alpha=0.1, color='steelblue')
ax.plot([0, 0.9], [0, 0.9], ls="--", c=".3")
ax.set(xlabel='Score reduced model', ylabel='Score reduced model top10 feat')
plt.title('Score; %s' % suffix)
plt.savefig("%s/compr_score_scatter_top10.pdf" % (outdir_sub))
plt.close()
# recall compares
plt.figure()
ax = sns.scatterplot(aggRes.recall_rd10, aggRes['external_recall_rd10'], s=60, alpha=0.1, color='steelblue')
ax.plot([0, 1.0], [0, 1.0], ls="--", c=".3")
ax.set(xlabel='Recall P19Q3 test set (rd10 model)', ylabel=f"Recall {params['external_data_name']} (rd10 model)")
plt.title('Recall; %s' % suffix)
plt.savefig("%s/compr_recall_scatter_q3_%s.pdf" % (outdir_sub, params['external_data_name']))
plt.close()
# recall vs score
plt.figure()
ax = sns.scatterplot(aggRes.score_rd10, aggRes.recall_rd10, s=60, alpha=0.1, color='steelblue')
ax.set(xlabel='Score (rd10 model)', ylabel='Recall (rd10 model)', xlim=(0, 1.1), ylim=(0, 1.1))
plt.title('Test set; %s' % suffix)
plt.savefig("%s/score_recall.pdf" % (outdir_sub))
plt.close()
plt.figure()
ax = sns.scatterplot(aggRes['external_score_rd10'], aggRes['external_recall_rd10'], s=60, alpha=0.1, color='steelblue')
ax.set(xlabel='Score (rd10 model)', ylabel='Recall (rd10 model)', xlim=(0, 1.1), ylim=(0, 1.1))
plt.title(f"{params['external_data_name']}; {suffix}")
plt.savefig(f"{outdir_sub}/score_recall_{params['external_data_name']}.pdf")
plt.close()
def anlyz_model_results(model_results, suffix='', outdir_sub='./'):
# similar to anlyz_aggRes, but instead of taking in the aggregated results data frame
# this method takes in the model_results data frame
# summarize the scores; given model_results data frame
if not os.path.exists(outdir_sub): os.mkdir(outdir_sub)
if model_results.empty:
return None
df_results = model_results.copy()
df_results = df_results.loc[df_results.model.str.match('(all|topfeat|top10feat)'), :]
# Score compares with train vs test
df1 = df_results[['model', 'score_train']].copy()
df1.rename(columns={'score_train': 'score'}, inplace=True)
df1['score_type'] = 'score_train'
df2 = df_results[['model', 'score_test']].copy()
df2['score_type'] = 'score_test'
df2.rename(columns={'score_test': 'score'}, inplace=True)
    df = pd.concat([df1, df2])
#pylint disable=C0301
from struct import Struct, pack
from abc import abstractmethod
import inspect
from typing import List
import numpy as np
from numpy import zeros, searchsorted, allclose
from pyNastran.utils.numpy_utils import integer_types, float_types
from pyNastran.op2.result_objects.op2_objects import BaseElement, get_times_dtype
from pyNastran.f06.f06_formatting import (
write_floats_13e, write_floats_12e,
write_float_13e, # write_float_12e,
_eigenvalue_header,
)
from pyNastran.op2.op2_interface.write_utils import set_table3_field
SORT2_TABLE_NAME_MAP = {
'OEF2' : 'OEF1',
'OEFATO2' : 'OEFATO1',
'OEFCRM2' : 'OEFCRM1',
'OEFPSD2' : 'OEFPSD1',
'OEFRMS2' : 'OEFRMS1',
'OEFNO2' : 'OEFNO1',
}
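# Note (hedged): set_as_sort1() below uses this mapping to rename a SORT2 table to its SORT1
# counterpart, e.g. SORT2_TABLE_NAME_MAP['OEF2'] -> 'OEF1'.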
TABLE_NAME_TO_TABLE_CODE = {
'OEF1' : 4,
}
class ForceObject(BaseElement):
def __init__(self, data_code, isubcase, apply_data_code=True):
self.element_type = None
self.element_name = None
self.nonlinear_factor = np.nan
self.element = None
self._times = None
BaseElement.__init__(self, data_code, isubcase, apply_data_code=apply_data_code)
def finalize(self):
"""it's required that the object be in SORT1"""
self.set_as_sort1()
def set_as_sort1(self):
"""the data is in SORT1, but the flags are wrong"""
if self.is_sort1:
return
self.table_name = SORT2_TABLE_NAME_MAP[self.table_name]
self.sort_bits[1] = 0 # sort1
self.sort_method = 1
assert self.is_sort1 is True, self.is_sort1
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def get_headers(self):
raise NotImplementedError()
def get_element_index(self, eids):
# elements are always sorted; nodes are not
itot = searchsorted(eids, self.element) #[0]
return itot
def eid_to_element_node_index(self, eids):
#ind = ravel([searchsorted(self.element == eid) for eid in eids])
ind = searchsorted(eids, self.element)
#ind = ind.reshape(ind.size)
#ind.sort()
return ind
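    # Illustrative behaviour (a hedged sketch; the ids below are made up): np.searchsorted(a, v)
    # returns, for each value in v, the index at which it would be inserted into the sorted array a:
    #   searchsorted([10, 20, 30], [10, 30]) -> array([0, 2])
    # so with sorted ids it doubles as an id -> positional-index lookup.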
def _write_table_3(self, op2, op2_ascii, new_result, itable, itime): #itable=-3, itime=0):
import inspect
from struct import pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_table_3: %s\n' % (self.__class__.__name__, call_frame[1][3]))
#print('new_result=%s itable=%s' % (new_result, itable))
if new_result and itable != -3:
header = [
4, 146, 4,
]
else:
header = [
4, itable, 4,
4, 1, 4,
4, 0, 4,
4, 146, 4,
]
op2.write(pack(b'%ii' % len(header), *header))
op2_ascii.write('table_3_header = %s\n' % header)
approach_code = self.approach_code
table_code = self.table_code
isubcase = self.isubcase
element_type = self.element_type
assert isinstance(self.element_type, int), self.element_type
#[
#'aCode', 'tCode', 'element_type', 'isubcase',
#'???', '???', '???', 'load_set'
#'format_code', 'num_wide', 's_code', '???',
#'???', '???', '???', '???',
#'???', '???', '???', '???',
#'???', '???', '???', '???',
#'???', 'Title', 'subtitle', 'label']
#random_code = self.random_code
format_code = self.format_code
s_code = 0 # self.s_code
num_wide = self.num_wide
acoustic_flag = 0
thermal = 0
title = b'%-128s' % self.title.encode('ascii')
subtitle = b'%-128s' % self.subtitle.encode('ascii')
label = b'%-128s' % self.label.encode('ascii')
ftable3 = b'50i 128s 128s 128s'
#oCode = 0
load_set = 0
#print(self.code_information())
ftable3 = b'i' * 50 + b'128s 128s 128s'
field6 = 0
field7 = 0
if self.analysis_code == 1:
field5 = self.loadIDs[itime]
elif self.analysis_code == 2:
field5 = self.modes[itime]
field6 = self.eigns[itime]
field7 = self.cycles[itime]
assert isinstance(field6, float), type(field6)
assert isinstance(field7, float), type(field7)
ftable3 = set_table3_field(ftable3, 6, b'f') # field 6
ftable3 = set_table3_field(ftable3, 7, b'f') # field 7
elif self.analysis_code == 5:
try:
field5 = self.freqs[itime]
except AttributeError: # pragma: no cover
print(self)
raise
ftable3 = set_table3_field(ftable3, 5, b'f') # field 5
elif self.analysis_code == 6:
if hasattr(self, 'times'):
field5 = self.times[itime]
#elif hasattr(self, 'dts'):
#field5 = self.times[itime]
else: # pragma: no cover
print(self.get_stats())
raise NotImplementedError('cant find times or dts on analysis_code=8')
ftable3 = set_table3_field(ftable3, 5, b'f') # field 5
elif self.analysis_code == 7: # pre-buckling
field5 = self.loadIDs[itime] # load set number
elif self.analysis_code == 8: # post-buckling
if hasattr(self, 'lsdvmns'):
field5 = self.lsdvmns[itime] # load set number
elif hasattr(self, 'loadIDs'):
field5 = self.loadIDs[itime]
else: # pragma: no cover
print(self.get_stats())
raise NotImplementedError('cant find lsdvmns or loadIDs on analysis_code=8')
if hasattr(self, 'eigns'):
field6 = self.eigns[itime]
elif hasattr(self, 'eigrs'):
field6 = self.eigrs[itime]
else: # pragma: no cover
print(self.get_stats())
raise NotImplementedError('cant find eigns or eigrs on analysis_code=8')
assert isinstance(field6, float_types), type(field6)
ftable3 = set_table3_field(ftable3, 6, b'f') # field 6
elif self.analysis_code == 9: # complex eigenvalues
field5 = self.modes[itime]
if hasattr(self, 'eigns'):
field6 = self.eigns[itime]
elif hasattr(self, 'eigrs'):
field6 = self.eigrs[itime]
else: # pragma: no cover
print(self.get_stats())
raise NotImplementedError('cant find eigns or eigrs on analysis_code=8')
ftable3 = set_table3_field(ftable3, 6, b'f') # field 6
field7 = self.eigis[itime]
ftable3 = set_table3_field(ftable3, 7, b'f') # field 7
elif self.analysis_code == 10: # nonlinear statics
field5 = self.load_steps[itime]
ftable3 = set_table3_field(ftable3, 5, b'f') # field 5; load step
elif self.analysis_code == 11: # old geometric nonlinear statics
field5 = self.loadIDs[itime] # load set number
else:
raise NotImplementedError(self.analysis_code)
table3 = [
approach_code, table_code, element_type, isubcase, field5,
field6, field7, load_set, format_code, num_wide,
s_code, acoustic_flag, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, thermal, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
title, subtitle, label,
]
assert table3[22] == thermal
n = 0
for v in table3:
if isinstance(v, (int, float, np.float32)):
n += 4
elif isinstance(v, str):
#print(len(v), v)
n += len(v)
else:
#print('write_table_3', v)
n += len(v)
assert n == 584, n
data = [584] + table3 + [584]
fmt = b'i' + ftable3 + b'i'
#print(fmt)
#print(data)
#f.write(pack(fascii, '%s header 3c' % self.table_name, fmt, data))
op2_ascii.write('%s header 3c = %s\n' % (self.table_name, data))
op2.write(pack(fmt, *data))
class RealForceObject(ForceObject):
def __init__(self, data_code, isubcase, apply_data_code=True):
ForceObject.__init__(self, data_code, isubcase, apply_data_code=apply_data_code)
@property
def is_real(self):
return True
@property
def is_complex(self):
return False
"""
F A I L U R E I N D I C E S F O R L A Y E R E D C O M P O S I T E E L E M E N T S ( T R I A 3 )
ELEMENT FAILURE PLY FP=FAILURE INDEX FOR PLY FB=FAILURE INDEX FOR BONDING FAILURE INDEX FOR ELEMENT FLAG
ID THEORY ID (DIRECT STRESSES/STRAINS) (INTER-LAMINAR STRESSES) MAX OF FP,FB FOR ALL PLIES
3 STRAIN 1 20345.4805 -2
7.1402
2 0.9025 -12
7.1402
3 20342.2461 -2
20345.4805 ***
4 STRAIN 1 16806.9277 -2
38.8327
2 0.9865 -2
38.8327
3 16804.4199 -2
F A I L U R E I N D I C E S F O R L A Y E R E D C O M P O S I T E E L E M E N T S ( T R I A 6 )
ELEMENT FAILURE PLY FP=FAILURE INDEX FOR PLY FB=FAILURE INDEX FOR BONDING FAILURE INDEX FOR ELEMENT FLAG
ID THEORY ID (DIRECT STRESSES/STRAINS) (INTER-LAMINAR STRESSES) MAX OF FP,FB FOR ALL PLIES
5 STRAIN 1 21850.3184 -2
166984.4219
2 0.7301 -2
166984.4219
3 21847.9902 -2
166984.4219 ***
6 STRAIN 1 18939.8340 -2
130371.3828
2 0.7599 -1
130371.3828
3 18937.7734 -2
F A I L U R E I N D I C E S F O R L A Y E R E D C O M P O S I T E E L E M E N T S ( Q U A D 4 )
ELEMENT FAILURE PLY FP=FAILURE INDEX FOR PLY FB=FAILURE INDEX FOR BONDING FAILURE INDEX FOR ELEMENT FLAG
ID THEORY ID (DIRECT STRESSES/STRAINS) (INTER-LAMINAR STRESSES) MAX OF FP,FB FOR ALL PLIES
1 STRAIN 1 18869.6621 -2
16.2471
2 1.0418 -2
16.2471
3 18866.6074 -2
18869.6621 ***
1 CC227: CANTILEVERED COMPOSITE PLATE 3 LAYER SYMM PLY CC227 DECEMBER 5, 2011 MSC.NASTRAN 6/17/05 PAGE 15
FAILURE CRITERION IS STRAIN, STRESS ALLOWABLES, LIST STRESSES
0
F A I L U R E I N D I C E S F O R L A Y E R E D C O M P O S I T E E L E M E N T S ( Q U A D 8 )
ELEMENT FAILURE PLY FP=FAILURE INDEX FOR PLY FB=FAILURE INDEX FOR BONDING FAILURE INDEX FOR ELEMENT FLAG
ID THEORY ID (DIRECT STRESSES/STRAINS) (INTER-LAMINAR STRESSES) MAX OF FP,FB FOR ALL PLIES
2 STRAIN 1 14123.7451 -2
31.4861
2 1.0430 -2
31.4861
3 14122.1221 -2
"""
class FailureIndicesArray(RealForceObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
RealForceObject.__init__(self, data_code, isubcase)
self.nelements = 0 # result specific
def build(self):
"""sizes the vectorized attributes of the FailureIndices"""
if self.is_built:
return
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size)
self._times = zeros(self.ntimes, dtype=dtype)
self.failure_theory = np.full(self.nelements, '', dtype='U8')
self.element_layer = zeros((self.nelements, 2), dtype=idtype)
#[failure_stress_for_ply, interlaminar_stress, max_value]
self.data = zeros((self.ntimes, self.nelements, 3), dtype=fdtype)
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
headers = self.get_headers()
element_layer = [self.element_layer[:, 0], self.element_layer[:, 1]]
if self.nonlinear_factor not in (None, np.nan):
# Time 0.00 0.05
# ElementID NodeID Item
# 2 1 failure_index_for_ply (direct stress/strain) 0.0 5.431871e-14
# 2 failure_index_for_bonding (interlaminar stresss) 0.0 3.271738e-16
# 3 max_value NaN NaN
# 1 failure_index_for_ply (direct stress/strain) 0.0 5.484873e-30
# 2 failure_index_for_bonding (interlaminar stresss) 0.0 3.271738e-16
# 3 max_value NaN NaN
# 1 failure_index_for_ply (direct stress/strain) 0.0 5.431871e-14
# 2 failure_index_for_bonding (interlaminar stresss) NaN NaN
# 3 max_value 0.0 5.431871e-14
column_names, column_values = self._build_dataframe_transient_header()
names = ['ElementID', 'Layer', 'Item']
data_frame = self._build_pandas_transient_element_node(
column_values, column_names, headers,
element_layer, self.data, names=names,
from_tuples=False, from_array=True)
#column_names, column_values = self._build_dataframe_transient_header()
#data_frame = pd.Panel(self.data, items=column_values,
#major_axis=element_layer, minor_axis=headers).to_frame()
#data_frame.columns.names = column_names
#data_frame.index.names = ['ElementID', 'Layer', 'Item']
else:
#Static failure_index_for_ply (direct stress/strain) failure_index_for_bonding (interlaminar stresss) max_value
#ElementID Layer
#101 1 7.153059e-07 0.0 NaN
# 2 1.276696e-07 0.0 NaN
# 3 7.153059e-07 NaN 7.153059e-07
element_layer = [self.element_layer[:, 0], self.element_layer[:, 1]]
index = pd.MultiIndex.from_arrays(element_layer, names=['ElementID', 'Layer'])
data_frame = pd.DataFrame(self.data[0], columns=headers, index=index)
data_frame.columns.names = ['Static']
self.data_frame = data_frame
def get_headers(self) -> List[str]:
#headers = ['eid', 'failure_theory', 'ply', 'failure_index_for_ply (direct stress/strain)',
#'failure_index_for_bonding (interlaminar stresss)', 'failure_index_for_element', 'flag']
headers = ['failure_index_for_ply (direct stress/strain)',
'failure_index_for_bonding (interlaminar stresss)', 'max_value']
return headers
def __eq__(self, table): # pragma: no cover
return True
def add_sort1(self, dt, eid, failure_theory, ply_id, failure_stress_for_ply, flag,
interlaminar_stress, max_value, nine):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element_layer[self.ielement] = [eid, ply_id]
self.failure_theory[self.ielement] = failure_theory
self.data[self.itime, self.ielement, :] = [failure_stress_for_ply, interlaminar_stress, max_value]
self.ielement += 1
def get_stats(self, short=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
ntimes = self.data.shape[0]
nelements = self.data.shape[1]
assert self.ntimes == ntimes, 'ntimes=%s expected=%s' % (self.ntimes, ntimes)
assert self.nelements == nelements, 'nelements=%s expected=%s' % (self.nelements, nelements)
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element type: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
return [] # raise NotImplementedError('this should be overwritten by %s' % (self.__class__.__name__))
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg_temp = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
f06_file.write('skipping FailureIndices f06\n')
return page_num
#NotImplementedError(self.code_information())
#asd
#if self.is_sort1:
#page_num = self._write_sort1_as_sort1(header, page_stamp, page_num, f06_file, msg_temp)
#else:
#raise NotImplementedError(self.code_information())
#page_num = self._write_sort2_as_sort2(header, page_stamp, page_num, f06_file, msg_temp)
#' F A I L U R E I N D I C E S F O R L A Y E R E D C O M P O S I T E E L E M E N T S ( T R I A 3 )\n'
#' ELEMENT FAILURE PLY FP=FAILURE INDEX FOR PLY FB=FAILURE INDEX FOR BONDING FAILURE INDEX FOR ELEMENT FLAG\n'
#' ID THEORY ID (DIRECT STRESSES/STRAINS) (INTER-LAMINAR STRESSES) MAX OF FP,FB FOR ALL PLIES\n'
#' 1 HOFFMAN 101 6.987186E-02 \n'
#' 1.687182E-02 \n'
#' 102 9.048269E-02 \n'
#' 1.721401E-02 \n'
#return page_num
class RealSpringDamperForceArray(RealForceObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
RealForceObject.__init__(self, data_code, isubcase)
self.nelements = 0 # result specific
#if not is_sort1:
#raise NotImplementedError('SORT2')
@classmethod
def add_static_case(cls, table_name, element_name, element, data, isubcase,
is_sort1=True, is_random=False, is_msc=True,
random_code=0, title='', subtitle='', label=''):
analysis_code = 1 # static
data_code = oef_data_code(table_name, analysis_code,
is_sort1=is_sort1, is_random=is_random,
random_code=random_code,
title=title, subtitle=subtitle, label=label,
is_msc=is_msc)
data_code['loadIDs'] = [0] # TODO: ???
data_code['data_names'] = []
# I'm only sure about the 1s in the strains and the
# corresponding 0s in the stresses.
#if is_stress:
#data_code['stress_bits'] = [0, 0, 0, 0]
#data_code['s_code'] = 0
#else:
#data_code['stress_bits'] = [0, 1, 0, 1]
#data_code['s_code'] = 1 # strain?
element_name_to_element_type = {
'CELAS1' : 11,
'CELAS2' : 12,
'CELAS3' : 13,
'CELAS4' : 14,
}
element_type = element_name_to_element_type[element_name]
data_code['element_name'] = element_name
data_code['element_type'] = element_type
#data_code['load_set'] = 1
ntimes = data.shape[0]
nnodes = data.shape[1]
dt = None
obj = cls(data_code, is_sort1, isubcase, dt)
obj.element = element
obj.data = data
obj.ntimes = ntimes
obj.ntotal = nnodes
obj._times = [None]
obj.is_built = True
return obj
def build(self):
"""sizes the vectorized attributes of the RealSpringDamperForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size)
self.build_data(self.ntimes, self.nelements, dtype, idtype, fdtype)
def build_data(self, ntimes, nelements, dtype, idtype, fdtype):
"""actually performs the build step"""
self.ntimes = ntimes
self.nelements = nelements
self._times = zeros(ntimes, dtype=dtype)
self.element = zeros(nelements, dtype=idtype)
#[force]
self.data = zeros((ntimes, nelements, 1), dtype=fdtype)
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
headers = self.get_headers()
if self.nonlinear_factor not in (None, np.nan):
#Mode 1 2 3
#Freq 1.482246e-10 3.353940e-09 1.482246e-10
#Eigenvalue -8.673617e-19 4.440892e-16 8.673617e-19
#Radians 9.313226e-10 2.107342e-08 9.313226e-10
#ElementID Item
#30 spring_force 2.388744e-19 -1.268392e-10 -3.341473e-19
#31 spring_force 2.781767e-19 -3.034770e-11 -4.433221e-19
#32 spring_force 0.000000e+00 0.000000e+00 0.000000e+00
#33 spring_force 0.000000e+00 0.000000e+00 0.000000e+00
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_elements(
column_values, column_names,
headers, self.element, self.data)
else:
#Static spring_force
#ElementID
#30 0.0
#31 0.0
#32 0.0
#33 0.0
data_frame = pd.DataFrame(self.data[0], columns=headers, index=self.element)
data_frame.index.name = 'ElementID'
data_frame.columns.names = ['Static']
self.data_frame = data_frame
def __eq__(self, table): # pragma: no cover
assert self.is_sort1 == table.is_sort1
is_nan = (
self.nonlinear_factor is not None and
np.isnan(self.nonlinear_factor) and
np.isnan(table.nonlinear_factor)
)
if not is_nan:
assert self.nonlinear_factor == table.nonlinear_factor
assert self.ntotal == table.ntotal
assert self.table_name == table.table_name, 'table_name=%r table.table_name=%r' % (self.table_name, table.table_name)
assert self.approach_code == table.approach_code
if self.nonlinear_factor not in (None, np.nan):
assert np.array_equal(self._times, table._times), 'ename=%s-%s times=%s table.times=%s' % (
self.element_name, self.element_type, self._times, table._times)
if not np.array_equal(self.element, table.element):
assert self.element.shape == table.element.shape, 'shape=%s element.shape=%s' % (self.element.shape, table.element.shape)
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\nEid1, Eid2\n' % str(self.code_information())
for eid, eid2 in zip(self.element, table.element):
msg += '%s, %s\n' % (eid, eid2)
print(msg)
raise ValueError(msg)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
ntimes = self.data.shape[0]
i = 0
if self.is_sort1:
for itime in range(ntimes):
for ieid, eid, in enumerate(self.element):
t1 = self.data[itime, ieid, :]
t2 = table.data[itime, ieid, :]
                        # the data block is 1-wide for springs/dampers: [force]
                        force1 = t1[0]
                        force2 = t2[0]
                        if not allclose(t1, t2):
                        #if not np.array_equal(t1, t2):
                            msg += '%s\n (%s)\n (%s)\n' % (
                                eid, force1, force2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
else:
raise NotImplementedError(self.is_sort2)
if i > 0:
print(msg)
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, force):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
#print('dt=%s eid=%s' % (dt, eid))
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, :] = [force]
self.ielement += 1
def get_stats(self, short=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
ntimes = self.data.shape[0]
nelements = self.data.shape[1]
assert self.ntimes == ntimes, 'ntimes=%s expected=%s' % (self.ntimes, ntimes)
assert self.nelements == nelements, 'nelements=%s expected=%s' % (self.nelements, nelements)
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (
ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element.shape = %s\n' % str(self.element.shape).replace('L', ''))
msg.append(' element type: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
raise NotImplementedError('this should be overwritten by %s' % (self.__class__.__name__))
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg_temp = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
if self.is_sort1:
page_num = self._write_sort1_as_sort1(header, page_stamp, page_num, f06_file, msg_temp)
else:
raise NotImplementedError(self.code_information())
#page_num = self._write_sort2_as_sort2(header, page_stamp, page_num, f06_file, msg_temp)
return page_num
def _write_sort1_as_sort1(self, header, page_stamp, page_num, f06_file, msg_temp):
ntimes = self.data.shape[0]
eids = self.element
nwrite = len(eids)
nrows = nwrite // 4
nleftover = nwrite - nrows * 4
for itime in range(ntimes):
dt = self._times[itime]
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
stress = self.data[itime, :, 0]
out = []
for eid, stressi in zip(eids, stress):
out.append([eid, write_float_13e(stressi)])
for i in range(0, nrows * 4, 4):
f06_file.write(' %10i %13s %10i %13s %10i %13s %10i %13s\n' % (
tuple(out[i] + out[i + 1] + out[i + 2] + out[i + 3])))
i = nrows * 4
if nleftover == 3:
f06_file.write(' %10i %13s %10i %13s %10i %13s\n' % (
tuple(out[i] + out[i + 1] + out[i + 2])))
elif nleftover == 2:
f06_file.write(' %10i %13s %10i %13s\n' % (
tuple(out[i] + out[i + 1])))
elif nleftover == 1:
f06_file.write(' %10i %13s\n' % tuple(out[i]))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def write_op2(self, op2, op2_ascii, itable, new_result,
date, is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
from struct import Struct, pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
#eids = self.element
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = self.data.shape[1]
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#print('shape = %s' % str(self.data.shape))
#assert self.ntimes == 1, self.ntimes
#device_code = self.device_code
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
eids_device = self.element * 10 + self.device_code
#print('ntotal=%s' % (ntotal))
#assert ntotal == 193, ntotal
if self.is_sort1:
struct1 = Struct(endian + b'if')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('%s-nelements=%i\n' % (self.element_name, nelements))
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
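            # The header below frames the data block with Fortran-style 4-byte record
            # markers: ``itable`` tags the subtable, while ``ntotal`` and ``4 * ntotal``
            # give the payload size in 4-byte words and in bytes.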
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
force = self.data[itime, :, 0]
for eid, forcei in zip(eids_device, force):
data = [eid, forcei]
op2_ascii.write(' eid=%s force=%s\n' % tuple(data))
op2.write(struct1.pack(*data))
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class RealSpringForceArray(RealSpringDamperForceArray):
def __init__(self, data_code, is_sort1, isubcase, dt):
RealSpringDamperForceArray.__init__(self, data_code, is_sort1, isubcase, dt)
@property
def nnodes_per_element(self) -> int:
return 1
def get_headers(self) -> List[str]:
headers = ['spring_force']
return headers
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
if self.element_type == 11: # CELAS1
msg = [' F O R C E S I N S C A L A R S P R I N G S ( C E L A S 1 )\n']
elif self.element_type == 12: # CELAS2
msg = [' F O R C E S I N S C A L A R S P R I N G S ( C E L A S 2 )\n']
elif self.element_type == 13: # CELAS3
msg = [' F O R C E S I N S C A L A R S P R I N G S ( C E L A S 3 )\n']
elif self.element_type == 14: # CELAS4
msg = [' F O R C E S I N S C A L A R S P R I N G S ( C E L A S 4 )\n']
else: # pragma: no cover
msg = 'element_name=%s element_type=%s' % (self.element_name, self.element_type)
raise NotImplementedError(msg)
msg += [
' ELEMENT FORCE ELEMENT FORCE ELEMENT FORCE ELEMENT FORCE\n'
' ID. ID. ID. ID.\n'
]
return msg
class RealDamperForceArray(RealSpringDamperForceArray):
def __init__(self, data_code, is_sort1, isubcase, dt):
RealSpringDamperForceArray.__init__(self, data_code, is_sort1, isubcase, dt)
@property
def nnodes_per_element(self) -> int:
return 1
def get_headers(self) -> List[str]:
headers = ['damper_force']
return headers
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
if self.element_type == 20: # CDAMP1
msg = [' F O R C E S I N S C A L A R D A M P E R S ( C D A M P 1 )\n']
elif self.element_type == 21: # CDAMP2
msg = [' F O R C E S I N S C A L A R D A M P E R S ( C D A M P 2 )\n']
elif self.element_type == 22: # CDAMP3
msg = [' F O R C E S I N S C A L A R D A M P E R S ( C D A M P 3 )\n']
elif self.element_type == 23: # CDAMP4
msg = [' F O R C E S I N S C A L A R D A M P E R S ( C D A M P 4 )\n']
else: # pragma: no cover
msg = 'element_name=%s element_type=%s' % (self.element_name, self.element_type)
raise NotImplementedError(msg)
if is_sort1:
msg += [
' ELEMENT FORCE ELEMENT FORCE ELEMENT FORCE ELEMENT FORCE\n'
' ID. ID. ID. ID.\n'
]
else:
msg += [
' AXIAL AXIAL\n'
' TIME FORCE TORQUE TIME FORCE TORQUE\n'
]
return msg
class RealRodForceArray(RealForceObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
RealForceObject.__init__(self, data_code, isubcase)
self.nelements = 0 # result specific
@classmethod
def add_static_case(cls, table_name, element_name, element, data, isubcase,
is_sort1=True, is_random=False, is_msc=True,
random_code=0, title='', subtitle='', label=''):
analysis_code = 1 # static
data_code = oef_data_code(table_name, analysis_code,
is_sort1=is_sort1, is_random=is_random,
random_code=random_code,
title=title, subtitle=subtitle, label=label,
is_msc=is_msc)
data_code['loadIDs'] = [0] # TODO: ???
data_code['data_names'] = []
# I'm only sure about the 1s in the strains and the
# corresponding 0s in the stresses.
#if is_stress:
#data_code['stress_bits'] = [0, 0, 0, 0]
#data_code['s_code'] = 0
#else:
#data_code['stress_bits'] = [0, 1, 0, 1]
#data_code['s_code'] = 1 # strain?
element_name_to_element_type = {
'CROD' : 1,
'CTUBE' : 3,
'CONROD' : 10,
}
element_type = element_name_to_element_type[element_name]
data_code['element_name'] = element_name
data_code['element_type'] = element_type
#data_code['load_set'] = 1
ntimes = data.shape[0]
nnodes = data.shape[1]
dt = None
obj = cls(data_code, is_sort1, isubcase, dt)
obj.element = element
obj.data = data
obj.ntimes = ntimes
obj.ntotal = nnodes
obj._times = [None]
obj.is_built = True
return obj
@property
def nnodes_per_element(self) -> int:
return 1
def get_headers(self) -> List[str]:
headers = ['axial', 'torsion']
return headers
#def get_headers(self):
#headers = ['axial', 'torque']
#return headers
def _get_msgs(self):
base_msg = [' ELEMENT AXIAL TORSIONAL ELEMENT AXIAL TORSIONAL\n',
' ID. FORCE MOMENT ID. FORCE MOMENT\n']
crod_msg = [' F O R C E S I N R O D E L E M E N T S ( C R O D )\n', ]
conrod_msg = [' F O R C E S I N R O D E L E M E N T S ( C O N R O D )\n', ]
ctube_msg = [' F O R C E S I N R O D E L E M E N T S ( C T U B E )\n', ]
crod_msg += base_msg
conrod_msg += base_msg
ctube_msg += base_msg
return crod_msg, conrod_msg, ctube_msg
def build(self):
"""sizes the vectorized attributes of the RealRodForceArray"""
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
self.build_data(self.ntimes, self.nelements, float_fmt='float32')
def build_data(self, ntimes, nelements, float_fmt='float32'):
"""actually performs the build step"""
self.ntimes = ntimes
self.nelements = nelements
#self.ntotal = ntimes * nelements
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(ntimes, dtype=dtype)
self.element = zeros(nelements, dtype='int32')
#[axial_force, torque]
self.data = zeros((ntimes, nelements, 2), dtype='float32')
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
headers = self.get_headers()
if self.nonlinear_factor not in (None, np.nan):
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_elements(
column_values, column_names,
headers, self.element, self.data)
else:
#Static axial SMa torsion SMt
#ElementID
#14 0.0 1.401298e-45 0.0 1.401298e-45
#15 0.0 1.401298e-45 0.0 1.401298e-45
data_frame = pd.DataFrame(self.data[0], columns=headers, index=self.element)
data_frame.index.name = 'ElementID'
data_frame.columns.names = ['Static']
self.data_frame = data_frame
def add_sort1(self, dt, eid, axial, torque):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, :] = [axial, torque]
self.ielement += 1
if self.ielement == self.nelements:
self.ielement = 0
def get_stats(self, short=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nnodes, %i] where %i=[%s]\n' % (
ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element.shape = %s\n' % str(self.element.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True):
crod_msg, conrod_msg, ctube_msg = self._get_msgs()
if 'CROD' in self.element_name:
msg = crod_msg
elif 'CONROD' in self.element_name:
msg = conrod_msg
        elif 'CTUBE' in self.element_name:
            msg = ctube_msg
        else:  # pragma: no cover
            raise NotImplementedError('element_name=%s element_type=%s' % (
                self.element_name, self.element_type))
        return self.element_name, msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
(elem_name, msg_temp) = self.get_f06_header(is_mag_phase)
# write the f06
#(ntimes, ntotal, two) = self.data.shape
ntimes = self.data.shape[0]
eids = self.element
is_odd = False
nwrite = len(eids)
if len(eids) % 2 == 1:
nwrite -= 1
is_odd = True
#print('len(eids)=%s nwrite=%s is_odd=%s' % (len(eids), nwrite, is_odd))
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
axial = self.data[itime, :, 0]
torsion = self.data[itime, :, 1]
out = []
for eid, axiali, torsioni in zip(eids, axial, torsion):
[axiali, torsioni] = write_floats_13e([axiali, torsioni])
out.append([eid, axiali, torsioni])
for i in range(0, nwrite, 2):
out_line = ' %8i %-13s %-13s %8i %-13s %s\n' % tuple(out[i] + out[i + 1])
f06_file.write(out_line)
if is_odd:
out_line = ' %8i %-13s %s\n' % tuple(out[-1])
f06_file.write(out_line)
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def __eq__(self, table): # pragma: no cover
assert self.is_sort1 == table.is_sort1
is_nan = (
self.nonlinear_factor is not None and
np.isnan(self.nonlinear_factor) and
np.isnan(table.nonlinear_factor)
)
if not is_nan:
assert self.nonlinear_factor == table.nonlinear_factor
assert self.ntotal == table.ntotal
assert self.table_name == table.table_name, 'table_name=%r table.table_name=%r' % (self.table_name, table.table_name)
assert self.approach_code == table.approach_code
if self.nonlinear_factor not in (None, np.nan):
assert np.array_equal(self._times, table._times), 'ename=%s-%s times=%s table.times=%s' % (
self.element_name, self.element_type, self._times, table._times)
if not np.array_equal(self.element, table.element):
assert self.element.shape == table.element.shape, 'element shape=%s table.shape=%s' % (self.element.shape, table.element.shape)
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
msg += 'Eid\n'
for eid, eid2 in zip(self.element, table.element):
msg += '%s, %s\n' % (eid, eid2)
print(msg)
raise ValueError(msg)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, eid in enumerate(self.element):
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(axial1, torque1) = t1
(axial2, torque2) = t2
if not np.array_equal(t1, t2):
msg += '(%s) (%s, %s) (%s, %s)\n' % (
eid,
axial1, torque1,
axial2, torque2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def write_op2(self, op2, op2_ascii, itable, new_result,
date, is_mag_phase=False, endian='>'):
"""writes an OP2"""
        # local imports, mirroring the other ``write_op2`` implementation above
        import inspect
        from struct import Struct, pack
        frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
#if isinstance(self.nonlinear_factor, float):
#op2_format = '%sif' % (7 * self.ntimes)
#raise NotImplementedError()
#else:
#op2_format = 'i21f'
#s = Struct(op2_format)
#eids = self.element
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = self.data.shape[1]
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#print('shape = %s' % str(self.data.shape))
#assert self.ntimes == 1, self.ntimes
#device_code = self.device_code
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
eids_device = self.element * 10 + self.device_code
#fmt = '%2i %6f'
#print('ntotal=%s' % (ntotal))
#assert ntotal == 193, ntotal
if self.is_sort1:
struct1 = Struct(endian + b'i2f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('%s-nelements=%i\n' % (self.element_name, nelements))
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
#print('stress itable = %s' % itable)
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
axial = self.data[itime, :, 0]
torsion = self.data[itime, :, 1]
#print('eids3', eids3)
for eid, axiali, torsioni in zip(eids_device, axial, torsion):
data = [eid, axiali, torsioni]
op2_ascii.write(' eid=%s axial=%s torsion=%s\n' % tuple(data))
op2.write(struct1.pack(*data))
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class RealCBeamForceArray(RealForceObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
#ForceObject.__init__(self, data_code, isubcase)
RealForceObject.__init__(self, data_code, isubcase)
self.result_flag = 0
self.itime = 0
self.nelements = 0 # result specific
#if is_sort1:
##sort1
#pass
#else:
#raise NotImplementedError('SORT2')
def build(self):
"""sizes the vectorized attributes of the RealCBeamForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s subtitle=%s' % (self.ntimes, self.nelements, self.ntotal, self.subtitle))
if self.is_built:
return
nnodes = 11
#self.names = []
#self.nelements //= nnodes
self.nelements //= self.ntimes
#self.ntotal //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
self.is_built = True
#print('ntotal=%s ntimes=%s nelements=%s' % (self.ntotal, self.ntimes, self.nelements))
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size)
self._times = zeros(self.ntimes, dtype)
self.element = zeros(self.ntotal, idtype)
self.element_node = zeros((self.ntotal, 2), idtype)
# the number is messed up because of the offset for the element's properties
if not (self.nelements * nnodes) == self.ntotal:
msg = 'ntimes=%s nelements=%s nnodes=%s ne*nn=%s ntotal=%s' % (self.ntimes,
self.nelements, nnodes,
self.nelements * nnodes,
self.ntotal)
raise RuntimeError(msg)
#[sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq]
self.data = zeros((self.ntimes, self.ntotal, 8), fdtype)
def finalize(self):
sd = self.data[0, :, 0]
i_sd_zero = np.where(sd != 0.0)[0]
i_node_zero = np.where(self.element_node[:, 1] != 0)[0]
assert i_node_zero.max() > 0, 'CBEAM element_node hasnt been filled'
i = np.union1d(i_sd_zero, i_node_zero)
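        # CBEAM results are allocated with 11 station slots per element (see ``build``);
        # keep only the stations that were actually filled, i.e. those with a nonzero
        # station distance or a nonzero node id.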
#self.nelements = len(self.element) // 11
self.element = self.element[i]
self.element_node = self.element_node[i, :]
self.data = self.data[:, i, :]
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
headers = self.get_headers()
element_location = [
self.element_node[:, 0],
self.data[0, :, 0],
]
if self.nonlinear_factor not in (None, np.nan):
#Mode 1 2 3
#Freq 1.482246e-10 3.353940e-09 1.482246e-10
#Eigenvalue -8.673617e-19 4.440892e-16 8.673617e-19
#Radians 9.313226e-10 2.107342e-08 9.313226e-10
#ElementID Location Item
#12 0.0 bending_moment1 1.505494e-13 -2.554764e-07 -5.272747e-13
# bending_moment2 -2.215085e-13 -2.532377e-07 3.462328e-13
# shear1 1.505494e-13 -2.554763e-07 -5.272747e-13
# shear2 -2.215085e-13 -2.532379e-07 3.462328e-13
# axial_force 1.294136e-15 -1.670896e-09 4.759476e-16
# total_torque -4.240346e-16 2.742446e-09 1.522254e-15
# warping_torque 0.000000e+00 0.000000e+00 0.000000e+00
# 1.0 bending_moment1 0.000000e+00 -1.076669e-13 1.009742e-28
# bending_moment2 -5.048710e-29 1.704975e-13 0.000000e+00
# shear1 1.505494e-13 -2.554763e-07 -5.272747e-13
# shear2 -2.215085e-13 -2.532379e-07 3.462328e-13
# axial_force 1.294136e-15 -1.670896e-09 4.759476e-16
# total_torque -4.240346e-16 2.742446e-09 1.522254e-15
# warping_torque 0.000000e+00 0.000000e+00 0.000000e+00
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_element_node(
column_values, column_names,
headers[1:], element_location, self.data[:, :, 1:], from_tuples=False, from_array=True)
data_frame.index.names = ['ElementID', 'Location', 'Item']
else:
            df1 = pd.DataFrame(element_location)
"""Time series feature generators as Scikit-Learn compatible transformers."""
from itertools import combinations
from typing import List, Optional
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
from sklearn.utils.validation import check_is_fitted
from tsfeast.funcs import (
get_change_features,
get_datetime_features,
get_difference_features,
get_ewma_features,
get_lag_features,
get_rolling_features,
)
from tsfeast.utils import Data, array_to_dataframe
class BaseTransformer(BaseEstimator, TransformerMixin):
"""Base transformer object."""
def __init__(self, fillna: bool = True):
"""Instantiate transformer object."""
self.fillna = fillna
def transform(self, X: Data, y=None) -> Data:
"""
Transform fitted data.
Parameters
----------
X: array of shape [n_samples, n_features]
The input samples.
y: None
Not used; included for compatibility, only.
Returns
-------
Data
Array-like object of transformed data.
Notes
-----
Scikit-Learn Pipelines only call the `.transform()` method during the `.predict()` method,
which is appropriate to prevent data leakage in predictions. However, most of the
transformers in this module take a set of features and generate new features; there's no
inherent method to transform some timeseries features given a fitted estimator.
For time series lags, changes, etc., we have access to past data for feature
generation without risk of data leakage; certain features (e.g. lags) require this
to avoid NaNs or zeros.
We append new X to our original features and transform on entire dataset, keeping
only the last n rows. Appropriate for time series transformations, only.
"""
if isinstance(X, np.ndarray):
X = array_to_dataframe(X)
if hasattr(self, 'input_features_'):
rows = X.shape[0]
X = pd.concat([self.input_features_, X]) # pylint: disable=E0203
self.output_features_ = self._transform(X, y).iloc[-rows:, :]
if self.fillna:
return self.output_features_.fillna(0)
return self.output_features_
self.input_features_: pd.DataFrame = X
        self.n_features_in_ = X.shape[1]  # scikit-learn convention: number of features, not rows
self.output_features_ = self._transform(X, y)
self.feature_names_ = self.output_features_.columns
if self.fillna:
return self.output_features_.fillna(0)
return self.output_features_
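    # Illustrative sketch (not part of the original module) of the incremental
    # behaviour described in the Notes above, using a concrete subclass defined
    # further down and made-up frames ``train_df``/``new_df``::
    #
    #     lags = LagFeatures(n_lags=2)
    #     lags.fit(train_df)
    #     _ = lags.transform(train_df)        # first call caches train_df
    #     tail = lags.transform(new_df)       # lags use the cached history; only the
    #                                         # last len(new_df) rows are returned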
def get_feature_names(self) -> List[str]:
"""Get list of feature names."""
check_is_fitted(self)
return list(self.feature_names_)
def _transform(self, X: pd.DataFrame, y=None) -> pd.DataFrame:
"""
Transform input data.
Parameters
----------
X: pd.DataFrame
The input samples.
y: None
Not used; included for compatibility, only.
Returns
-------
Data
Transformed features.
"""
raise NotImplementedError
def fit(self, X: Data, y=None) -> "BaseTransformer":
"""
Fit transformer object to data.
Parameters
----------
X: array of shape [n_samples, n_features]
The input samples.
y: None
Not used; included for compatibility, only.
Returns
-------
BaseTransformer
Self.
"""
_, _ = X, y
return self
class OriginalFeatures(BaseTransformer):
"""Return original features."""
def _transform(self, X: pd.DataFrame, y=None) -> Data:
"""
Fit transformer object to data.
Parameters
----------
X: pd.DataFrame
The input samples.
y: None
Not used; included for compatibility, only.
Returns
-------
Data
Transformed features.
"""
return X
class Scaler(BaseTransformer):
"""Wrap StandardScaler to maintain column names."""
def __init__(self):
"""Instantiate transformer object."""
super().__init__()
self.scaler = StandardScaler()
def fit(self, X: pd.DataFrame, y=None) -> "Scaler":
"""
Fit transformer object to data.
Parameters
----------
X: pd.DataFrame
The input samples.
y: None
Not used; included for compatibility, only.
Returns
-------
Data
Transformed features.
"""
self.scaler.fit(X)
return self
    def _transform(self, X: pd.DataFrame, y=None) -> pd.DataFrame:
        # Not called in practice: ``transform`` is overridden below so the scaled
        # output keeps its column names; this stub only satisfies the base class.
        return self
def transform(self, X: pd.DataFrame, y=None) -> Data:
"""
Fit transformer object to data.
Parameters
----------
X: pd.DataFrame
The input samples.
y: None
Not used; included for compatibility, only.
Returns
-------
Data
Transformed features.
"""
self.feature_names_ = X.columns
return pd.DataFrame(
self.scaler.transform(X),
columns=X.columns,
index=X.index
)
def inverse_transform(self, X: pd.DataFrame, copy: bool = True) -> pd.DataFrame:
"""
Transform scaled data into original feature space.
Parameters
----------
X: pd.DataFrame
The input samples.
copy: bool
Default True; if False, try to avoid a copy and do inplace scaling instead.
Returns
-------
Data
Data in original feature space.
"""
return pd.DataFrame(
self.scaler.inverse_transform(X, copy=copy),
columns=self.feature_names_,
index=X.index
)
class DateTimeFeatures(BaseTransformer):
"""Generate datetime features."""
def __init__(
self, date_col: Optional[str] = None, dt_format: Optional[str] = None,
freq: Optional[str] = None
):
"""
Instantiate transformer object.
date_col: Optional[str]
Column name containing date/timestamp.
dt_format: Optional[str]
            Date/timestamp format, e.g. `%Y-%m-%d` for `2020-01-31`.
        freq: Optional[str]
            Pandas frequency string; inferred from the data during ``fit`` when omitted.
        """
super().__init__()
self.date_col = date_col
self.dt_format = dt_format
self.freq = freq
def fit(self, X: Data, y=None) -> "DateTimeFeatures":
_ = y
if isinstance(X, pd.DataFrame):
dates = X[self.date_col]
elif isinstance(X, pd.Series):
dates = X
else:
raise ValueError('`data` must be a DataFrame or Series.')
if not self.freq:
self.freq = pd.infer_freq(
pd.DatetimeIndex(pd.to_datetime(dates, format=self.dt_format))
)
return self
def _transform(self, X: pd.DataFrame, y=None) -> Data:
"""
Fit transformer object to data.
Parameters
----------
X: pd.DataFrame
The input samples.
y: None
Not used; included for compatibility, only.
Returns
-------
Data
Transformed features.
"""
return get_datetime_features(X, self.date_col, dt_format=self.dt_format, freq=self.freq)
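    # Hedged usage sketch (hypothetical column name; assumes ``get_datetime_features``
    # expands the date column into calendar components such as year/quarter/month):
    #
    #     dt = DateTimeFeatures(date_col='date')
    #     calendar_df = dt.fit(df).transform(df)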
class LagFeatures(BaseTransformer):
"""Generate lag features."""
def __init__(self, n_lags: int, fillna: bool = True):
"""
Instantiate transformer object.
Parameters
----------
n_lags: int
Number of lags to generate.
"""
super().__init__(fillna=fillna)
self.n_lags = n_lags
def _transform(self, X: pd.DataFrame, y=None) -> Data:
"""
Fit transformer object to data.
Parameters
----------
X: pd.DataFrame
The input samples.
y: None
Not used; included for compatibility, only.
Returns
-------
Data
Transformed features.
"""
return get_lag_features(X, n_lags=self.n_lags)
class RollingFeatures(BaseTransformer):
"""Generate rolling features."""
def __init__(self, window_lengths: List[int], fillna: bool = True):
"""
Instantiate transformer object.
Parameters
----------
        window_lengths: List[int]
Length of window(s) to create.
"""
super().__init__(fillna=fillna)
self.window_lengths = window_lengths
def _transform(self, X: pd.DataFrame, y=None) -> Data:
"""
Fit transformer object to data.
Parameters
----------
X: pd.DataFrame
The input samples.
y: None
Not used; included for compatibility, only.
Returns
-------
Data
Transformed features.
"""
return get_rolling_features(X, window_lengths=self.window_lengths)
class EwmaFeatures(BaseTransformer):
"""Generate exponentially-weighted moving-average features."""
def __init__(self, window_lengths: List[int], fillna: bool = True):
"""
Instantiate transformer object.
Parameters
----------
        window_lengths: List[int]
Length of window(s) to create.
"""
super().__init__(fillna=fillna)
self.window_lengths = window_lengths
def _transform(self, X: pd.DataFrame, y=None) -> Data:
"""
Fit transformer object to data.
Parameters
----------
X: pd.DataFrame
The input samples.
y: None
Not used; included for compatibility, only.
Returns
-------
Data
Transformed features.
"""
return get_ewma_features(X, window_lengths=self.window_lengths)
class ChangeFeatures(BaseTransformer):
"""Generate period change features."""
def __init__(self, period_lengths: List[int], fillna: bool = True):
"""
Instantiate transformer object.
Parameters
----------
period_lengths: List[int]
Length of period[s] to generate change features.
"""
super().__init__(fillna=fillna)
self.period_lengths = period_lengths
def _transform(self, X: pd.DataFrame, y=None) -> Data:
"""
Fit transformer object to data.
Parameters
----------
X: pd.DataFrame
The input samples.
y: None
Not used; included for compatibility, only.
Returns
-------
Data
Transformed features.
"""
return get_change_features(X, period_lengths=self.period_lengths)
class DifferenceFeatures(BaseTransformer):
"""Generate difference features."""
def __init__(self, n_diffs: int, fillna: bool = True):
"""
Instantiate transformer object.
Parameters
----------
n_diffs: int
Number of differences to calculate.
"""
super().__init__(fillna=fillna)
self.n_diffs = n_diffs
def _transform(self, X: pd.DataFrame, y=None) -> Data:
"""
Fit transformer object to data.
Parameters
----------
X: pd.DataFrame
The input samples.
y: None
Not used; included for compatibility, only.
Returns
-------
Data
Transformed features.
"""
return get_difference_features(X, n_diffs=self.n_diffs)
class PolyFeatures(BaseTransformer):
"""Generate polynomial features."""
def __init__(self, degree=2):
"""
Instantiate transformer object.
Parameters
----------
degree: int
Degree of polynomial to use.
"""
super().__init__()
self.degree = degree
def _transform(self, X: pd.DataFrame, y=None) -> pd.DataFrame:
"""
Fit transformer object to data.
Parameters
----------
X: pd.DataFrame
The input samples.
y: None
Not used; included for compatibility, only.
Returns
-------
Data
Transformed features.
"""
poly = []
df = X.copy()
for i in range(2, self.degree + 1):
poly.append(
pd.DataFrame(
df.values ** i,
columns=[f'{c}^{i}' for c in df.columns],
index=df.index
)
)
        return pd.concat(poly, axis=1)
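# Minimal end-to-end sketch, not part of the original module: it assumes the
# ``tsfeast.funcs`` helpers imported at the top behave as their names suggest and
# uses made-up monthly data. Guarded so importing the module stays side-effect free.
if __name__ == '__main__':  # pragma: no cover
    demo = pd.DataFrame(
        {'y': np.arange(24, dtype=float)},
        index=pd.date_range('2020-01-31', periods=24, freq='M'),
    )
    features = pd.concat(
        [
            LagFeatures(n_lags=3).transform(demo),
            RollingFeatures(window_lengths=[3, 6]).transform(demo),
            ChangeFeatures(period_lengths=[1, 12]).transform(demo),
        ],
        axis=1,
    )
    print(features.shape)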
"""
This script saves the direct/indirect effects for each neuron, averaged across groups defined
by negation type and correctness category.
Usage:
python compute_and_save_neuron_agg_effect.py $result_file_path $model_name $negation_test_set_file
"""
import os
import sys
import json
import pandas as pd
def get_correctness_category(results_df):
orig_label = results_df['orig_label'].all()
neg_label = results_df['neg_label'].all()
orig_assigned = False if float(results_df['candidate1_orig_prob'].unique()) > 0.5 else True
neg_assigned = False if float(results_df['candidate1_neg_prob'].unique()) > 0.5 else True
orig_correctness = "c" if orig_label == orig_assigned else "i"
neg_correctness = "c" if neg_label == neg_assigned else "i"
return "_".join([orig_correctness, neg_correctness])
def analyze_effect_results(results_df, effect):
# Calculate response variable under the null condition and with the neuron intervention
if results_df["orig_label"].all() == True:
odds_base = (
results_df["candidate1_orig_prob"] / results_df["candidate2_orig_prob"]
)
odds_intervention = (
results_df["candidate1_prob"] / results_df["candidate2_prob"]
)
else:
odds_base = (
results_df["candidate2_orig_prob"] / results_df["candidate1_orig_prob"]
)
odds_intervention = (
results_df["candidate2_prob"] / results_df["candidate1_prob"]
)
odds_ratio = odds_intervention / odds_base
results_df["odds_ratio"] = odds_ratio
# Add correctness category to dataframe
results_df["correctness_cat"] = get_correctness_category(results_df=results_df)
    # Pivot to a neuron-by-layer view of the odds ratios; note: this rebinds the local
    # name only, so the caller keeps working with the long-format frame mutated above.
    results_df = results_df.pivot("neuron", "layer", "odds_ratio")
def get_all_effects(fname):
"""
    Given the path to an indirect-effect CSV, load it together with the matching
    direct-effect file and return the combined per-neuron effects.
"""
# Step 1: Load results for current file
print(fname)
indirect_result_df = pd.read_csv(fname)
analyze_effect_results(
results_df=indirect_result_df, effect="indirect"
)
fname = fname.replace("_indirect_", "_direct_")
direct_result_df = pd.read_csv(fname)
analyze_effect_results(
results_df=direct_result_df, effect="direct"
)
# Step 2: Join the two DF's
total_df = direct_result_df.join(
indirect_result_df, lsuffix="_direct", rsuffix="_indirect"
)[
[
"layer_direct",
"neuron_direct",
"odds_ratio_indirect",
"odds_ratio_direct"
]
]
total_df["total_causal_effect"] = (
total_df["odds_ratio_indirect"] + total_df["odds_ratio_direct"] - 1
)
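    # Both summands are odds ratios, where 1.0 means "no effect"; subtracting 1 keeps the
    # shared baseline from being counted twice: total = (direct - 1) + (indirect - 1) + 1.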
total_df["correctness_cat"] = direct_result_df["correctness_cat"]
return total_df
def main(folder_name, model_name, neg_test_file = 'neg_test.json'):
# Load the negation type for each negated sample
neg_types = {}
with open(neg_test_file, 'r') as test_set:
for line in test_set:
js_line = json.loads(line)
try:
neg_types[str(js_line["identifier"])] = str(js_line["negation_type"])
except KeyError:
neg_types[str(js_line["identifier"])] = None
# Get all direct and indirect effect files
fnames = [
f
for f in os.listdir(folder_name)
if "_" + model_name + ".csv" in f and f.endswith("csv")
]
paths = [os.path.join(folder_name, f) for f in fnames]
files = [
f
for f in paths
if "indirect" in f
if os.path.exists(f.replace("indirect", "direct"))
]
# Prepare dataframes for each of the 6 negation types
va_dfs = []
ve_dfs = []
npne_dfs = []
npe_dfs = []
n2n_dfs = []
sw_dfs = []
ntypes = {"v-action" : va_dfs, "v-existential" : ve_dfs, "np-nonexistential" : npne_dfs, "np-existential" : npe_dfs, "np-num2none" : n2n_dfs, "sw" : sw_dfs}
for path in files:
# Get negation type of current example
id_neg = path.split("/")[-1].split("_")[1]
ntype = neg_types[id_neg]
# Get indirect and direct effects
ntypes[ntype].append(get_all_effects(path))
    va_df = pd.concat(va_dfs)
"""Tests for the sdv.constraints.tabular module."""
import uuid
from datetime import datetime
from unittest.mock import Mock
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, Unique, UniqueCombinations)
def dummy_transform_table(table_data):
return table_data
def dummy_reverse_transform_table(table_data):
return table_data
def dummy_is_valid_table(table_data):
return [True] * len(table_data)
def dummy_transform_table_column(table_data, column):
return table_data
def dummy_reverse_transform_table_column(table_data, column):
return table_data
def dummy_is_valid_table_column(table_data, column):
return [True] * len(table_data[column])
def dummy_transform_column(column_data):
return column_data
def dummy_reverse_transform_column(column_data):
return column_data
def dummy_is_valid_column(column_data):
return [True] * len(column_data)
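# The dummy helpers above mirror the three call signatures ``CustomConstraint`` dispatches
# on (whole table, table + column name, single column). A hedged usage sketch, matching
# what the tests below exercise:
#
#     constraint = CustomConstraint(columns='a', is_valid=dummy_is_valid_column)
#     constraint.is_valid(pd.DataFrame({'a': [1, 2, 3]}))   # -> [True, True, True]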
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid_table'
# Run
instance = CustomConstraint(
transform=dummy_transform_table,
reverse_transform=dummy_reverse_transform_table,
is_valid=is_valid_fqn
)
# Assert
assert instance._transform == dummy_transform_table
assert instance._reverse_transform == dummy_reverse_transform_table
assert instance._is_valid == dummy_is_valid_table
def test__run_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy transform function with ``table_data`` argument.
Side Effects:
- Run transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` argument.
Side Effects:
- Run reverse transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = reverse_transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
        The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` argument.
Side Effects:
- Run is valid function once with ``table_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table)
# Run
instance = CustomConstraint(is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
assert called[0][1] == 'a'
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run reverse transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
assert called[0][1] == 'a'
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
        The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` and ``column`` argument.
Side Effects:
- Run is valid function once with ``table_data`` and ``column`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
assert called[0][1] == 'a'
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy transform function with ``column_data`` argument.
Side Effects:
- Run transform function twice, once with the attempt of
``table_data`` and ``column`` and second with ``column_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy reverse transform function with ``column_data`` argument.
Side Effects:
- Run reverse transform function twice, once with the attempt of
``table_data`` and ``column`` and second with ``column_data`` as input.
Output:
- Applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
        The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy is valid function with ``column_data`` argument.
Side Effects:
- Run is valid function twice, once with the attempt of
``table_data`` and ``column`` and second with ``column_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
        pd.testing.assert_frame_equal(called[0][0][0], table_data)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pickle
import shutil
import sys
import tempfile
import numpy as np
from numpy import arange, nan
import pandas.testing as pdt
from pandas import DataFrame, MultiIndex, Series, to_datetime
# dependencies testing specific
import pytest
import recordlinkage
from recordlinkage.base import BaseCompareFeature
STRING_SIM_ALGORITHMS = [
'jaro', 'q_gram', 'cosine', 'jaro_winkler', 'dameraulevenshtein',
'levenshtein', 'lcs', 'smith_waterman'
]
NUMERIC_SIM_ALGORITHMS = ['step', 'linear', 'squared', 'exp', 'gauss']
FIRST_NAMES = [
u'Ronald', u'Amy', u'Andrew', u'William', u'Frank', u'Jessica', u'Kevin',
u'Tyler', u'Yvonne', nan
]
LAST_NAMES = [
u'Graham', u'Smith', u'Holt', u'Pope', u'Hernandez', u'Gutierrez',
u'Rivera', nan, u'Crane', u'Padilla'
]
STREET = [
u'<NAME>', nan, u'<NAME>', u'<NAME>', u'<NAME>',
u'<NAME>', u'Williams Trail', u'Durham Mountains', u'Anna Circle',
u'<NAME>'
]
JOB = [
u'Designer, multimedia', u'Designer, blown glass/stained glass',
u'Chiropractor', u'Engineer, mining', u'Quantity surveyor',
u'Phytotherapist', u'Teacher, English as a foreign language',
u'Electrical engineer', u'Research officer, government', u'Economist'
]
AGES = [23, 40, 70, 45, 23, 57, 38, nan, 45, 46]
# Run all tests in this file with:
# nosetests tests/test_compare.py
class TestData(object):
@classmethod
def setup_class(cls):
N_A = 100
N_B = 100
cls.A = DataFrame({
'age': np.random.choice(AGES, N_A),
'given_name': np.random.choice(FIRST_NAMES, N_A),
'lastname': np.random.choice(LAST_NAMES, N_A),
'street': np.random.choice(STREET, N_A)
})
cls.B = DataFrame({
'age': np.random.choice(AGES, N_B),
'given_name': np.random.choice(FIRST_NAMES, N_B),
'lastname': np.random.choice(LAST_NAMES, N_B),
'street': np.random.choice(STREET, N_B)
})
cls.A.index.name = 'index_df1'
cls.B.index.name = 'index_df2'
cls.index_AB = MultiIndex.from_arrays(
[arange(len(cls.A)), arange(len(cls.B))],
names=[cls.A.index.name, cls.B.index.name])
# Create a temporary directory
cls.test_dir = tempfile.mkdtemp()
@classmethod
def teardown_class(cls):
# Remove the test directory
shutil.rmtree(cls.test_dir)
class TestCompareApi(TestData):
"""General unittest for the compare API."""
def test_repr(self):
comp = recordlinkage.Compare()
comp.exact('given_name', 'given_name')
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
c_str = str(comp)
c_repr = repr(comp)
assert c_str == c_repr
start_str = '<{}'.format(comp.__class__.__name__)
assert c_str.startswith(start_str)
def test_instance_linking(self):
comp = recordlinkage.Compare()
comp.exact('given_name', 'given_name')
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
result = comp.compute(self.index_AB, self.A, self.B)
        # returns a DataFrame
        assert isinstance(result, DataFrame)
        # resulting frame has a MultiIndex
        assert isinstance(result.index, MultiIndex)
        # index names are ok
assert result.index.names == [self.A.index.name, self.B.index.name]
assert len(result) == len(self.index_AB)
def test_instance_dedup(self):
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
result = comp.compute(self.index_AB, self.A)
        # returns a DataFrame
        assert isinstance(result, DataFrame)
        # resulting frame has a MultiIndex
        assert isinstance(result.index, MultiIndex)
        # index names are ok
assert result.index.names == [self.A.index.name, self.B.index.name]
assert len(result) == len(self.index_AB)
def test_label_linking(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(
            lambda s1, s2: np.ones(len(s1), dtype=int),
'given_name',
'given_name',
label='my_feature_label')
result = comp.compute(self.index_AB, self.A, self.B)
assert "my_feature_label" in result.columns.tolist()
def test_label_dedup(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(
            lambda s1, s2: np.ones(len(s1), dtype=int),
'given_name',
'given_name',
label='my_feature_label')
result = comp.compute(self.index_AB, self.A)
assert "my_feature_label" in result.columns.tolist()
def test_multilabel_none_linking(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name')
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name')
result = comp.compute(self.index_AB, self.A, self.B)
assert [0, 1, 2, 3, 4, 5, 6, 7, 8] == \
result.columns.tolist()
def test_multilabel_linking(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name',
label=['a', ['b', 'c', 'd']])
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name',
label=['e', ['f', 'g', 'h']])
result = comp.compute(self.index_AB, self.A, self.B)
assert [0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] == \
result.columns.tolist()
def test_multilabel_dedup(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name',
label=['a', ['b', 'c', 'd']])
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name',
label=['e', ['f', 'g', 'h']])
result = comp.compute(self.index_AB, self.A)
assert [0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] == \
result.columns.tolist()
def test_multilabel_none_dedup(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name')
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name')
result = comp.compute(self.index_AB, self.A)
assert [0, 1, 2, 3, 4, 5, 6, 7, 8] == \
result.columns.tolist()
def test_multilabel_error_dedup(self):
def ones(s1, s2):
return np.ones((len(s1), 2))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones, 'given_name', 'given_name', label=['a', 'b', 'c'])
with pytest.raises(ValueError):
comp.compute(self.index_AB, self.A)
def test_incorrect_collabels_linking(self):
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
        comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=int),
"given_name", "not_existing_label")
with pytest.raises(KeyError):
comp.compute(self.index_AB, self.A, self.B)
def test_incorrect_collabels_dedup(self):
comp = recordlinkage.Compare()
        comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=int),
"given_name", "not_existing_label")
with pytest.raises(KeyError):
comp.compute(self.index_AB, self.A)
def test_compare_custom_vectorized_linking(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# test without label
comp = recordlinkage.Compare()
        comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=int),
'col', 'col')
result = comp.compute(ix, A, B)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
        pdt.assert_frame_equal(result, expected)
import operator
import warnings
import numpy as np
import pandas as pd
from pandas import DataFrame, Series, Timestamp, date_range, to_timedelta
import pandas._testing as tm
from pandas.core.algorithms import checked_add_with_arr
from .pandas_vb_common import numeric_dtypes
try:
import pandas.core.computation.expressions as expr
except ImportError:
import pandas.computation.expressions as expr
try:
import pandas.tseries.holiday
except ImportError:
pass
class IntFrameWithScalar:
params = [
[np.float64, np.int64],
[2, 3.0, np.int32(4), np.float64(5)],
[
operator.add,
operator.sub,
operator.mul,
operator.truediv,
operator.floordiv,
operator.pow,
operator.mod,
operator.eq,
operator.ne,
operator.gt,
operator.ge,
operator.lt,
operator.le,
],
]
param_names = ["dtype", "scalar", "op"]
def setup(self, dtype, scalar, op):
arr = np.random.randn(20000, 100)
self.df = DataFrame(arr.astype(dtype))
def time_frame_op_with_scalar(self, dtype, scalar, op):
op(self.df, scalar)
class OpWithFillValue:
def setup(self):
# GH#31300
arr = np.arange(10 ** 6)
df = DataFrame({"A": arr})
ser = df["A"]
self.df = df
self.ser = ser
def time_frame_op_with_fill_value_no_nas(self):
self.df.add(self.df, fill_value=4)
def time_series_op_with_fill_value_no_nas(self):
self.ser.add(self.ser, fill_value=4)
class MixedFrameWithSeriesAxis:
params = [
[
"eq",
"ne",
"lt",
"le",
"ge",
"gt",
"add",
"sub",
"truediv",
"floordiv",
"mul",
"pow",
]
]
param_names = ["opname"]
def setup(self, opname):
arr = np.arange(10 ** 6).reshape(1000, -1)
        df = DataFrame(arr)
"""
#--------------------------------
# Name:npmrds_data_conflation_cmp_batch.py
# Purpose: Get distance-weighted average speed from NPMRDS data for CMP deficient corridors,
# make chart images. If multiple years of input data provided, then charts
# showing year-year changes will be created.
# Author: <NAME>
# Last Updated: Jul 2020
# Updated by: <name>
# Copyright: (c) SACOG
# Python Version: 3.x
#--------------------------------
"""
import os
import re
import datetime as dt
import time
import pdb
import arcpy
import pandas as pd
import make_project_barcharts as mkbar
# import plotly
# import plotly.express as px
# from plotly.offline import plot
# orca_path = r"C:\Users\dconly\AppData\Local\ESRI\conda\envs\arcgispro-py3-clone\orca_app\orca.exe"
# plotly.io.orca.config.executable = orca_path
arcpy.env.overwriteOutput = True
# Esri start of added variables
g_ESRI_variable_1 = 'fl_splitprojlines'
g_ESRI_variable_2 = 'fl_splitproj_w_tmcdata'
g_ESRI_variable_3 = "{} = '{}'"
g_ESRI_variable_4 = '{} IS NOT NULL'
g_ESRI_variable_5 = os.path.join(arcpy.env.packageWorkspace,'index')
g_ESRI_variable_6 = 'fl_project'
g_ESRI_variable_7 = 'fl_speed_data'
g_ESRI_variable_8 = '{} IN {}'
g_ESRI_variable_9 = 'fl_tmc_buff'
# Esri end of added variables
dateSuffix = str(dt.datetime.now().strftime('%Y%m%d_%H%M'))
# ====================FUNCTIONS==========================================
def esri_object_to_df(in_esri_obj, esri_obj_fields, index_field=None):
'''converts esri gdb table, feature class, feature layer, or SHP to pandas dataframe'''
data_rows = []
with arcpy.da.SearchCursor(in_esri_obj, esri_obj_fields) as cur:
for row in cur:
out_row = list(row)
data_rows.append(out_row)
out_df = pd.DataFrame(data_rows, index=index_field, columns=esri_obj_fields)
return out_df
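# Illustrative usage of the helper above (field names are hypothetical examples, not part of
# the original workflow; requires an existing feature layer):
# df_tmcs = esri_object_to_df(g_ESRI_variable_7, ["SHAPE@LENGTH", "ff_speed_art60thp", "lottr_ampk"])
# print(df_tmcs.head())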
class NPMRDS:
def __init__(self, fc_speed_data):
# speed data attributes
self.fc_speed_data = fc_speed_data
self.col_ff_speed = "ff_speed_art60thp"
self.col_congest_speed = "havg_spd_worst4hrs"
self.col_reliab_ampk = "lottr_ampk"
self.col_reliab_md = "lottr_md"
self.col_reliab_pmpk = "lottr_pmpk"
self.col_reliab_wknd = "lottr_wknd"
self.col_tmcdir = "direction_signd" # needs to be the "BOUND" direction versions (e.g. NORTHBOUND) from the TMC spec CSV
self.col_roadtype = "F_System" # indicates if road is freeway or not, so that data from freeways doesn't affect data on surface streets, and vice-versa
for f in [self.col_ff_speed, self.col_congest_speed, self.col_reliab_ampk,
self.col_reliab_md, self.col_reliab_pmpk, self.col_reliab_wknd,
self.col_tmcdir, self.col_roadtype]:
if f not in [field.name for field in arcpy.ListFields(self.fc_speed_data)]:
raise Exception("Field {} not present in speed data shapefile/feature class." \
"Please update field names in the __init__ function of the NPMRDS class as needed.".format(f))
# each item in this dict corresponds to a separate chart set
self.dict_perf_cats = {"speed": [self.col_ff_speed, self.col_congest_speed],
"lottr": [self.col_reliab_ampk, self.col_reliab_md,
self.col_reliab_pmpk, self.col_reliab_wknd]
}
# values used to indicate method for getting multi-TMC average values
self.calc_distwt_avg = "distance_weighted_avg"
self.calc_inv_avg = "inv_avg_spd"
# specify the type of calculation for each field in order to aggregate to project line
self.spd_data_calc_dict = {self.col_ff_speed: self.calc_inv_avg,
self.col_congest_speed: self.calc_inv_avg,
self.col_reliab_ampk: self.calc_distwt_avg,
self.col_reliab_md: self.calc_distwt_avg,
self.col_reliab_pmpk: self.calc_distwt_avg,
self.col_reliab_wknd: self.calc_distwt_avg}
self.ptype_fwy = "Freeway"
self.roadtypes_fwy = (1, 2) # road type values corresponding to freeways
self.directions_tmc = ["NORTHBOUND", "SOUTHBOUND", "EASTBOUND", "WESTBOUND"]
self.tmc_select_srchdist = 300 # units in feet. will select TMCs within this distance of project line for analysis.
self.tmc_buff_dist_ft = 90 # buffer distance, in feet, around the TMCs
self.ft2mile = 5280
# output dataframe added fields
self.fld_projdesc = 'proj_desc'
self.fld_proj_inum = 'proj_inum'
self.fld_datayear = 'data_year'
self.fld_dir_out = 'direction'
self.fld_measure = 'measure_full'
self.fld_measure_sep = 'measure'
self.fld_value = 'value'
def remove_forbidden_chars(self, in_str):
'''Replaces forbidden characters with acceptable characters'''
repldict = {"&":'And','%':'pct','/':'-', ':':'-'}
out_str = ''
for c in in_str:
if c in repldict.keys():
cfix = repldict[c]
out_str = out_str + cfix
else:
out_str = out_str + c
return out_str
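    # Example of the substitutions above (illustrative string, not taken from the data):
    #   remove_forbidden_chars("I-5 / J St: Peak % Delay & Speed")
    #   returns "I-5 - J St- Peak pct Delay And Speed"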
def get_wtd_speed(self, in_df, in_field, direction, fld_pc_len_ft):
fielddir = "{}{}".format(direction, in_field)
fld_invspd = "spdinv_hpm"
fld_pc_tt = "projpc_tt"
fld_len_mi = "pc_len_mi"
in_df[fld_invspd] = 1/in_df[in_field] # calculate each piece's "hours per mile", or inverted speed, as 1/speed
# get each piece's travel time, in hours as inverted speed (hrs per mi) * piece distance (mi)
in_df[fld_len_mi] = in_df[fld_pc_len_ft]/self.ft2mile
in_df[fld_pc_tt] = in_df[fld_invspd] * in_df[fld_len_mi]
# get total travel time, in hours, for all pieces, then divide total distance, in miles, for all pieces by the total tt
# to get average MPH for the project
if in_df[fld_pc_tt].sum() > 0:
proj_mph = in_df[fld_len_mi].sum() / in_df[fld_pc_tt].sum()
else:
proj_mph = 0
return {fielddir: proj_mph}
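    # Worked example of the inverted-speed average above (made-up numbers): two pieces of
    # 0.5 mi each at 30 mph and 60 mph have travel times 0.5/30 + 0.5/60 = 0.025 hr, so the
    # project speed is 1.0 mi / 0.025 hr = 40 mph (a harmonic, distance-weighted mean),
    # rather than the 45 mph an unweighted average of the two speeds would give.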
def conflate_tmc2projline(self, fl_proj, dirxn_list, tmc_dir_field,
fl_tmcs_buffd, fields_calc_dict):
speed_data_fields = [k for k, v in fields_calc_dict.items()]
out_row_dict = {}
# get length of project
fld_shp_len = "SHAPE@LENGTH"
fld_totprojlen = "proj_length_ft"
with arcpy.da.SearchCursor(fl_proj, fld_shp_len) as cur:
for row in cur:
out_row_dict[fld_totprojlen] = row[0]
for direcn in dirxn_list:
# https://support.esri.com/en/technical-article/000012699
# temporary files
scratch_gdb = arcpy.env.scratchGDB
temp_intersctpts = os.path.join(scratch_gdb, "temp_intersectpoints") # r"{}\temp_intersectpoints".format(scratch_gdb)
temp_intrsctpt_singlpt = os.path.join(scratch_gdb, "temp_intrsctpt_singlpt") # converted from multipoint to single point (1 pt per feature)
temp_splitprojlines = os.path.join(scratch_gdb, "temp_splitprojlines") # fc of project line split up to match TMC buffer extents
temp_splitproj_w_tmcdata = os.path.join(scratch_gdb, "temp_splitproj_w_tmcdata") # fc of split project lines with TMC data on them
fl_splitprojlines = g_ESRI_variable_1
fl_splitproj_w_tmcdata = g_ESRI_variable_2
# get TMCs whose buffers intersect the project line
arcpy.SelectLayerByLocation_management(fl_tmcs_buffd, "INTERSECT", fl_proj)
# select TMCs that intersect the project and are in indicated direction
sql_sel_tmcxdir = g_ESRI_variable_3.format(tmc_dir_field, direcn)
arcpy.SelectLayerByAttribute_management(fl_tmcs_buffd, "SUBSET_SELECTION", sql_sel_tmcxdir)
# split the project line at the boundaries of the TMC buffer, creating points where project line intersects TMC buffer boundaries
arcpy.Intersect_analysis([fl_proj, fl_tmcs_buffd],temp_intersctpts,"","","POINT")
arcpy.MultipartToSinglepart_management (temp_intersctpts, temp_intrsctpt_singlpt)
# split project line into pieces at points where it intersects buffer, with 10ft tolerance
# (not sure why 10ft tolerance needed but it is, zero tolerance results in some not splitting)
arcpy.SplitLineAtPoint_management(fl_proj, temp_intrsctpt_singlpt,
temp_splitprojlines, "10 Feet")
arcpy.MakeFeatureLayer_management(temp_splitprojlines, fl_splitprojlines)
# get TMC speeds onto each piece of the split project line via spatial join
arcpy.SpatialJoin_analysis(temp_splitprojlines, fl_tmcs_buffd, temp_splitproj_w_tmcdata,
"JOIN_ONE_TO_ONE", "KEEP_ALL", "#", "HAVE_THEIR_CENTER_IN", "30 Feet")
# convert to fl and select records where "check field" col val is not none
arcpy.MakeFeatureLayer_management(temp_splitproj_w_tmcdata, fl_splitproj_w_tmcdata)
check_field = speed_data_fields[0] # choose first speed value field for checking--if it's null, then don't include those rows in aggregation
sql_notnull = g_ESRI_variable_4.format(check_field)
arcpy.SelectLayerByAttribute_management(fl_splitproj_w_tmcdata, "NEW_SELECTION", sql_notnull)
# convert the selected records into a numpy array then a pandas dataframe
flds_df = [fld_shp_len] + speed_data_fields
df_spddata = esri_object_to_df(fl_splitproj_w_tmcdata, flds_df)
# remove project pieces with no speed data so their distance isn't included in weighting
df_spddata = df_spddata.loc[pd.notnull(df_spddata[speed_data_fields[0]])].astype(float)
# remove rows where there wasn't enough NPMRDS data to get a valid speed or reliability reading
df_spddata = df_spddata.loc[df_spddata[flds_df].min(axis=1) > 0]
dir_len = df_spddata[fld_shp_len].sum() #sum of lengths of project segments that intersect TMCs in the specified direction
out_row_dict["{}_calc_len".format(direcn)] = dir_len #"calc" length because it may not be same as project length
# go through and do conflation calculation for each TMC-based data field based on correct method of aggregation
for field, calcmthd in fields_calc_dict.items():
if calcmthd == self.calc_inv_avg: # See PPA documentation on how to calculated "inverted speed average" method
sd_dict = self.get_wtd_speed(df_spddata, field, direcn, fld_shp_len)
out_row_dict.update(sd_dict)
elif calcmthd == self.calc_distwt_avg:
fielddir = "{}{}".format(direcn, field) # add direction tag to field names
# if there's speed data, get weighted average value.
linklen_w_speed_data = df_spddata[fld_shp_len].sum()
if linklen_w_speed_data > 0: #wgtd avg = sum(piece's data * piece's len)/(sum of all piece lengths)
avg_data_val = (df_spddata[field]*df_spddata[fld_shp_len]).sum() \
/ df_spddata[fld_shp_len].sum()
out_row_dict[fielddir] = avg_data_val
else:
                        out_row_dict[fielddir] = df_spddata[field].mean() # if no length, just return mean speed? Maybe instead return 'no data available'? Or -1 to keep as int?
continue
else:
continue
#cleanup
fcs_to_delete = [temp_intersctpts, temp_intrsctpt_singlpt, temp_splitprojlines, temp_splitproj_w_tmcdata]
for fc in fcs_to_delete:
arcpy.Delete_management(fc)
        return pd.DataFrame([out_row_dict])
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('classic')
import pandas as pd
import quandl as Quandl
import wbdata as wb
from scipy import stats
import runProcs
# get_ipython().run_line_magic('matplotlib', 'inline')
# # Preliminaries
#
# Import country codes and country lists by income
# In[2]:
# 1. Import country codes and organize
# 1.1 Import country codes and names from the country_codes file from Quandl's WB WDI documentation: https://www.quandl.com/data/WWDI/documentation/documentation
country_codes = {}
try:
text_file = open('country_codes', 'r')
lines = text_file.readlines()
for line in lines:
split = line.split('|')
if len(split)>1:
if len(split[1])==4:
country_codes[split[0]] = split[1][:-1]
except:
country_codes = {
'Afghanistan': 'AFG',
'Africa': 'AFR',
'Albania': 'ALB',
'Algeria': 'DZA',
'American Samoa': 'ASM',
'Andorra': 'AND',
'Angola': 'AGO',
'Antigua and Barbuda': 'ATG',
'Arab World': 'ARB',
'Argentina': 'ARG',
'Armenia': 'ARM',
'Aruba': 'ABW',
'Australia': 'AUS',
'Austria': 'AUT',
'Azerbaijan': 'AZE',
'Bahamas, The': 'BHS',
'Bahrain': 'BHR',
'Bangladesh': 'BGD',
'Barbados': 'BRB',
'Belarus': 'BLR',
'Belgium': 'BEL',
'Belize': 'BLZ',
'Benin': 'BEN',
'Bermuda': 'BMU',
'Bhutan': 'BTN',
'Bolivia': 'BOL',
'Bosnia and Herzegovina': 'BIH',
'Botswana': 'BWA',
'Brazil': 'BRA',
'Brunei Darussalam': 'BRN',
'Bulgaria': 'BGR',
'Burkina Faso': 'BFA',
'Burundi': 'BDI',
'Cabo Verde': 'CPV',
'Cambodia': 'KHM',
'Cameroon': 'CMR',
'Canada': 'CAN',
'Caribbean small states': 'CSS',
'Cayman Islands': 'CYM',
'Central African Republic': 'CAF',
'Chad': 'TCD',
'Channel Islands': 'CHI',
'Chile': 'CHL',
'China': 'CHN',
'Colombia': 'COL',
'Comoros': 'COM',
'Congo, Dem. Rep.': 'COD',
'Congo, Rep.': 'COG',
'Costa Rica': 'CRI',
"Cote d'Ivoire": 'CIV',
'Croatia': 'HRV',
'Cuba': 'CUB',
'Curacao': 'CUW',
'Cyprus': 'CYP',
'Czech Republic': 'CZE',
'Denmark': 'DNK',
'Djibouti': 'DJI',
'Dominica': 'DMA',
'Dominican Republic': 'DOM',
'East Asia & Pacific (all income levels)': 'EAS',
'East Asia & Pacific (developing only)': 'EAP',
'East Asia and the Pacific (IFC classification)': 'CEA',
'Ecuador': 'ECU',
'Egypt, Arab Rep.': 'EGY',
'El Salvador': 'SLV',
'Equatorial Guinea': 'GNQ',
'Eritrea': 'ERI',
'Estonia': 'EST',
'Ethiopia': 'ETH',
'Euro area': 'EMU',
'Europe & Central Asia (all income levels)': 'ECS',
'Europe & Central Asia (developing only)': 'ECA',
'Europe and Central Asia (IFC classification)': 'CEU',
'European Union': 'EUU',
'Faeroe Islands': 'FRO',
'Fiji': 'FJI',
'Finland': 'FIN',
'France': 'FRA',
'French Polynesia': 'PYF',
'Gabon': 'GAB',
'Gambia, The': 'GMB',
'Georgia': 'GEO',
'Germany': 'DEU',
'Ghana': 'GHA',
'Greece': 'GRC',
'Greenland': 'GRL',
'Grenada': 'GRD',
'Guam': 'GUM',
'Guatemala': 'GTM',
'Guinea': 'GIN',
'Guinea-Bissau': 'GNB',
'Guyana': 'GUY',
'Haiti': 'HTI',
'Heavily indebted poor countries (HIPC)': 'HPC',
'High income': 'HIC',
'High income: OECD': 'OEC',
'High income: nonOECD': 'NOC',
'Honduras': 'HND',
'Hong Kong SAR, China': 'HKG',
'Hungary': 'HUN',
'Iceland': 'ISL',
'India': 'IND',
'Indonesia': 'IDN',
'Iran, Islamic Rep.': 'IRN',
'Iraq': 'IRQ',
'Ireland': 'IRL',
'Isle of Man': 'IMN',
'Israel': 'ISR',
'Italy': 'ITA',
'Jamaica': 'JAM',
'Japan': 'JPN',
'Jordan': 'JOR',
'Kazakhstan': 'KAZ',
'Kenya': 'KEN',
'Kiribati': 'KIR',
'Korea, Dem. Rep.': 'PRK',
'Korea, Rep.': 'KOR',
'Kosovo': 'KSV',
'Kuwait': 'KWT',
'Kyrgyz Republic': 'KGZ',
'Lao PDR': 'LAO',
'Latin America & Caribbean (all income levels)': 'LCN',
'Latin America & Caribbean (developing only)': 'LAC',
'Latin America and the Caribbean (IFC classification)': 'CLA',
'Latvia': 'LVA',
'Least developed countries: UN classification': 'LDC',
'Lebanon': 'LBN',
'Lesotho': 'LSO',
'Liberia': 'LBR',
'Libya': 'LBY',
'Liechtenstein': 'LIE',
'Lithuania': 'LTU',
'Low & middle income': 'LMY',
'Low income': 'LIC',
'Lower middle income': 'LMC',
'Luxembourg': 'LUX',
'Macao SAR, China': 'MAC',
'Macedonia, FYR': 'MKD',
'Madagascar': 'MDG',
'Malawi': 'MWI',
'Malaysia': 'MYS',
'Maldives': 'MDV',
'Mali': 'MLI',
'Malta': 'MLT',
'Marshall Islands': 'MHL',
'Mauritania': 'MRT',
'Mauritius': 'MUS',
'Mexico': 'MEX',
'Micronesia, Fed. Sts.': 'FSM',
'Middle East & North Africa (all income levels)': 'MEA',
'Middle East & North Africa (developing only)': 'MNA',
'Middle East and North Africa (IFC classification)': 'CME',
'Middle income': 'MIC',
'Moldova': 'MDA',
'Monaco': 'MCO',
'Mongolia': 'MNG',
'Montenegro': 'MNE',
'Morocco': 'MAR',
'Mozambique': 'MOZ',
'Myanmar': 'MMR',
'Namibia': 'NAM',
'Nepal': 'NPL',
'Netherlands': 'NLD',
'New Caledonia': 'NCL',
'New Zealand': 'NZL',
'Nicaragua': 'NIC',
'Niger': 'NER',
'Nigeria': 'NGA',
'North Africa': 'NAF',
'North America': 'NAC',
'Northern Mariana Islands': 'MNP',
'Norway': 'NOR',
'OECD members': 'OED',
'Oman': 'OMN',
'Other small states': 'OSS',
'Pacific island small states': 'PSS',
'Pakistan': 'PAK',
'Palau': 'PLW',
'Panama': 'PAN',
'Papua New Guinea': 'PNG',
'Paraguay': 'PRY',
'Peru': 'PER',
'Philippines': 'PHL',
'Poland': 'POL',
'Portugal': 'PRT',
'Puerto Rico': 'PRI',
'Qatar': 'QAT',
'Romania': 'ROU',
'Russian Federation': 'RUS',
'Rwanda': 'RWA',
'Samoa': 'WSM',
'San Marino': 'SMR',
'Sao Tome and Principe': 'STP',
'Saudi Arabia': 'SAU',
'Senegal': 'SEN',
'Serbia': 'SRB',
'Seychelles': 'SYC',
'Sierra Leone': 'SLE',
'Singapore': 'SGP',
'Sint Maarten (Dutch part)': 'SXM',
'Slovak Republic': 'SVK',
'Slovenia': 'SVN',
'Small states': 'SST',
'Solomon Islands': 'SLB',
'Somalia': 'SOM',
'South Africa': 'ZAF',
'South Asia': 'SAS',
'South Asia (IFC classification)': 'CSA',
'South Sudan': 'SSD',
'Spain': 'ESP',
'Sri Lanka': 'LKA',
'St. Kitts and Nevis': 'KNA',
'St. Lucia': 'LCA',
'St. Martin (French part)': 'MAF',
'St. Vincent and the Grenadines': 'VCT',
'Sub-Saharan Africa (IFC classification)': 'CAA',
'Sub-Saharan Africa (all income levels)': 'SSF',
'Sub-Saharan Africa (developing only)': 'SSA',
'Sub-Saharan Africa excluding South Africa': 'SXZ',
'Sub-Saharan Africa excluding South Africa and Nigeria': 'XZN',
'Sudan': 'SDN',
'Suriname': 'SUR',
'Swaziland': 'SWZ',
'Sweden': 'SWE',
'Switzerland': 'CHE',
'Syrian Arab Republic': 'SYR',
'Tajikistan': 'TJK',
'Tanzania': 'TZA',
'Thailand': 'THA',
'Timor-Leste': 'TLS',
'Togo': 'TGO',
'Tonga': 'TON',
'Trinidad and Tobago': 'TTO',
'Tunisia': 'TUN',
'Turkey': 'TUR',
'Turkmenistan': 'TKM',
'Turks and Caicos Islands': 'TCA',
'Tuvalu': 'TUV',
'Uganda': 'UGA',
'Ukraine': 'UKR',
'United Arab Emirates': 'ARE',
'United Kingdom': 'GBR',
'United States': 'USA',
'Upper middle income': 'UMC',
'Uruguay': 'URY',
'Uzbekistan': 'UZB',
'Vanuatu': 'VUT',
'Venezuela, RB': 'VEN',
'Vietnam': 'VNM',
'Virgin Islands (U.S.)': 'VIR',
'West Bank and Gaza': 'PSE',
'World': 'WLD',
'Yemen, Rep.': 'YEM',
'Zambia': 'ZMB',
'Zimbabwe': 'ZWE'}
#1.2 Use wbdata to get lists of country codes by income groups
countries_income_all = [i['id'] for i in wb.get_country(incomelevel=['LIC','MIC','HIC'])]
countries_income_H = [i['id'] for i in wb.get_country(incomelevel=['HIC'])]
countries_income_M = [i['id'] for i in wb.get_country(incomelevel=['MIC'])]
countries_income_L = [i['id'] for i in wb.get_country(incomelevel=['LIC'])]
countries_income_oecd = ['AUS','CAN','CHL','CZE','DNK','EST','HUN','ISL','ISR','JPN'
                         ,'KOR','NZL','NOR','POL','SVK','SVN','SWE','CHE','USA']
# # Import data from Quandl
# In[ ]:
# 2. Import data from Quandl
# 2.1 Money supply (LCU)
money_df = pd.DataFrame({})
for name,key in country_codes.items():
try:
df = Quandl.get('WWDI/'+key+'_FM_LBL_MONY_CN',authtoken="<KEY>")
df.columns = [key]
money_df = pd.concat([money_df,df],axis=1)
except:
pass
# 2.2 GDP deflator
deflator_df = pd.DataFrame({})
for name,key in country_codes.items():
try:
df = Quandl.get('WWDI/'+key+'_NY_GDP_DEFL_ZS',authtoken="<KEY>")
df.columns = [key]
deflator_df = pd.concat([deflator_df,df],axis=1)
except:
pass
# 2.3 Real GDP
gdp_df = pd.DataFrame({})
for name,key in country_codes.items():
try:
df = Quandl.get('WWDI/'+key+'_NY_GDP_MKTP_KD',authtoken="<KEY>")
df.columns = [key]
gdp_df = pd.concat([gdp_df,df],axis=1)
except:
pass
# 2.4 Exahange rate relative to USD
exchange_df = pd.DataFrame({})
#!/usr/bin/env python3
import requests
import json
import pandas as pd
import numpy as np
import os
import sys
import time
from datetime import datetime, date
from strava_logging import logger
from db_connection import connect, sql
from location_data import lookup_location
class Athlete:
def __init__(self, **kwargs):
self.conn = self.create_connection()
if kwargs:
self.cond = next(iter(kwargs.keys()))
self.val = next(iter(kwargs.values()))
else:
self.cond = None
self.val = None
self.df = self.return_df()
self.ath_info = self.athlete_info()
@staticmethod
def create_connection():
return connect()
def create_new_athlete(self, athlete_id: int, client_id: int, client_secret: str, refresh_token: str,
firstname: str, lastname: str):
"""
Creates a new athlete in the database.
:param athlete_id: Identifier of the athlete in Strava
:param client_id: ID provided to access the athlete's data in the API
:param client_secret: Secret code for this API user.
:param refresh_token: Token used to refresh the API connection.
:param firstname: First name of the athlete.
:param lastname: Last name of the athlete.
:return:
"""
new_athlete_info = {
'athlete_id': athlete_id,
'client_id': client_id,
'client_secret': client_secret,
'refresh_token': refresh_token,
'firstname': firstname,
'lastname': lastname
}
df_new = pd.DataFrame(new_athlete_info)
conn = self.create_connection()
df_new.to_sql('athletes', conn, if_exists='append', index=False)
conn.close()
def return_df(self) -> pd.DataFrame:
df = pd.read_sql(sql="""SELECT * FROM athletes""", con=self.conn)
self.close_conn()
if self.cond is not None and self.val is not None and self.cond in df.columns:
df = df.loc[df[self.cond] == self.val]
return df
def athlete_info(self) -> dict:
"""
Returns the athlete's data which will be used in the Activities class.
:return:
"""
return self.df.to_dict(orient='records')[0]
def close_conn(self):
self.conn.close()
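# Minimal usage sketch (hypothetical athlete id; assumes the 'athletes' table is populated):
# athlete = Athlete(athlete_id=12345)          # keyword argument filters the athletes table
# activities = Activities(athlete.ath_info)    # needs client_id, client_secret, refresh_token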
class Activities:
def __init__(self, athlete_info: dict):
self.athlete_info = athlete_info
assert self.athlete_info is not None, f"Please provide athlete info. " \
f"Client_id, client_secret and refresh_token required."
self.athlete_id = self.athlete_info['athlete_id']
self.base_url = 'https://www.strava.com/api/v3'
self.refresh_data = self.refresh_api_connection()
self.access_token = self.refresh_data['access_token']
self.headers = {'Authorization': f"Bearer {self.access_token}"}
self.token_expires = self.refresh_data['expires_at']
self.conn = connect()
self.latest_activity = self.get_latest_activity()
self.earliest_activity = self.get_earliest_activity()
self.existing_locations = self.get_existing_locations()
self.existing_gear = self.get_existing_gear()
        self.df = pd.DataFrame()
"""Tests for _data_reading.py"""
import datetime
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
import primap2
import primap2.pm2io as pm2io
import primap2.pm2io._conversion
from primap2.pm2io._data_reading import additional_coordinate_metadata
from .utils import assert_ds_aligned_equal
DATA_PATH = Path(__file__).parent / "data"
@pytest.mark.parametrize(
"unit, entity, expected_attrs",
[
("Mt", "CO2", {"units": "Mt", "entity": "CO2"}),
(
"Gg CO2",
"KYOTOGHG (AR4GWP100)",
{
"units": "Gg CO2",
"entity": "KYOTOGHG",
"gwp_context": "AR4GWP100",
},
),
(
"kg CO2",
"CH4 (SARGWP100)",
{
"units": "kg CO2",
"entity": "CH4",
"gwp_context": "SARGWP100",
},
),
],
)
def test_metadata_for_variable(unit, entity, expected_attrs):
assert (
pm2io._interchange_format.metadata_for_variable(unit, entity) == expected_attrs
)
def assert_attrs_equal(attrs_result, attrs_expected):
assert attrs_result.keys() == attrs_expected.keys()
assert attrs_result["attrs"] == attrs_expected["attrs"]
assert attrs_result["time_format"] == attrs_expected["time_format"]
assert attrs_result["dimensions"].keys() == attrs_expected["dimensions"].keys()
for entity in attrs_result["dimensions"]:
assert set(attrs_result["dimensions"][entity]) == set(
attrs_expected["dimensions"][entity]
)
@pytest.fixture
def coords_cols():
return {
"unit": "unit",
"entity": "gas",
"area": "country",
"category": "category",
"sec_cats__Class": "classification",
}
@pytest.fixture
def add_coords_cols():
return {"category_name": ["category_name", "category"]}
@pytest.fixture
def coords_defaults():
return {
"source": "TESTcsv2021",
"sec_cats__Type": "fugitive",
"scenario": "HISTORY",
}
@pytest.fixture
def coords_terminologies():
return {
"area": "ISO3",
"category": "IPCC2006",
"sec_cats__Type": "type",
"sec_cats__Class": "class",
"scenario": "general",
}
@pytest.fixture
def coords_value_mapping():
return {
"category": "PRIMAP1",
"entity": "PRIMAP1",
"unit": "PRIMAP1",
}
@pytest.fixture
def coords_value_filling():
return {
"category": { # col to fill
"category_name": { # col to fill from
"Energy": "1", # from value: to value
"IPPU": "2",
}
}
}
@pytest.fixture
def filter_keep():
return {
"f1": {"category": ["IPC0", "IPC2"]},
"f2": {"classification": "TOTAL"},
}
@pytest.fixture
def filter_remove():
return {"f1": {"gas": "CH4"}, "f2": {"country": ["USA", "FRA"]}}
class TestReadWideCSVFile:
def test_output(
self,
tmp_path,
coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
filter_keep,
filter_remove,
):
file_input = DATA_PATH / "test_csv_data_sec_cat.csv"
file_expected = DATA_PATH / "test_read_wide_csv_file_output.csv"
df_expected = pd.read_csv(file_expected, index_col=0)
meta_data = {"references": "Just ask around."}
df_result = pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
filter_keep=filter_keep,
filter_remove=filter_remove,
meta_data=meta_data,
)
attrs_result = df_result.attrs
df_result.to_csv(tmp_path / "temp.csv")
df_result = pd.read_csv(tmp_path / "temp.csv", index_col=0)
pd.testing.assert_frame_equal(df_result, df_expected, check_column_type=False)
attrs_expected = {
"attrs": {
"references": "Just ask around.",
"sec_cats": ["Class (class)", "Type (type)"],
"scen": "scenario (general)",
"area": "area (ISO3)",
"cat": "category (IPCC2006)",
},
"time_format": "%Y",
"dimensions": {
"*": [
"entity",
"source",
"area (ISO3)",
"Type (type)",
"unit",
"scenario (general)",
"Class (class)",
"category (IPCC2006)",
]
},
}
assert_attrs_equal(attrs_result, attrs_expected)
def test_no_sec_cats(
self,
tmp_path,
coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
):
file_input = DATA_PATH / "test_csv_data.csv"
file_expected = DATA_PATH / "test_read_wide_csv_file_no_sec_cats.csv"
df_expected = pd.read_csv(file_expected, index_col=0)
del coords_cols["sec_cats__Class"]
del coords_defaults["sec_cats__Type"]
del coords_terminologies["sec_cats__Class"]
del coords_terminologies["sec_cats__Type"]
df_result = pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
)
attrs_result = df_result.attrs
df_result.to_csv(tmp_path / "temp.csv")
df_result = pd.read_csv(tmp_path / "temp.csv", index_col=0)
pd.testing.assert_frame_equal(df_result, df_expected, check_column_type=False)
attrs_expected = {
"attrs": {
"scen": "scenario (general)",
"area": "area (ISO3)",
"cat": "category (IPCC2006)",
},
"time_format": "%Y",
"dimensions": {
"*": [
"entity",
"source",
"area (ISO3)",
"unit",
"scenario (general)",
"category (IPCC2006)",
]
},
}
assert_attrs_equal(attrs_result, attrs_expected)
def test_add_coords(
self,
tmp_path,
coords_cols,
add_coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
):
file_input = DATA_PATH / "test_csv_data_category_name.csv"
file_expected = DATA_PATH / "test_read_wide_csv_file_no_sec_cats_cat_name.csv"
df_expected = pd.read_csv(file_expected, index_col=0)
del coords_cols["sec_cats__Class"]
del coords_defaults["sec_cats__Type"]
del coords_terminologies["sec_cats__Class"]
del coords_terminologies["sec_cats__Type"]
df_result = pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
add_coords_cols=add_coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
)
attrs_result = df_result.attrs
df_result.to_csv(tmp_path / "temp.csv")
df_result = pd.read_csv(tmp_path / "temp.csv", index_col=0)
pd.testing.assert_frame_equal(df_result, df_expected, check_column_type=False)
attrs_expected = {
"attrs": {
"scen": "scenario (general)",
"area": "area (ISO3)",
"cat": "category (IPCC2006)",
},
"time_format": "%Y",
"dimensions": {
"*": [
"entity",
"source",
"area (ISO3)",
"unit",
"scenario (general)",
"category (IPCC2006)",
]
},
"additional_coordinates": {"category_name": "category (IPCC2006)"},
}
assert_attrs_equal(attrs_result, attrs_expected)
def test_read_wide_fill_col(
self,
tmp_path,
coords_cols,
add_coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
coords_value_filling,
):
file_input = DATA_PATH / "test_csv_data_category_name_fill_cat_code.csv"
file_expected = DATA_PATH / "test_read_wide_csv_file_no_sec_cats_cat_name.csv"
df_expected = pd.read_csv(file_expected, index_col=0)
del coords_cols["sec_cats__Class"]
del coords_defaults["sec_cats__Type"]
del coords_terminologies["sec_cats__Class"]
del coords_terminologies["sec_cats__Type"]
df_result = pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
add_coords_cols=add_coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
coords_value_filling=coords_value_filling,
)
attrs_result = df_result.attrs
df_result.to_csv(tmp_path / "temp.csv")
df_result = pd.read_csv(tmp_path / "temp.csv", index_col=0)
pd.testing.assert_frame_equal(df_result, df_expected, check_column_type=False)
attrs_expected = {
"attrs": {
"scen": "scenario (general)",
"area": "area (ISO3)",
"cat": "category (IPCC2006)",
},
"time_format": "%Y",
"dimensions": {
"*": [
"entity",
"source",
"area (ISO3)",
"unit",
"scenario (general)",
"category (IPCC2006)",
]
},
"additional_coordinates": {"category_name": "category (IPCC2006)"},
}
assert_attrs_equal(attrs_result, attrs_expected)
def test_entity_terminology(
self,
tmp_path,
coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
):
file_input = DATA_PATH / "test_csv_data.csv"
file_expected = DATA_PATH / "test_read_wide_csv_file_no_sec_cats.csv"
df_expected: pd.DataFrame = pd.read_csv(file_expected, index_col=0)
df_expected.rename(columns={"entity": "entity (PRIMAP1)"}, inplace=True)
del coords_cols["sec_cats__Class"]
del coords_defaults["sec_cats__Type"]
del coords_terminologies["sec_cats__Class"]
del coords_terminologies["sec_cats__Type"]
coords_terminologies["entity"] = "PRIMAP1"
df_result = pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
)
attrs_result = df_result.attrs
df_result.to_csv(tmp_path / "temp.csv")
df_result = pd.read_csv(tmp_path / "temp.csv", index_col=0)
pd.testing.assert_frame_equal(df_result, df_expected, check_column_type=False)
attrs_expected = {
"attrs": {
"scen": "scenario (general)",
"area": "area (ISO3)",
"cat": "category (IPCC2006)",
"entity_terminology": "PRIMAP1",
},
"time_format": "%Y",
"dimensions": {
"*": [
"entity (PRIMAP1)",
"source",
"area (ISO3)",
"unit",
"scenario (general)",
"category (IPCC2006)",
]
},
}
assert_attrs_equal(attrs_result, attrs_expected)
def test_coords_value_mapping_dict(
self,
tmp_path,
coords_cols,
coords_defaults,
coords_terminologies,
filter_keep,
filter_remove,
):
file_input = DATA_PATH / "test_csv_data_sec_cat.csv"
file_expected = DATA_PATH / "test_read_wide_csv_file_output.csv"
df_expected = pd.read_csv(file_expected, index_col=0)
coords_value_mapping = {
"category": {"IPC1": "1", "IPC2": "2", "IPC3": "3", "IPC0": "0"},
"entity": {"KYOTOGHG": "KYOTOGHG (SARGWP100)"},
"unit": "PRIMAP1",
}
df_result = pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
filter_keep=filter_keep,
filter_remove=filter_remove,
)
df_result.to_csv(tmp_path / "temp.csv")
df_result = pd.read_csv(tmp_path / "temp.csv", index_col=0)
pd.testing.assert_frame_equal(df_result, df_expected, check_column_type=False)
def test_entity_default(
self,
tmp_path,
coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
filter_keep,
filter_remove,
):
file_input = DATA_PATH / "test_csv_data_sec_cat.csv"
file_expected = DATA_PATH / "test_read_wide_csv_file_output_entity_def.csv"
df_expected = pd.read_csv(file_expected, index_col=0)
del coords_cols["entity"]
del coords_value_mapping["entity"]
coords_defaults["entity"] = "CO2"
del filter_remove["f1"]
df_result = pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
filter_keep=filter_keep,
filter_remove=filter_remove,
)
df_result.to_csv(tmp_path / "temp.csv")
df_result = pd.read_csv(tmp_path / "temp.csv", index_col=0)
pd.testing.assert_frame_equal(df_result, df_expected, check_column_type=False)
def test_unit_default(
self,
tmp_path,
coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
filter_keep,
filter_remove,
):
file_input = DATA_PATH / "test_csv_data_sec_cat.csv"
file_expected = DATA_PATH / "test_read_wide_csv_file_output_unit_def.csv"
df_expected = pd.read_csv(file_expected, index_col=0)
del coords_cols["unit"]
coords_defaults["unit"] = "Gg"
filter_remove["f1"] = {"gas": "KYOTOGHG"}
df_result = pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
filter_keep=filter_keep,
filter_remove=filter_remove,
)
df_result.to_csv(tmp_path / "test.csv")
df_result = pd.read_csv(tmp_path / "test.csv", index_col=0)
pd.testing.assert_frame_equal(df_result, df_expected, check_column_type=False)
def test_function_mapping(
self,
tmp_path,
coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
filter_keep,
filter_remove,
):
file_input = DATA_PATH / "test_csv_data_sec_cat.csv"
file_expected = DATA_PATH / "test_read_wide_csv_file_output_unit_def.csv"
df_expected = pd.read_csv(file_expected, index_col=0)
del coords_cols["unit"]
coords_defaults["unit"] = "Gg"
coords_value_mapping[
"category"
] = pm2io._conversion.convert_ipcc_code_primap_to_primap2
filter_remove["f1"] = {"gas": "KYOTOGHG"}
df_result = pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
filter_keep=filter_keep,
filter_remove=filter_remove,
)
df_result.to_csv(tmp_path / "test.csv")
df_result = pd.read_csv(tmp_path / "test.csv", index_col=0)
        pd.testing.assert_frame_equal(df_result, df_expected, check_column_type=False)
#Imports
from aiohttp import ClientSession
from itertools import chain
import pandas as pd
import asyncio
#Stock ticker and dates for data required
#IEX api key
start = '2019/04/01' #earliest date available on non-premium IEX accounts
end = '2020/11/27'
key = 'IEX API KEY' #enter api key from IEX
ticker = 'FB'
#Convert dates into api url format
daterange = pd.date_range(start=start, end=end).strftime('%Y%m%d')
#Function for asynchronous api request
async def fetch(session, url):
async with session.get(url) as response:
data = await response.json()
return data
async def main(daterange):
async with ClientSession() as session:
tasks = []
for date in daterange:
tasks.append(
asyncio.create_task(
fetch(session, f'https://cloud.iexapis.com/stable/stock/{ticker}/chart/date/{date}?token={key}&chartIEXOnly=true',)))
content = await asyncio.gather(*tasks, return_exceptions=True)
return content
#Unable to make more than 30 request at a time
#Function to breakout dates into batches
def batch(lst, n):
for i in range(0, len(lst), n):
yield lst[i:i + n]
batch_dates = batch(list(daterange), 30) #List of dates in batches of 30
df = pd.DataFrame()
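# Sketch of consuming the 30-date batches (assumptions: batches run sequentially to respect
# the request limit, and each per-day payload is a list of records suitable for DataFrame):
# for dates in batch_dates:
#     daily_results = asyncio.run(main(dates))
#     for day in daily_results:
#         df = pd.concat([df, pd.DataFrame(day)], ignore_index=True)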
import pandas as pd
import glob
import os
import numpy as np
import time
import fastparquet
import argparse
from multiprocessing import Pool
import multiprocessing as mp
from os.path import isfile
parser = argparse.ArgumentParser(description='Program to run google compounder for a particular file and setting')
parser.add_argument('--data', type=str,
help='location of the pickle file')
# don't use this for now
parser.add_argument('--word', action='store_true',
help='Extracting context for words only?')
parser.add_argument('--output', type=str,
help='directory to save dataset in')
args = parser.parse_args()
with open('/mnt/dhr/CreateChallenge_ICC_0821/no_ner_0_50000.txt','r') as f:
contexts=f.read().split("\n")
contexts=contexts[:-1]
def left_side_parser(df): # N N _ _ _
cur_df=df.copy()
try:
cur_df[['modifier','head','w1','w2','w3']]=cur_df.lemma_pos.str.split(' ',expand=True)
except ValueError:
compound_df=pd.DataFrame()
modifier_df=pd.DataFrame()
head_df=pd.DataFrame()
return compound_df,modifier_df,head_df
compound_df=pd.melt(cur_df,id_vars=['modifier','head','year','count'],value_vars=['w1','w2','w3'],value_name='context')
compound_df=compound_df.loc[compound_df.context.isin(contexts)]
modifier_df=pd.melt(cur_df,id_vars=['modifier','year','count'],value_vars=['head','w1','w2'],value_name='context')
modifier_df=modifier_df.loc[modifier_df.context.isin(contexts)]
head_df=pd.melt(cur_df,id_vars=['head','year','count'],value_vars=['modifier','w1','w2','w3'],value_name='context')
head_df=head_df.loc[head_df.context.isin(contexts)]
return compound_df,modifier_df,head_df
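# Illustrative 5-gram for the "N N _ _ _" pattern handled above (made-up example): in
# "rock band played loud music", modifier='rock', head='band', and w1..w3 =
# 'played', 'loud', 'music' supply the candidate context words, which are then filtered
# against the `contexts` list.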
def mid1_parser(df): # _ N N _ _
cur_df=df.copy()
try:
cur_df[['w1','modifier','head','w2','w3']]=cur_df.lemma_pos.str.split(' ',expand=True)
except ValueError:
compound_df=pd.DataFrame()
modifier_df=pd.DataFrame()
head_df=pd.DataFrame()
return compound_df,modifier_df,head_df
compound_df=pd.melt(cur_df,id_vars=['modifier','head','year','count'],value_vars=['w1','w2','w3'],value_name='context')
compound_df=compound_df.loc[compound_df.context.isin(contexts)]
modifier_df=pd.melt(cur_df,id_vars=['modifier','year','count'],value_vars=['head','w1','w2','w3'],value_name='context')
modifier_df=modifier_df.loc[modifier_df.context.isin(contexts)]
head_df=pd.melt(cur_df,id_vars=['head','year','count'],value_vars=['modifier','w1','w2','w3'],value_name='context')
head_df=head_df.loc[head_df.context.isin(contexts)]
return compound_df,modifier_df,head_df
def mid2_parser(df): # _ _ N N _
cur_df=df.copy()
try:
cur_df[['w1','w2','modifier','head','w3']]=cur_df.lemma_pos.str.split(' ',expand=True)
except ValueError:
compound_df=pd.DataFrame()
modifier_df=pd.DataFrame()
head_df=pd.DataFrame()
return compound_df,modifier_df,head_df
compound_df=pd.melt(cur_df,id_vars=['modifier','head','year','count'],value_vars=['w1','w2','w3'],value_name='context')
compound_df=compound_df.loc[compound_df.context.isin(contexts)]
modifier_df=pd.melt(cur_df,id_vars=['modifier','year','count'],value_vars=['head','w1','w2','w3'],value_name='context')
modifier_df=modifier_df.loc[modifier_df.context.isin(contexts)]
head_df=pd.melt(cur_df,id_vars=['head','year','count'],value_vars=['modifier','w1','w2','w3'],value_name='context')
head_df=head_df.loc[head_df.context.isin(contexts)]
return compound_df,modifier_df,head_df
def right_side_parser(df): # _ _ _ N N
cur_df=df.copy()
try:
cur_df[['w1','w2','w3','modifier','head']]=cur_df.lemma_pos.str.split(' ',expand=True)
except ValueError:
compound_df=pd.DataFrame()
modifier_df=pd.DataFrame()
head_df=pd.DataFrame()
return compound_df,modifier_df,head_df
compound_df=pd.melt(cur_df,id_vars=['modifier','head','year','count'],value_vars=['w1','w2','w3'],value_name='context')
compound_df=compound_df.loc[compound_df.context.isin(contexts)]
modifier_df=pd.melt(cur_df,id_vars=['modifier','year','count'],value_vars=['head','w1','w2','w3'],value_name='context')
modifier_df=modifier_df.loc[modifier_df.context.isin(contexts)]
head_df=pd.melt(cur_df,id_vars=['head','year','count'],value_vars=['modifier','w2','w3'],value_name='context')
head_df=head_df.loc[head_df.context.isin(contexts)]
return compound_df,modifier_df,head_df
def syntactic_reducer(df):
pattern=df.iloc[0].comp_class
if pattern==1: # N N _ _ N N
compound_left_df,modifier_left_df,head_left_df=left_side_parser(df)
compound_right_df,modifier_right_df,head_right_df=right_side_parser(df)
final_compound_df=pd.concat([compound_left_df,compound_right_df],ignore_index=True)
final_modifier_df=pd.concat([modifier_left_df,modifier_right_df],ignore_index=True)
final_head_df=pd.concat([head_left_df,head_right_df],ignore_index=True)
elif pattern==2: # N N _ _ _
final_compound_df,final_modifier_df,final_head_df=left_side_parser(df)
elif pattern==3: # _ N N _ _
final_compound_df,final_modifier_df,final_head_df=mid1_parser(df)
elif pattern==4: # _ _ N N _
final_compound_df,final_modifier_df,final_head_df=mid2_parser(df)
elif pattern==5: # _ _ _ N N
final_compound_df,final_modifier_df,final_head_df=right_side_parser(df)
return final_compound_df,final_modifier_df,final_head_df
def compound_extracter(df):
if df.loc[df.comp_class==1].shape[0]!=0:
sides_comp_df,sides_mod_df,sides_head_df=syntactic_reducer(df.loc[df.comp_class==1])
else:
sides_comp_df=pd.DataFrame()
sides_mod_df=pd.DataFrame()
sides_head_df=pd.DataFrame()
if df.loc[df.comp_class==2].shape[0]!=0:
left_comp_df,left_mod_df,left_head_df=syntactic_reducer(df.loc[df.comp_class==2])
else:
left_comp_df=pd.DataFrame()
left_mod_df=pd.DataFrame()
left_head_df=pd.DataFrame()
if df.loc[df.comp_class==3].shape[0]!=0:
mid1_comp_df,mid1_mod_df,mid1_head_df=syntactic_reducer(df.loc[df.comp_class==3])
else:
mid1_comp_df=pd.DataFrame()
mid1_mod_df=pd.DataFrame()
mid1_head_df=pd.DataFrame()
if df.loc[df.comp_class==4].shape[0]!=0:
mid2_comp_df,mid2_mod_df,mid2_head_df=syntactic_reducer(df.loc[df.comp_class==4])
else:
mid2_comp_df=pd.DataFrame()
mid2_mod_df=pd.DataFrame()
mid2_head_df=pd.DataFrame()
if df.loc[df.comp_class==5].shape[0]!=0:
right_comp_df,right_mod_df,right_head_df=syntactic_reducer(df.loc[df.comp_class==5])
else:
right_comp_df=pd.DataFrame()
right_mod_df=pd.DataFrame()
right_head_df=pd.DataFrame()
compounds=pd.concat([sides_comp_df,left_comp_df,mid1_comp_df,mid2_comp_df,right_comp_df],ignore_index=True,sort=False)
modifiers=pd.concat([sides_mod_df,left_mod_df,mid1_mod_df,mid2_mod_df,right_mod_df],ignore_index=True,sort=False)
heads=pd.concat([sides_head_df,left_head_df,mid1_head_df,mid2_head_df,right_head_df],ignore_index=True,sort=False)
if len(compounds)==0:
return compounds,modifiers,heads
compounds.dropna(inplace=True)
compounds=compounds.groupby(['modifier','head','context','year'])['count'].sum().to_frame()
compounds.reset_index(inplace=True)
modifiers.dropna(inplace=True)
modifiers=modifiers.groupby(['modifier','context','year'])['count'].sum().to_frame()
modifiers.reset_index(inplace=True)
heads.dropna(inplace=True)
heads=heads.groupby(['head','context','year'])['count'].sum().to_frame()
heads.reset_index(inplace=True)
return compounds,modifiers,heads
def parallelize_dataframe(df):
num_partitions=round(0.95*mp.cpu_count())
df_split = np.array_split(df, num_partitions)
print("Done splitting the datasets")
pool = Pool(num_partitions)
cur_time=time.time()
print("Starting parallelizing")
if not args.word:
results=pool.map_async(compound_extracter,df_split)
pool.close()
pool.join()
results=results.get()
print("Done parallelizing")
print("Total time taken",round(time.time()-cur_time),"secs")
compound_list = [ result[0] for result in results]
compounds=pd.concat(compound_list,ignore_index=True)
compounds=compounds.groupby(['modifier','head','context','year'])['count'].sum().to_frame()
compounds.reset_index(inplace=True)
if not isfile(f'{args.output}/compounds.csv'):
compounds.to_csv(f'{args.output}/compounds.csv',sep="\t",index=False)
else:
compounds.to_csv(f'{args.output}/compounds.csv', mode='a',sep="\t", header=False,index=False)
modifier_list = [ result[1] for result in results]
modifiers=pd.concat(modifier_list,ignore_index=True)
modifiers=modifiers.groupby(['modifier','context','year'])['count'].sum().to_frame()
modifiers.reset_index(inplace=True)
if not isfile(f'{args.output}/modifiers.csv'):
modifiers.to_csv(f'{args.output}/modifiers.csv',sep="\t",index=False)
else:
modifiers.to_csv(f'{args.output}/modifiers.csv', mode='a',sep="\t",header=False,index=False)
head_list = [ result[2] for result in results]
heads=pd.concat(head_list,ignore_index=True)
heads=heads.groupby(['head','context','year'])['count'].sum().to_frame()
heads.reset_index(inplace=True)
if not isfile(f'{args.output}/heads.csv'):
heads.to_csv(f'{args.output}/heads.csv',sep="\t",index=False)
else:
heads.to_csv(f'{args.output}/heads.csv', mode='a',sep="\t",header=False,index=False)
# phrase_list = [ result[3] for result in results]
# phrases=pd.concat(phrase_list,ignore_index=True)
# phrases=phrases.groupby(['modifier','head','context','year'])['count'].sum().to_frame()
# phrases.reset_index(inplace=True)
# if not isfile(f'{args.output}/phrases.csv'):
# phrases.to_csv(f'{args.output}/phrases.csv',sep="\t",index=False)
# else:
# phrases.to_csv(f'{args.output}/phrases.csv', mode='a',sep="\t",header=False,index=False)
else:
words_list=[]
results=pool.map_async(cdsm_word_reducer,df_split)
pool.close()
pool.join()
print("Done parallelizing")
print("Total time taken",round(time.time()-cur_time),"secs")
words_list=results.get()
        words = pd.concat(words_list,ignore_index=True,sort=False)
from typing import Dict, List
import datetime
from datetime import timedelta
from decimal import Decimal
import json
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sqlalchemy import create_engine
from tabulate import tabulate
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import ep_time_step, config_sim, access_block
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from cadCAD import configs
from functions import *
from genesis_states import genesis_states
from partial_state_update_block import partial_state_update_block
#Internal
avg_200 = 200
avg_250 = 250
avg_300 = 300
avg_350 = 350
avg_400 = 400
record = 57
games = 160
seasons = 20
attempts = games * seasons - record
#print(attempts)
# games = 160
# seasons = 20
time_step_count = games * seasons
run_count = 2
# ------------------- RANDOM STATE SEED ------------------------------
seed = {
# 'z': np.random.RandomState(1)
}
#--------------EXOGENOUS STATE MECHANISM DICTIONARY--------------------
exogenous_states = {
"timestamp": set_time,
}
#--------------ENVIRONMENTAL PROCESS DICTIONARY------------------------
env_processes = {
}
#----------------------SIMULATION RUN SETUP----------------------------
sim_config = config_sim(
{
"N": run_count,
"T": range(time_step_count)
# "M": g # for parameter sweep
}
)
append_configs(
sim_configs=sim_config,
initial_state=genesis_states,
seeds=seed,
raw_exogenous_states= exogenous_states,
env_processes=env_processes,
partial_state_update_blocks=partial_state_update_block
)
exec_mode = ExecutionMode()
first_config = configs # only contains config1
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run1 = Executor(exec_context=single_proc_ctx, configs=first_config)
run1_raw_result, tensor_field = run1.execute()
result = pd.DataFrame(run1_raw_result)
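# Quick inspection of the simulation output (column names depend on the state variables in
# genesis_states; cadCAD adds bookkeeping columns such as 'run' and 'timestep'):
# print(tabulate(result.head(), headers='keys', tablefmt='psql'))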
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 10 18:41:43 2018
@author: <NAME>
"""
# Libraries
import numpy as np
import pandas as pd
from sklearn import svm
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
def obtain_centroid(X_train, sc, n_clusters):
"""
    Function to obtain the centroids of a group of data points. It uses the K-Means algorithm
    on numerical features that have already been standard-scaled with ``sc``.
    It returns the assigned cluster label for each data point as well as the cluster centroids
    (inverse-transformed back to the original feature scale).
"""
kmeans = KMeans(n_clusters = n_clusters, init= 'k-means++',
max_iter = 300, n_init = 10, random_state = 0)
labels = kmeans.fit_predict(X_train)
centroid = kmeans.cluster_centers_
centroid = sc.inverse_transform(centroid)
    return pd.DataFrame({'labels': labels}), pd.DataFrame(centroid)
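# Hypothetical usage sketch (not in the original file): standardize a random numeric
# matrix, cluster it, and recover the centroids on the original scale.
if __name__ == "__main__":
    rng = np.random.RandomState(42)
    X = rng.normal(loc=[0, 5], scale=1.0, size=(200, 2))
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X)
    labels_df, centroids_df = obtain_centroid(X_scaled, scaler, n_clusters=2)
    print(labels_df['labels'].value_counts())
    print(centroids_df)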
import pandas as pd
from sklearn.preprocessing import PowerTransformer
def preprocess_columns(df):
"""
Assumptions:
- Remove variables with more than 50% missing values
- Replace missing values of numerical variables with per mean
- Remove categorical variables with more than 25 unique values
:return: df
"""
mv_cols = df.columns[df.isnull().sum() / len(df) > 0.5]
df.drop(mv_cols, axis=1, inplace=True)
cols = df.columns
num_cols = df._get_numeric_data().columns
cat_cols = list(set(cols) - set(num_cols))
if len(cat_cols) > 0:
for cat_col in cat_cols:
if len(df[cat_col].unique()) > 25:
df.drop(cat_col, axis=1, inplace=True)
cols = df.columns
num_cols = df._get_numeric_data().columns
cat_cols = list(set(cols) - set(num_cols))
if len(cat_cols) > 0:
for cat_col in cat_cols:
df[cat_col] = df[cat_col].fillna(-1)
if len(num_cols) > 0:
for num_col in num_cols:
df[num_col] = df[num_col].fillna(df[num_col].mean())
return df
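# Small assumed demonstration of preprocess_columns (not in the original module), wrapped
# in a helper so importing this file has no side effects: 'mostly_missing' is dropped
# (>50% NaN), the categorical NaN becomes -1 and the numerical NaN is imputed with the mean.
def _demo_preprocess_columns():
    demo = pd.DataFrame({
        'num': [1.0, None, 3.0, 4.0],
        'cat': ['a', None, 'b', 'a'],
        'mostly_missing': [None, None, None, 1.0],
    })
    return preprocess_columns(demo)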
def load_water_quality_data():
# https://www.kaggle.com/adityakadiwal/water-potability
df = pd.read_csv('../data/water_potability.csv', sep=',')
y_df = df['Potability']
X_df = df.drop('Potability', axis=1)
X_df = preprocess_columns(X_df)
y_df = y_df.astype(int)
y_word_dict = {1: 'Potable_yes', 0: 'Potable_no'}
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
}
return dataset, y_word_dict
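# Assumed usage sketch (not in the original module) showing the dict structure the loaders
# return; it relies on the '../data/water_potability.csv' path referenced above.
def _example_loader_usage():
    dataset, y_word_dict = load_water_quality_data()
    X, y = dataset['full']['X'], dataset['full']['y']
    print(dataset['problem'], X.shape, y_word_dict)
    return X, y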
def load_stroke_data():
# https://www.kaggle.com/fedesoriano/stroke-prediction-dataset
df = pd.read_csv('../data/healthcare-dataset-stroke-data.csv', sep=',')
y_df = df['stroke']
X_df = df.drop('stroke', axis=1)
X_df['hypertension'] = X_df['hypertension'].replace({1: "Yes", 0: "No"})
X_df['heart_disease'] = X_df['heart_disease'].replace({1: "Yes", 0: "No"})
cols = X_df.columns
num_cols = X_df._get_numeric_data().columns
cat_cols = list(set(cols) - set(num_cols))
num_cols = [num_col for num_col in num_cols if num_col in ['age', 'avg_glucose_level', 'bmi']]
X_df = X_df[cat_cols+num_cols]
X_df = preprocess_columns(X_df)
y_df = y_df.astype(int)
y_word_dict = {1: 'Stroke_yes', 0: 'Stroke_no'}
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
}
return dataset, y_word_dict
def load_telco_churn_data():
# https://www.kaggle.com/blastchar/telco-customer-churn/downloads/WA_Fn-UseC_-Telco-Customer-Churn.csv/1
df = pd.read_csv('../data/WA_Fn-UseC_-Telco-Customer-Churn.csv')
y_df = df['Churn']
X_df = df.drop(['Churn', 'customerID'], axis=1)
X_df['SeniorCitizen'] = X_df['SeniorCitizen'].replace({1: "Yes", 0: "No"})
    X_df['TotalCharges'] = pd.to_numeric(X_df['TotalCharges'].replace(" ", ""), errors='coerce')
cols = X_df.columns
num_cols = X_df._get_numeric_data().columns
cat_cols = list(set(cols) - set(num_cols))
num_cols = [num_col for num_col in num_cols if num_col in ['tenure', 'MonthlyCharges', 'TotalCharges']]
X_df = X_df[cat_cols + num_cols]
X_df = preprocess_columns(X_df)
y_df = y_df.replace({'Yes': 1, 'No': 0})
y_df = y_df.astype(int)
y_word_dict = {1: 'Churn_Yes', 0: 'Churn_No'}
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
}
return dataset, y_word_dict
def load_fico_data():
# https://community.fico.com/s/explainable-machine-learning-challenge?tabset-158d9=3
df = pd.read_csv('../data/fico_heloc_dataset_v1.csv')
X_df = df.drop(['RiskPerformance'], axis=1)
X_df['MaxDelq2PublicRecLast12M'] = X_df['MaxDelq2PublicRecLast12M'].astype(str)
X_df['MaxDelqEver'] = X_df['MaxDelqEver'].astype(str)
cols = X_df.columns
num_cols = X_df._get_numeric_data().columns
cat_cols = list(set(cols) - set(num_cols))
cat_cols = [cat_col for cat_col in cat_cols if cat_col in ['MaxDelq2PublicRecLast12M', 'MaxDelqEver']]
X_df = X_df[cat_cols+num_cols.tolist()]
X_df = preprocess_columns(X_df)
y_df = df['RiskPerformance']
y_df = y_df.replace({'Good': 1, 'Bad': 0})
y_df = y_df.astype(int)
y_word_dict = {1: 'Good', 0: 'Bad'}
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
}
return dataset, y_word_dict
def load_bank_marketing_data():
# https://archive.ics.uci.edu/ml/datasets/Bank+Marketing
df = pd.read_csv('../data/bank-full.csv', sep=';')
y_df = df['y']
X_df = df.drop('y', axis=1)
cols = X_df.columns
num_cols = X_df._get_numeric_data().columns
cat_cols = list(set(cols) - set(num_cols))
num_cols = [num_col for num_col in num_cols if num_col in ['age', 'duration', 'campaign', 'pdays', 'previous']]
X_df = X_df[cat_cols + num_cols]
X_df = preprocess_columns(X_df)
y_df = y_df.replace({'yes': 1, 'no': 0})
y_df = y_df.astype(int)
y_word_dict = {1: 'Deposit_subscribed_yes', 0: 'Deposit_subscribed_no'}
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
}
return dataset, y_word_dict
def load_adult_data():
df = pd.read_csv('../data/adult_census_income.csv')
X_df = df.drop(['income'], axis=1)
cols = X_df.columns
num_cols = X_df._get_numeric_data().columns
cat_cols = list(set(cols) - set(num_cols))
num_cols = [num_col for num_col in num_cols if num_col in ['age', 'fnlwgt', 'education.num',
'capital.gain', 'capital.loss', 'hours.per.week']]
X_df = X_df[cat_cols + num_cols]
X_df = preprocess_columns(X_df)
y_df = df["income"]
y_df = y_df.replace({' <=50K': 0, ' >50K': 1})
y_df = y_df.astype(int)
y_word_dict = {0: 'Income<=50K', 1: 'Income>50K'}
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
}
return dataset, y_word_dict
def load_airline_passenger_data():
# https://www.kaggle.com/teejmahal20/airline-passenger-satisfaction
df = pd.read_csv('../data/airline_train.csv', sep=',')
y_df = df['satisfaction']
X_df = df.drop(['Unnamed: 0', 'id', 'satisfaction'], axis=1)
cols = X_df.columns
num_cols = X_df._get_numeric_data().columns
cat_cols = list(set(cols) - set(num_cols))
cat_cols = [cat_col for cat_col in cat_cols if cat_col in ['Gender', 'Customer Type', 'Type of Travel', 'Class']]
X_df = X_df[cat_cols + num_cols.tolist()]
X_df = preprocess_columns(X_df)
y_df = y_df.replace({'satisfied': 1, 'neutral or dissatisfied': 0})
y_df = y_df.astype(int)
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
}
return dataset
def load_car_data():
    # https://archive.ics.uci.edu/ml/datasets/automobile
df = pd.read_csv('../data/car.data', sep=',')
X_df = df.drop(['price'], axis=1)
X_df = X_df.replace("?", "")
    X_df['peak-rpm'] = pd.to_numeric(X_df['peak-rpm'], errors='coerce')
    X_df['horsepower'] = pd.to_numeric(X_df['horsepower'], errors='coerce')
    X_df['stroke'] = pd.to_numeric(X_df['stroke'], errors='coerce')
    X_df['bore'] = pd.to_numeric(X_df['bore'], errors='coerce')
    X_df['normalized-losses'] = pd.to_numeric(X_df['normalized-losses'], errors='coerce')
cols = X_df.columns
num_cols = X_df._get_numeric_data().columns
cat_cols = list(set(cols) - set(num_cols))
num_cols = [num_col for num_col in num_cols if num_col in ['wheel-base', 'length', 'width', 'height', 'curb-weight',
'engine-size', 'bore', 'stroke', 'compression-ratio',
'horsepower', 'peak-rpm', 'city-mpg', 'highway-mpg']]
X_df = X_df[cat_cols + num_cols]
X_df = preprocess_columns(X_df)
y_df = df['price']
pt = PowerTransformer(method="box-cox")
y_np = pt.fit_transform(y_df.to_numpy().reshape(-1, 1))
y_df = pd.DataFrame(data=y_np, columns=["y"])
dataset = {
'problem': 'regression',
'full': {
'X': X_df,
'y': y_df,
},
}
return dataset
def load_student_grade_data():
# https://archive.ics.uci.edu/ml/datasets/Student+Performance
    df = pd.read_csv('../data/student-por.csv', sep=';')
# -*- coding: utf-8 -*-
# import libraries
import pandas as pd
import statsmodels.api as sm
'''
Download monthly prices of Facebook and the S&P 500 index from 2014 to 2017
CSV files downloaded from Yahoo Finance
start period: 02/11/2014
end period: 30/11/2017
period format: DD/MM/YYYY
'''
fb = pd.read_csv('FB.txt', parse_dates=True, index_col='Date',)
sp_500 = pd.read_csv('^GSPC.txt', parse_dates=True, index_col='Date')
# joining the closing prices of the two datasets
monthly_prices = pd.concat([fb['Close'], sp_500['Close']], axis=1)
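# Minimal continuation sketch (assumed, not from the original script): compute monthly
# returns and estimate Facebook's beta against the S&P 500 with an OLS regression, which
# is presumably why statsmodels is imported above.
monthly_prices.columns = ['FB', 'SP500']
monthly_returns = monthly_prices.pct_change().dropna()
X = sm.add_constant(monthly_returns['SP500'])  # add intercept column
capm_model = sm.OLS(monthly_returns['FB'], X).fit()
print(capm_model.params)  # 'const' is alpha, 'SP500' is beta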
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
        # regression test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = tm.box_expected(expected, box_with_array)
if box_other:
other = tm.box_expected(other, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dtarr, other)
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
"op, offset, exp, exp_freq",
[
(
"__add__",
DateOffset(months=3, days=10),
[
Timestamp("2014-04-11"),
Timestamp("2015-04-11"),
Timestamp("2016-04-11"),
Timestamp("2017-04-11"),
],
None,
),
(
"__add__",
DateOffset(months=3),
[
Timestamp("2014-04-01"),
Timestamp("2015-04-01"),
Timestamp("2016-04-01"),
Timestamp("2017-04-01"),
],
"AS-APR",
),
(
"__sub__",
DateOffset(months=3, days=10),
[
Timestamp("2013-09-21"),
Timestamp("2014-09-21"),
Timestamp("2015-09-21"),
Timestamp("2016-09-21"),
],
None,
),
(
"__sub__",
DateOffset(months=3),
[
Timestamp("2013-10-01"),
Timestamp("2014-10-01"),
Timestamp("2015-10-01"),
Timestamp("2016-10-01"),
],
"AS-OCT",
),
],
)
def test_dti_add_sub_nonzero_mth_offset(
self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
):
# GH 26258
tz = tz_aware_fixture
date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
date = tm.box_expected(date, box_with_array, False)
mth = getattr(date, op)
result = mth(offset)
expected = DatetimeIndex(exp, tz=tz)
expected = tm.box_expected(expected, box_with_array, False)
tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
left = Series([Timestamp("1969-12-31")])
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = TimedeltaIndex([NaT])
expected = tm.box_expected(expected, box_with_array)
result = left - right
tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = Timestamp("1700-01-31")
td = Timedelta("20000 Days")
dti = date_range("1949-09-30", freq="100Y", periods=4)
ser = Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
with pytest.raises(OverflowError, match=msg):
dt - ser
with pytest.raises(OverflowError, match=msg):
ser + td
with pytest.raises(OverflowError, match=msg):
td + ser
ser.iloc[-1] = NaT
expected = Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = NaT
expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
msg = "Overflow in int64 addition"
for variant in ts_neg_variants:
with pytest.raises(OverflowError, match=msg):
dtimax - variant
expected = Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
# General tests
expected = Timestamp.max.value - ts_pos[1].value
result = dtimax - ts_pos
assert result[1].value == expected
expected = Timestamp.min.value - ts_neg[1].value
result = dtimin - ts_neg
assert result[1].value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
with pytest.raises(OverflowError, match=msg):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([Timestamp.min])
t1 = tmin + Timedelta.max + Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
t1 - tmin
tmax = pd.to_datetime([Timestamp.max])
t2 = tmax + Timedelta.min - Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
tmax - t2
class TestTimestampSeriesArithmetic:
def test_empty_series_add_sub(self):
# GH#13844
a = Series(dtype="M8[ns]")
b = Series(dtype="m8[ns]")
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
b - a
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
Timestamp("20111230"),
Timestamp("20120101"),
Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
Timestamp("20111231"),
Timestamp("20120102"),
Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
        # datetime64 with timedelta
dt1 + td1
td1 + dt1
dt1 - td1
        # timedelta with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
        # these are all TypeError ops
op_str = all_arithmetic_operators
def check(get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
op = getattr(get_ser, op_str, None)
# Previously, _validate_for_numeric_binop in core/indexes/base.py
# did this for us.
with pytest.raises(
TypeError, match="operate|[cC]annot|unsupported operand"
):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
)
if op_str not in ["__sub__", "__rsub__"]:
check(dt1, dt2)
        # ## datetime64 with timedelta ###
# TODO(jreback) __rsub__ should raise?
if op_str not in ["__add__", "__radd__", "__sub__"]:
check(dt1, td1)
# 8260, 10763
# datetime64 with tz
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
check(dt2, td2)
def test_sub_single_tz(self):
# GH#12290
s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
result = s1 - s2
expected = Series([Timedelta("2days")])
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")])
tm.assert_series_equal(result, expected)
def test_dt64tz_series_sub_dtitz(self):
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = date_range("1999-09-30", periods=10, tz="US/Pacific")
ser = Series(dti)
expected = Series(TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
res = ser - dti
tm.assert_series_equal(res, expected)
def test_sub_datetime_compat(self):
# see GH#14088
s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])
dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
exp = Series([Timedelta("1 days"), NaT])
tm.assert_series_equal(s - dt, exp)
tm.assert_series_equal(s - Timestamp(dt), exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
)
tm.assert_series_equal(result, expected)
def test_datetime64_ops_nat(self):
# GH#11349
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
# subtraction
tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + datetime_series
tm.assert_series_equal(
-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
# -------------------------------------------------------------
# Invalid Operations
# TODO: this block also needs to be de-duplicated and parametrized
@pytest.mark.parametrize(
"dt64_series",
[
Series([Timestamp("19900315"), Timestamp("19900315")]),
Series([NaT, Timestamp("19900315")]),
Series([NaT, NaT], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
# multiplication
msg = "cannot perform .* with this index type"
with pytest.raises(TypeError, match=msg):
dt64_series * one
with pytest.raises(TypeError, match=msg):
one * dt64_series
# division
with pytest.raises(TypeError, match=msg):
dt64_series / one
with pytest.raises(TypeError, match=msg):
one / dt64_series
# TODO: parametrize over box
def test_dt64_series_add_intlike(self, tz_naive_fixture):
# GH#19123
tz = tz_naive_fixture
dti = DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
ser = Series(dti)
other = Series([20, 30, 40], dtype="uint8")
msg = "|".join(
[
"Addition/subtraction of integers and integer-arrays",
"cannot subtract .* from ndarray",
]
)
assert_invalid_addsub_type(ser, 1, msg)
assert_invalid_addsub_type(ser, other, msg)
assert_invalid_addsub_type(ser, np.array(other), msg)
assert_invalid_addsub_type(ser, pd.Index(other), msg)
# -------------------------------------------------------------
# Timezone-Centric Tests
def test_operators_datetimelike_with_timezones(self):
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
assert td2._values.freq is None
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
td1[0] - dt1
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
with pytest.raises(TypeError, match=msg):
td2[0] - dt2
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "cannot (add|subtract)"
with pytest.raises(TypeError, match=msg):
td1 - dt1
with pytest.raises(TypeError, match=msg):
td2 - dt2
class TestDatetimeIndexArithmetic:
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_addsub_int(self, tz_naive_fixture, one):
# Variants of `one` for #19012
tz = tz_naive_fixture
rng = date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
msg = "Addition/subtraction of integers"
with pytest.raises(TypeError, match=msg):
rng + one
with pytest.raises(TypeError, match=msg):
rng += one
with pytest.raises(TypeError, match=msg):
rng - one
with pytest.raises(TypeError, match=msg):
rng -= one
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("freq", ["H", "D"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_tick(self, int_holder, freq):
# GH#19959
dti = date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "|".join(
["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
)
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_non_tick(self, int_holder, freq):
# GH#19959
dti = date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "|".join(
["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
)
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_no_freq(self, int_holder):
# GH#19959
dti = DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"])
other = int_holder([9, 4, -1])
msg = "|".join(
["cannot subtract DatetimeArray from", "Addition/subtraction of integers"]
)
assert_invalid_addsub_type(dti, other, msg)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
        # add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
        # iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
expected = expected._with_freq(None)
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = "cannot subtract .*TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
expected = expected._with_freq(None)
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
# DTA.__isub__ GH#43904
dta = dti._data.copy()
dta -= tdi
tm.assert_datetime_array_equal(dta, expected._data)
out = dti._data.copy()
np.subtract(out, tdi, out=out)
tm.assert_datetime_array_equal(out, expected._data)
msg = "cannot subtract .* from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = "cannot subtract DatetimeArray from ndarray"
with pytest.raises(TypeError, match=msg):
tdi.values -= dti
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi._values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
@pytest.mark.parametrize(
"addend",
[
datetime(2011, 1, 1),
DatetimeIndex(["2011-01-01", "2011-01-02"]),
DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize("US/Eastern"),
np.datetime64("2011-01-01"),
Timestamp("2011-01-01"),
],
ids=lambda x: type(x).__name__,
)
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_add_datetimelike_and_dtarr(self, box_with_array, addend, tz):
# GH#9631
dti = DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize(tz)
dtarr = tm.box_expected(dti, box_with_array)
msg = "cannot add DatetimeArray and"
assert_cannot_add(dtarr, addend, msg)
# -------------------------------------------------------------
def test_dta_add_sub_index(self, tz_naive_fixture):
# Check that DatetimeArray defers to Index classes
dti = date_range("20130101", periods=3, tz=tz_naive_fixture)
dta = dti.array
result = dta - dti
expected = dti - dti
tm.assert_index_equal(result, expected)
tdi = result
result = dta + tdi
expected = dti + tdi
tm.assert_index_equal(result, expected)
result = dta - tdi
expected = dti - tdi
tm.assert_index_equal(result, expected)
def test_sub_dti_dti(self):
        # previously performed setop (deprecated in 0.16.0), now changed to
        # return subtraction -> TimedeltaIndex (GH ...)
dti = date_range("20130101", periods=3)
dti_tz = date_range("20130101", periods=3).tz_localize("US/Eastern")
dti_tz2 = date_range("20130101", periods=3).tz_localize("UTC")
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
msg = "DatetimeArray subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dti_tz - dti
with pytest.raises(TypeError, match=msg):
dti - dti_tz
with pytest.raises(TypeError, match=msg):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range("20130101", periods=3)
dti2 = date_range("20130101", periods=4)
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(["2012-01-01", np.nan, "2012-01-03"])
dti2 = DatetimeIndex(["2012-01-02", "2012-01-03", np.nan])
expected = TimedeltaIndex(["1 days", np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------------
# TODO: Most of this block is moved from series or frame tests, needs
# cleanup, box-parametrization, and de-duplication
@pytest.mark.parametrize("op", [operator.add, operator.sub])
def test_timedelta64_equal_timedelta_supported_ops(self, op, box_with_array):
ser = Series(
[
Timestamp("20130301"),
Timestamp("20130228 23:00:00"),
Timestamp("20130228 22:00:00"),
| Timestamp("20130228 21:00:00") | pandas.Timestamp |
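# A minimal, standalone sketch of the overflow behaviour exercised above:
# subtracting a mid-20th-century datetime from Timestamp.max overflows the
# int64 nanosecond representation, so pandas raises OverflowError.
import pandas as pd

dtimax = pd.to_datetime(["now", pd.Timestamp.max])
early = pd.to_datetime(["1950-01-01", "1950-01-01"])
try:
    dtimax - early
except OverflowError as err:
    print("OverflowError:", err)  # message contains "Overflow in int64 addition"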
from ...utils import constants
import pandas as pd
import geopandas as gpd
import numpy as np
import shapely
import pytest
from contextlib import ExitStack
from sklearn.metrics import mean_absolute_error
from ...preprocessing import detection, clustering
from ...models.sts_epr import STS_epr
from ...core.trajectorydataframe import TrajDataFrame
from ...models.markov_diary_generator import MarkovDiaryGenerator
def global_variables():
# tessellation
tess_polygons = [[[7.481, 45.184],
[7.481, 45.216],
[7.526, 45.216],
[7.526, 45.184],
[7.481, 45.184]],
[[7.481, 45.216],
[7.481, 45.247],
[7.526, 45.247],
[7.526, 45.216],
[7.481, 45.216]],
[[7.526, 45.184],
[7.526, 45.216],
[7.571, 45.216],
[7.571, 45.184],
[7.526, 45.184]],
[[7.526, 45.216],
[7.526, 45.247],
[7.571, 45.247],
[7.571, 45.216],
[7.526, 45.216]]]
geom = [shapely.geometry.Polygon(p) for p in tess_polygons]
tessellation = gpd.GeoDataFrame(geometry=geom, crs="EPSG:4326")
tessellation = tessellation.reset_index().rename(columns={"index": constants.TILE_ID})
relevance = np.random.randint(5, 10, size=len(tessellation))
tessellation[constants.RELEVANCE] = relevance
social_graph = [[0,1],[0,2],[0,3],[1,3],[2,4]]
# mobility diary generator
lats_lngs = np.array([[39.978253, 116.3272755],
[40.013819, 116.306532],
[39.878987, 116.1266865],
[40.013819, 116.306532],
[39.97958, 116.313649],
[39.978696, 116.3262205],
[39.98153775, 116.31079],
[39.978161, 116.3272425],
[38.978161, 115.3272425]])
traj = pd.DataFrame(lats_lngs, columns=[constants.LATITUDE, constants.LONGITUDE])
traj[constants.DATETIME] = pd.to_datetime([
'20130101 8:34:04', '20130101 10:34:08', '20130105 10:34:08',
'20130110 12:34:15', '20130101 1:34:28', '20130101 3:34:54',
'20130101 4:34:55', '20130105 5:29:12', '20130115 00:29:12'])
traj[constants.UID] = [1 for _ in range(5)] + [2 for _ in range(3)] + [3]
tdf = TrajDataFrame(traj)
ctdf = clustering.cluster(tdf)
mdg = MarkovDiaryGenerator()
mdg.fit(ctdf, 3, lid='cluster')
return tessellation, social_graph, mdg
tessellation, social_graph, mdg = global_variables()
sts_epr = STS_epr()
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('diary_generator', [mdg])
@pytest.mark.parametrize('social_graph', [social_graph, 'random'])
@pytest.mark.parametrize('n_agents', [1,5])
@pytest.mark.parametrize('rsl', [True, False])
@pytest.mark.parametrize('relevance_column',['relevance'])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
# First test set: CORRECT arguments, no ERRORS expected (#test: 8)
def test_sts_generate_success(start_date, end_date, spatial_tessellation, diary_generator,
social_graph, n_agents, rsl, relevance_column, random_state, show_progress):
sts_epr = STS_epr()
tdf = sts_epr.generate(start_date=start_date, end_date=end_date, spatial_tessellation=spatial_tessellation,
social_graph=social_graph, diary_generator=diary_generator, n_agents= n_agents, rsl=rsl,
relevance_column=relevance_column, random_state=random_state, show_progress=show_progress)
assert isinstance(tdf, TrajDataFrame)
# Second test set: WRONG arguments, expected to FAIL
# test 2.1: wrong n_agents (#test: 3)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('diary_generator', [mdg])
@pytest.mark.parametrize('social_graph', [social_graph])
@pytest.mark.parametrize('n_agents', [-2,-1,0])
@pytest.mark.parametrize('rsl', [True])
@pytest.mark.parametrize('relevance_column',['relevance'])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
@pytest.mark.xfail(raises=ValueError)
def test_sts_wrong_n_agents(start_date, end_date, spatial_tessellation, diary_generator,
social_graph, n_agents, rsl, relevance_column, random_state, show_progress):
sts_epr = STS_epr()
tdf = sts_epr.generate(start_date=start_date, end_date=end_date, spatial_tessellation=spatial_tessellation,
social_graph=social_graph, diary_generator=diary_generator, n_agents= n_agents, rsl=rsl,
relevance_column=relevance_column, random_state=random_state, show_progress=show_progress)
assert isinstance(tdf, TrajDataFrame)
# test 2.2: end_date prior to start_date (#test: 1)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('diary_generator', [mdg])
@pytest.mark.parametrize('social_graph', [social_graph])
@pytest.mark.parametrize('n_agents', [5])
@pytest.mark.parametrize('rsl', [True])
@pytest.mark.parametrize('relevance_column',['relevance'])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
@pytest.mark.xfail(raises=ValueError)
def test_sts_wrong_dates(start_date, end_date, spatial_tessellation, diary_generator,
social_graph, n_agents, rsl, relevance_column, random_state, show_progress):
sts_epr = STS_epr()
tdf = sts_epr.generate(start_date=start_date, end_date=end_date, spatial_tessellation=spatial_tessellation,
social_graph=social_graph, diary_generator=diary_generator, n_agents= n_agents, rsl=rsl,
relevance_column=relevance_column, random_state=random_state, show_progress=show_progress)
# test 2.3: wrong rsl type (#test: 3)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [ | pd.to_datetime('2020/01/10 08:00:00') | pandas.to_datetime |
import argparse
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import ticker
import seaborn as sns
plt.style.use(["bmh"])
sns.set_palette(sns.color_palette("Paired", 6))
def get_args():
parser = argparse.ArgumentParser("graph argument")
parser.add_argument("--data", type=str, nargs="+")
parser.add_argument("--xlim", type=int, default=171)
return parser.parse_args()
def graph(args, data_list):
ax = plt.axes()
    # label the y-axis ticks in units of 조 (Korean for "trillion", i.e. 1e12)
    ax.yaxis.set_major_formatter(
        ticker.FuncFormatter(lambda x, pos: f"{x/1e+12:.0f}조")
    )
m = pd.DataFrame(columns=["time", "deal", "label"])
for i, df in enumerate(data_list):
df = df.drop(["name", "loss", "hit", "mdf", "spec"], axis=1)
df["time"] = df["time"] - df["time"].min()
df = df[df["time"] <= args.xlim*1000]
df["time"] = df["time"].astype("timedelta64[ms]")
deals = df.resample("5000ms", on="time")["deal"].sum().reset_index()
deals["time"] = deals["time"].dt.total_seconds()
deals["label"] = args.data[i]
m = m.append(deals)
print(m)
sns.barplot(x="time", y="deal", hue="label", data=m)
plt.xticks(np.arange(0, args.xlim//5, args.xlim//40), [f"{i*5}" for i in np.arange(0, args.xlim//5, args.xlim//40)])
plt.legend()
plt.show()
if __name__ == "__main__":
args = get_args()
data_list = [ | pd.read_pickle(f"./data/{label}.pkl") | pandas.read_pickle |
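# A minimal sketch of the resampling pattern used in graph() above, applied to
# a toy frame with millisecond offsets and per-hit damage ("deal") values.
import pandas as pd

toy = pd.DataFrame({"time": [0, 1200, 4800, 7300, 11000],
                    "deal": [10, 20, 30, 40, 50]})
toy["time"] = toy["time"].astype("timedelta64[ms]")
per_5s = toy.resample("5000ms", on="time")["deal"].sum().reset_index()
per_5s["time"] = per_5s["time"].dt.total_seconds()
print(per_5s)  # damage summed into 5-second buckets: 60, 40, 50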
"""Tests for Resource harvesting methods."""
from typing import Any, Dict, List
import numpy as np
import pandas as pd
import pytest
from pudl.metadata.classes import Package, Resource, RESOURCE_METADATA
from pudl.metadata.helpers import most_frequent
# ---- Helpers ---- #
def _assert_frame_equal(a: pd.DataFrame, b: pd.DataFrame, **kwargs: Any) -> None:
"""Assert dataframes are equal, printing a useful error if not."""
try:
| pd.testing.assert_frame_equal(a, b, **kwargs) | pandas.testing.assert_frame_equal |
"""
Pipeline Evaluation module
This module runs all the pipeline steps and allows you to visualize them.
"""
import datetime
from typing import List, Tuple, Union
import pandas as pd
from sklearn.pipeline import Pipeline
from .evaluation import Evaluator
from .feature_reduction import FeatureReductor
from .labeling import Labeler
from .splitting import Splitter
from .utils import Picklable, visualize_data, visualize_labels
class PipelineEvaluator(Picklable):
"""
PipelineEvaluator contains all modules and triggers them.
"""
def __init__(
self,
labeler: Labeler = None,
splitter: Splitter = None,
pipeline: Pipeline = None,
feature_reductor: FeatureReductor = None,
model=None,
evaluator: Evaluator = None,
dropna: bool = True,
downprojector=None,
visualize: Union[bool, List[str]] = False,
verbose: bool = True,
):
self.labeler = labeler
self.splitter = splitter
self.pipeline = pipeline
self.feature_reductor = feature_reductor
self.model = model
self.evaluator = evaluator
self.dropna = dropna
self.downprojector = downprojector
self.visualize = visualize
self.verbose = verbose
if isinstance(self.visualize, bool):
if self.visualize:
self.visualize = [
"labeler",
"splitter",
"pipeline",
"feature_reductor",
"model",
"evaluator",
]
else:
self.visualize = []
def _log(self, text) -> None:
"""
        Print the current time and the provided text if verbose is True.
Parameters
----------
text: string
Comment added to printed time.
"""
if self.verbose:
print(datetime.datetime.now().time().strftime("%H:%M:%S.%f")[:-3], text)
def _drop_na(self, X: pd.DataFrame, y: pd.Series) -> Tuple[pd.DataFrame, pd.Series]:
"""
        Drop columns with too many NaNs, then drop leading rows with NaN values.
Returns
-------
        X, y : tuple (pd.DataFrame, pd.Series)
X as data (with features) and y as labels.
"""
original_shape = X.shape
X.dropna(axis=1, thresh=int(X.shape[0] * 0.9), inplace=True)
cut_number = X.isna().sum().max()
X = X.iloc[cut_number:, :]
if X.isna().sum().sum() > 0:
X = X.dropna(axis=0)
y = y.loc[X.index]
self._log(
f"\tOriginal shape:\t\t{original_shape}; \n\t\tshape after removing NaNs: {X.shape}."
)
return X, y
def run(self, data=None):
"""
Run each module on provided data.
Parameters
----------
data : array-like
Data to evaluate the pipeline on.
Returns
-------
result : dict
Dict of calculated metric values labeled by their names.
"""
if self.labeler is not None:
self._log("Labeling data")
self.labels = self.labeler.transform(data)
if "labeler" in self.visualize:
self.labeler.visualize(labels=self.labels)
if self.splitter is not None:
self._log("Splitting data")
(
self.X_train,
self.X_test,
self.y_train,
self.y_test,
) = self.splitter.transform(X=data, y=self.labels)
if "splitter" in self.visualize:
self.splitter.visualize(X=[self.X_train, self.X_test])
if self.pipeline is not None:
self._log("Fitting pipeline")
self.X_train = self.pipeline.fit_transform(self.X_train, self.y_train)
self._log("Applying pipeline transformations")
self.X_test = self.pipeline.transform(self.X_test)
if self.dropna:
self.X_train, self.y_train = self._drop_na(X=self.X_train, y=self.y_train)
self.X_test, self.y_test = self._drop_na(X=self.X_test, y=self.y_test)
if "pipeline" in self.visualize:
visualize_data(
X=self.X_train,
y=self.y_train,
downprojector=self.downprojector,
title="Visualization of pipeline output",
)
if self.feature_reductor is not None:
self._log("Applying feature reduction")
self.feature_reductor.fit(self.X_train, self.y_train)
self.X_train = self.feature_reductor.transform(self.X_train)
self.X_test = self.feature_reductor.transform(self.X_test)
if "feature_reductor" in self.visualize:
self.feature_reductor.visualize(
X=self.X_train,
y=self.y_train,
downprojector=self.downprojector,
title="Visualization of FeatureReductor output",
)
if self.model is not None:
self._log("Fitting model")
self.model.fit(self.X_train, self.y_train)
if "model" in self.visualize:
self.y_pred = self.model.predict(self.X_train)
if len(self.y_pred.shape) == 1 or self.y_pred.shape[1] == 1:
self.y_pred = pd.Series(self.y_pred, index=self.X_train.index)
else:
self.y_pred = pd.DataFrame(self.y_pred, index=self.X_train.index)
visualize_labels(
labels=pd.DataFrame(
{"y_true": self.y_train, "y_pred": self.y_pred}
),
title="Visualize TRAIN predictions and true values",
)
self._log("Predicting")
self.y_pred = self.model.predict(self.X_test)
if len(self.y_pred.shape) == 1 or self.y_pred.shape[1] == 1:
self.y_pred = | pd.Series(self.y_pred, index=self.X_test.index) | pandas.Series |
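# A hedged usage sketch for the PipelineEvaluator defined above. The Labeler,
# Splitter and Evaluator instances (and their constructor arguments) are
# assumptions here; only the keyword names match the constructor above.
#
# from sklearn.pipeline import Pipeline
# from sklearn.preprocessing import StandardScaler
# from sklearn.linear_model import LogisticRegression
#
# pe = PipelineEvaluator(
#     labeler=some_labeler,        # assumed Labeler instance
#     splitter=some_splitter,      # assumed Splitter instance
#     pipeline=Pipeline([("scale", StandardScaler())]),
#     model=LogisticRegression(),
#     evaluator=some_evaluator,    # assumed Evaluator instance
#     visualize=False,
# )
# result = pe.run(data=some_dataframe)  # dict of metric values, per the docstring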
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import pandas as pd
# In[3]:
sub_1_p = pd.read_csv('./output/submission_1020.csv')
sub_2_p = pd.read_csv('./output/submission_1021.csv')
sub_3_p = pd.read_csv('./output/submission_12345.csv')
sub_4_p = pd.read_csv('./output/submission_1234.csv')
sub_5_p = | pd.read_csv('./output/submission_2017.csv') | pandas.read_csv |
import numpy as np
import pytest
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import Categorical, CategoricalIndex, DataFrame, Series, get_dummies
import pandas._testing as tm
from pandas.core.arrays.sparse import SparseArray, SparseDtype
class TestGetDummies:
@pytest.fixture
def df(self):
return DataFrame({"A": ["a", "b", "a"], "B": ["b", "b", "c"], "C": [1, 2, 3]})
@pytest.fixture(params=["uint8", "i8", np.float64, bool, None])
def dtype(self, request):
return np.dtype(request.param)
@pytest.fixture(params=["dense", "sparse"])
def sparse(self, request):
# params are strings to simplify reading test results,
# e.g. TestGetDummies::test_basic[uint8-sparse] instead of [uint8-True]
return request.param == "sparse"
def effective_dtype(self, dtype):
if dtype is None:
return np.uint8
return dtype
def test_get_dummies_raises_on_dtype_object(self, df):
with pytest.raises(ValueError):
get_dummies(df, dtype="object")
def test_get_dummies_basic(self, sparse, dtype):
s_list = list("abc")
s_series = Series(s_list)
s_series_index = Series(s_list, list("ABC"))
expected = DataFrame(
{"a": [1, 0, 0], "b": [0, 1, 0], "c": [0, 0, 1]},
dtype=self.effective_dtype(dtype),
)
if sparse:
expected = expected.apply(SparseArray, fill_value=0.0)
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
expected.index = list("ABC")
result = get_dummies(s_series_index, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_get_dummies_basic_types(self, sparse, dtype):
# GH 10531
s_list = list("abc")
s_series = Series(s_list)
s_df = DataFrame(
{"a": [0, 1, 0, 1, 2], "b": ["A", "A", "B", "C", "C"], "c": [2, 3, 3, 3, 2]}
)
expected = DataFrame(
{"a": [1, 0, 0], "b": [0, 1, 0], "c": [0, 0, 1]},
dtype=self.effective_dtype(dtype),
columns=list("abc"),
)
if sparse:
if is_integer_dtype(dtype):
fill_value = 0
elif dtype == bool:
fill_value = False
else:
fill_value = 0.0
expected = expected.apply(SparseArray, fill_value=fill_value)
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_df, columns=s_df.columns, sparse=sparse, dtype=dtype)
if sparse:
dtype_name = f"Sparse[{self.effective_dtype(dtype).name}, {fill_value}]"
else:
dtype_name = self.effective_dtype(dtype).name
expected = Series({dtype_name: 8})
result = result.dtypes.value_counts()
result.index = [str(i) for i in result.index]
tm.assert_series_equal(result, expected)
result = get_dummies(s_df, columns=["a"], sparse=sparse, dtype=dtype)
expected_counts = {"int64": 1, "object": 1}
expected_counts[dtype_name] = 3 + expected_counts.get(dtype_name, 0)
expected = Series(expected_counts).sort_index()
result = result.dtypes.value_counts()
result.index = [str(i) for i in result.index]
result = result.sort_index()
tm.assert_series_equal(result, expected)
def test_get_dummies_just_na(self, sparse):
just_na_list = [np.nan]
just_na_series = Series(just_na_list)
just_na_series_index = Series(just_na_list, index=["A"])
res_list = get_dummies(just_na_list, sparse=sparse)
res_series = get_dummies(just_na_series, sparse=sparse)
res_series_index = get_dummies(just_na_series_index, sparse=sparse)
assert res_list.empty
assert res_series.empty
assert res_series_index.empty
assert res_list.index.tolist() == [0]
assert res_series.index.tolist() == [0]
assert res_series_index.index.tolist() == ["A"]
def test_get_dummies_include_na(self, sparse, dtype):
s = ["a", "b", np.nan]
res = get_dummies(s, sparse=sparse, dtype=dtype)
exp = DataFrame(
{"a": [1, 0, 0], "b": [0, 1, 0]}, dtype=self.effective_dtype(dtype)
)
if sparse:
exp = exp.apply(SparseArray, fill_value=0.0)
tm.assert_frame_equal(res, exp)
# Sparse dataframes do not allow nan labelled columns, see #GH8822
res_na = get_dummies(s, dummy_na=True, sparse=sparse, dtype=dtype)
exp_na = DataFrame(
{np.nan: [0, 0, 1], "a": [1, 0, 0], "b": [0, 1, 0]},
dtype=self.effective_dtype(dtype),
)
exp_na = exp_na.reindex(["a", "b", np.nan], axis=1)
# hack (NaN handling in assert_index_equal)
exp_na.columns = res_na.columns
if sparse:
exp_na = exp_na.apply(SparseArray, fill_value=0.0)
tm.assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([np.nan], dummy_na=True, sparse=sparse, dtype=dtype)
exp_just_na = DataFrame(
Series(1, index=[0]), columns=[np.nan], dtype=self.effective_dtype(dtype)
)
tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values)
def test_get_dummies_unicode(self, sparse):
# See GH 6885 - get_dummies chokes on unicode values
import unicodedata
e = "e"
eacute = unicodedata.lookup("LATIN SMALL LETTER E WITH ACUTE")
s = [e, eacute, eacute]
res = get_dummies(s, prefix="letter", sparse=sparse)
exp = DataFrame(
{"letter_e": [1, 0, 0], f"letter_{eacute}": [0, 1, 1]}, dtype=np.uint8
)
if sparse:
exp = exp.apply(SparseArray, fill_value=0)
tm.assert_frame_equal(res, exp)
def test_dataframe_dummies_all_obj(self, df, sparse):
df = df[["A", "B"]]
result = get_dummies(df, sparse=sparse)
expected = DataFrame(
{"A_a": [1, 0, 1], "A_b": [0, 1, 0], "B_b": [1, 1, 0], "B_c": [0, 0, 1]},
dtype=np.uint8,
)
if sparse:
expected = DataFrame(
{
"A_a": SparseArray([1, 0, 1], dtype="uint8"),
"A_b": SparseArray([0, 1, 0], dtype="uint8"),
"B_b": SparseArray([1, 1, 0], dtype="uint8"),
"B_c": SparseArray([0, 0, 1], dtype="uint8"),
}
)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_mix_default(self, df, sparse, dtype):
result = get_dummies(df, sparse=sparse, dtype=dtype)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame(
{
"C": [1, 2, 3],
"A_a": arr([1, 0, 1], dtype=typ),
"A_b": arr([0, 1, 0], dtype=typ),
"B_b": arr([1, 1, 0], dtype=typ),
"B_c": arr([0, 0, 1], dtype=typ),
}
)
expected = expected[["C", "A_a", "A_b", "B_b", "B_c"]]
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_list(self, df, sparse):
prefixes = ["from_A", "from_B"]
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame(
{
"C": [1, 2, 3],
"from_A_a": [1, 0, 1],
"from_A_b": [0, 1, 0],
"from_B_b": [1, 1, 0],
"from_B_c": [0, 0, 1],
},
dtype=np.uint8,
)
expected[["C"]] = df[["C"]]
cols = ["from_A_a", "from_A_b", "from_B_b", "from_B_c"]
expected = expected[["C"] + cols]
typ = SparseArray if sparse else Series
expected[cols] = expected[cols].apply(lambda x: typ(x))
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_str(self, df, sparse):
# not that you should do this...
result = get_dummies(df, prefix="bad", sparse=sparse)
bad_columns = ["bad_a", "bad_b", "bad_b", "bad_c"]
expected = DataFrame(
[[1, 1, 0, 1, 0], [2, 0, 1, 1, 0], [3, 1, 0, 0, 1]],
columns=["C"] + bad_columns,
dtype=np.uint8,
)
expected = expected.astype({"C": np.int64})
if sparse:
# work around astyping & assigning with duplicate columns
# https://github.com/pandas-dev/pandas/issues/14427
expected = pd.concat(
[
Series([1, 2, 3], name="C"),
Series([1, 0, 1], name="bad_a", dtype="Sparse[uint8]"),
Series([0, 1, 0], name="bad_b", dtype="Sparse[uint8]"),
Series([1, 1, 0], name="bad_b", dtype="Sparse[uint8]"),
Series([0, 0, 1], name="bad_c", dtype="Sparse[uint8]"),
],
axis=1,
)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_subset(self, df, sparse):
result = get_dummies(df, prefix=["from_A"], columns=["A"], sparse=sparse)
expected = DataFrame(
{
"B": ["b", "b", "c"],
"C": [1, 2, 3],
"from_A_a": [1, 0, 1],
"from_A_b": [0, 1, 0],
},
dtype=np.uint8,
)
expected[["C"]] = df[["C"]]
if sparse:
cols = ["from_A_a", "from_A_b"]
expected[cols] = expected[cols].astype(SparseDtype("uint8", 0))
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_sep(self, df, sparse):
result = get_dummies(df, prefix_sep="..", sparse=sparse)
expected = DataFrame(
{
"C": [1, 2, 3],
"A..a": [1, 0, 1],
"A..b": [0, 1, 0],
"B..b": [1, 1, 0],
"B..c": [0, 0, 1],
},
dtype=np.uint8,
)
expected[["C"]] = df[["C"]]
expected = expected[["C", "A..a", "A..b", "B..b", "B..c"]]
if sparse:
cols = ["A..a", "A..b", "B..b", "B..c"]
expected[cols] = expected[cols].astype(SparseDtype("uint8", 0))
tm.assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep=["..", "__"], sparse=sparse)
expected = expected.rename(columns={"B..b": "B__b", "B..c": "B__c"})
tm.assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep={"A": "..", "B": "__"}, sparse=sparse)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix=["too few"], sparse=sparse)
def test_dataframe_dummies_prefix_sep_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix_sep=["bad"], sparse=sparse)
def test_dataframe_dummies_prefix_dict(self, sparse):
prefixes = {"A": "from_A", "B": "from_B"}
df = DataFrame({"C": [1, 2, 3], "A": ["a", "b", "a"], "B": ["b", "b", "c"]})
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame(
{
"C": [1, 2, 3],
"from_A_a": [1, 0, 1],
"from_A_b": [0, 1, 0],
"from_B_b": [1, 1, 0],
"from_B_c": [0, 0, 1],
}
)
columns = ["from_A_a", "from_A_b", "from_B_b", "from_B_c"]
expected[columns] = expected[columns].astype(np.uint8)
if sparse:
expected[columns] = expected[columns].astype(SparseDtype("uint8", 0))
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_with_na(self, df, sparse, dtype):
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True, sparse=sparse, dtype=dtype).sort_index(
axis=1
)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame(
{
"C": [1, 2, 3, np.nan],
"A_a": arr([1, 0, 1, 0], dtype=typ),
"A_b": arr([0, 1, 0, 0], dtype=typ),
"A_nan": arr([0, 0, 0, 1], dtype=typ),
"B_b": arr([1, 1, 0, 0], dtype=typ),
"B_c": arr([0, 0, 1, 0], dtype=typ),
"B_nan": arr([0, 0, 0, 1], dtype=typ),
}
).sort_index(axis=1)
tm.assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, sparse=sparse, dtype=dtype)
expected = expected[["C", "A_a", "A_b", "B_b", "B_c"]]
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_with_categorical(self, df, sparse, dtype):
df["cat"] = Categorical(["x", "y", "y"])
result = get_dummies(df, sparse=sparse, dtype=dtype).sort_index(axis=1)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame(
{
"C": [1, 2, 3],
"A_a": arr([1, 0, 1], dtype=typ),
"A_b": arr([0, 1, 0], dtype=typ),
"B_b": arr([1, 1, 0], dtype=typ),
"B_c": arr([0, 0, 1], dtype=typ),
"cat_x": arr([1, 0, 0], dtype=typ),
"cat_y": arr([0, 1, 1], dtype=typ),
}
).sort_index(axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"get_dummies_kwargs,expected",
[
(
{"data": DataFrame({"ä": ["a"]})},
DataFrame({"ä_a": [1]}, dtype=np.uint8),
),
(
{"data": DataFrame({"x": ["ä"]})},
DataFrame({"x_ä": [1]}, dtype=np.uint8),
),
(
{"data": DataFrame({"x": ["a"]}), "prefix": "ä"},
DataFrame({"ä_a": [1]}, dtype=np.uint8),
),
(
{"data": DataFrame({"x": ["a"]}), "prefix_sep": "ä"},
DataFrame({"xäa": [1]}, dtype=np.uint8),
),
],
)
def test_dataframe_dummies_unicode(self, get_dummies_kwargs, expected):
# GH22084 get_dummies incorrectly encodes unicode characters
# in dataframe column names
result = get_dummies(**get_dummies_kwargs)
tm.assert_frame_equal(result, expected)
def test_get_dummies_basic_drop_first(self, sparse):
# GH12402 Add a new parameter `drop_first` to avoid collinearity
# Basic case
s_list = list("abc")
s_series = Series(s_list)
s_series_index = Series(s_list, list("ABC"))
expected = DataFrame({"b": [0, 1, 0], "c": [0, 0, 1]}, dtype=np.uint8)
result = get_dummies(s_list, drop_first=True, sparse=sparse)
if sparse:
expected = expected.apply(SparseArray, fill_value=0)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, drop_first=True, sparse=sparse)
tm.assert_frame_equal(result, expected)
expected.index = list("ABC")
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
tm.assert_frame_equal(result, expected)
def test_get_dummies_basic_drop_first_one_level(self, sparse):
# Test the case that categorical variable only has one level.
s_list = list("aaa")
s_series = Series(s_list)
s_series_index = Series(s_list, list("ABC"))
expected = DataFrame(index=np.arange(3))
result = get_dummies(s_list, drop_first=True, sparse=sparse)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, drop_first=True, sparse=sparse)
tm.assert_frame_equal(result, expected)
expected = DataFrame(index=list("ABC"))
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
tm.assert_frame_equal(result, expected)
def test_get_dummies_basic_drop_first_NA(self, sparse):
# Test NA handling together with drop_first
s_NA = ["a", "b", np.nan]
res = get_dummies(s_NA, drop_first=True, sparse=sparse)
exp = DataFrame({"b": [0, 1, 0]}, dtype=np.uint8)
if sparse:
exp = exp.apply(SparseArray, fill_value=0)
tm.assert_frame_equal(res, exp)
res_na = get_dummies(s_NA, dummy_na=True, drop_first=True, sparse=sparse)
exp_na = DataFrame({"b": [0, 1, 0], np.nan: [0, 0, 1]}, dtype=np.uint8).reindex(
["b", np.nan], axis=1
)
if sparse:
exp_na = exp_na.apply(SparseArray, fill_value=0)
tm.assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies(
[np.nan], dummy_na=True, drop_first=True, sparse=sparse
)
exp_just_na = DataFrame(index=np.arange(1))
tm.assert_frame_equal(res_just_na, exp_just_na)
def test_dataframe_dummies_drop_first(self, df, sparse):
df = df[["A", "B"]]
result = get_dummies(df, drop_first=True, sparse=sparse)
expected = DataFrame({"A_b": [0, 1, 0], "B_c": [0, 0, 1]}, dtype=np.uint8)
if sparse:
expected = expected.apply(SparseArray, fill_value=0)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_categorical(self, df, sparse, dtype):
df["cat"] = Categorical(["x", "y", "y"])
result = get_dummies(df, drop_first=True, sparse=sparse)
expected = DataFrame(
{"C": [1, 2, 3], "A_b": [0, 1, 0], "B_c": [0, 0, 1], "cat_y": [0, 1, 1]}
)
cols = ["A_b", "B_c", "cat_y"]
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[["C", "A_b", "B_c", "cat_y"]]
if sparse:
for col in cols:
expected[col] = SparseArray(expected[col])
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_na(self, df, sparse):
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(
df, dummy_na=True, drop_first=True, sparse=sparse
).sort_index(axis=1)
expected = DataFrame(
{
"C": [1, 2, 3, np.nan],
"A_b": [0, 1, 0, 0],
"A_nan": [0, 0, 0, 1],
"B_c": [0, 0, 1, 0],
"B_nan": [0, 0, 0, 1],
}
)
cols = ["A_b", "A_nan", "B_c", "B_nan"]
expected[cols] = expected[cols].astype(np.uint8)
expected = expected.sort_index(axis=1)
if sparse:
for col in cols:
expected[col] = SparseArray(expected[col])
tm.assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, drop_first=True, sparse=sparse)
expected = expected[["C", "A_b", "B_c"]]
tm.assert_frame_equal(result, expected)
def test_get_dummies_int_int(self):
data = Series([1, 2, 1])
result = get_dummies(data)
expected = DataFrame([[1, 0], [0, 1], [1, 0]], columns=[1, 2], dtype=np.uint8)
tm.assert_frame_equal(result, expected)
data = Series(Categorical(["a", "b", "a"]))
result = get_dummies(data)
expected = DataFrame(
[[1, 0], [0, 1], [1, 0]], columns=Categorical(["a", "b"]), dtype=np.uint8
)
tm.assert_frame_equal(result, expected)
def test_get_dummies_int_df(self, dtype):
data = DataFrame(
{
"A": [1, 2, 1],
"B": Categorical(["a", "b", "a"]),
"C": [1, 2, 1],
"D": [1.0, 2.0, 1.0],
}
)
columns = ["C", "D", "A_1", "A_2", "B_a", "B_b"]
expected = DataFrame(
[[1, 1.0, 1, 0, 1, 0], [2, 2.0, 0, 1, 0, 1], [1, 1.0, 1, 0, 1, 0]],
columns=columns,
)
expected[columns[2:]] = expected[columns[2:]].astype(dtype)
result = get_dummies(data, columns=["A", "B"], dtype=dtype)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_dataframe_dummies_preserve_categorical_dtype(self, dtype, ordered):
# GH13854
cat = Categorical(list("xy"), categories=list("xyz"), ordered=ordered)
result = get_dummies(cat, dtype=dtype)
data = np.array([[1, 0, 0], [0, 1, 0]], dtype=self.effective_dtype(dtype))
cols = CategoricalIndex(
cat.categories, categories=cat.categories, ordered=ordered
)
expected = DataFrame(data, columns=cols, dtype=self.effective_dtype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("sparse", [True, False])
def test_get_dummies_dont_sparsify_all_columns(self, sparse):
# GH18914
df = DataFrame.from_dict(dict([("GDP", [1, 2]), ("Nation", ["AB", "CD"])]))
df = get_dummies(df, columns=["Nation"], sparse=sparse)
df2 = df.reindex(columns=["GDP"])
tm.assert_frame_equal(df[["GDP"]], df2)
def test_get_dummies_duplicate_columns(self, df):
# GH20839
df.columns = ["A", "A", "A"]
result = get_dummies(df).sort_index(axis=1)
expected = DataFrame(
[[1, 1, 0, 1, 0], [2, 0, 1, 1, 0], [3, 1, 0, 0, 1]],
columns=["A", "A_a", "A_b", "A_b", "A_c"],
dtype=np.uint8,
).sort_index(axis=1)
expected = expected.astype({"A": np.int64})
tm.assert_frame_equal(result, expected)
def test_get_dummies_all_sparse(self):
df = DataFrame({"A": [1, 2]})
result = get_dummies(df, columns=["A"], sparse=True)
dtype = SparseDtype("uint8", 0)
expected = DataFrame(
{
"A_1": SparseArray([1, 0], dtype=dtype),
"A_2": | SparseArray([0, 1], dtype=dtype) | pandas.core.arrays.sparse.SparseArray |
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm as cm
import seaborn as sns
sns.set_style("whitegrid")
import sys
import os
from pathlib import Path
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold, GridSearchCV, StratifiedKFold,RepeatedKFold, learning_curve
from xgboost.sklearn import XGBClassifier
from utils import data_handler
from utils import bayesiantests as bt
root_dir = str(Path(os.getcwd())) #.parent
to_dir = root_dir + '/results/'
import warnings
warnings.filterwarnings('ignore')
#res= None
##------------------------------ font, fig size setup------------------------------
plt.rc('font', family='serif')
def set_fig_fonts(SMALL_SIZE=22, MEDIUM_SIZE=24,BIGGER_SIZE = 26):
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
set_fig_fonts()
##------------------------------functions----------------------------------------
def save_fig(fig, title):
to_path = data_handler.format_title(to_dir,title,'.png')
fig.savefig(to_path ,dpi=1000,bbox_inches="tight",pad_inches=0)#, bbox_inches='tight', pad_inches=10
print("Successfully saved to: ",to_path)
return to_path
def plot_correlation_matrix(X,title, col_list, toSaveFig=True):
set_fig_fonts(12,14,16)
# standardization
scaler = StandardScaler()
df_transf = scaler.fit_transform(X)
df = pd.DataFrame(df_transf,columns = col_list)
fig = plt.figure()
ax1 = fig.add_subplot(111)
cmap = cm.get_cmap('coolwarm', 30)
#cax = ax1.pcolor(df.corr(), cmap=cmap, vmin=-1, vmax=1)
mat = df.corr()
flip_mat = mat.iloc[::-1]
cax = ax1.imshow(flip_mat , interpolation="nearest", cmap=cmap,vmin=-1, vmax=1)
ax1.grid(True)
#plt.suptitle('Features\' Correlation', y =0)
labels=df.columns.tolist()
x_labels = labels.copy()
labels.reverse()
#ax1.xaxis.set_ticks_position('top')
ax1.set_xticks(np.arange(len(labels)))#np.arange(len(labels))
ax1.set_yticks(np.arange(len(labels)))
# want a more natural, table-like display
#ax1.xaxis.tick_top()
ax1.set_xticklabels(x_labels, rotation = -45, ha="left") #, , rotation = 45,horizontalalignment="left"
ax1.set_yticklabels(labels, ha="right")
#plt.xticks(rotation=90)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
fig.colorbar(cax, boundaries=np.linspace(-1,1,21),ticks=np.linspace(-1,1,5))
plt.show()
if(toSaveFig):
save_fig(fig,title+'_confusion_matrix')
set_fig_fonts()
def plot_ROC_curve(pipe, tuned_parameters, title = 'roc_curve', save_csv = True,task=0):
# cross validation settup
Ntrials = 1
outter_nsplit = 10
inner_nsplit = 10
# Results store
Y_true = pd.Series(name='Y_true')
pred_results = pd.Series(name='pred_prob')
# load data
assert (task ==0 or task ==2),'Error: invalid task spec!'
X_df, Y_df = data_handler.load_XY(task)
X = X_df.values
Y = Y_df.values
for i in range(Ntrials):
train_index = []
test_index = []
outer_cv = StratifiedKFold(n_splits=outter_nsplit, shuffle=True, random_state=i)
for train_ind,test_ind in outer_cv.split(X,Y):
train_index.append(train_ind.tolist())
test_index.append(test_ind.tolist())
for j in range(outter_nsplit):#outter_nsplit
print("progress >> ",j,' / ',outter_nsplit)
X_train = X[train_index[j]]
Y_train = Y[train_index[j]]
X_test = X[test_index[j]]
Y_test = Y[test_index[j]]
inner_cv = StratifiedKFold(n_splits=inner_nsplit, shuffle=False, random_state=j)
clf = GridSearchCV(pipe,tuned_parameters, cv=inner_cv,scoring='roc_auc')
clf.fit(X_train, Y_train)
pred = pd.Series(clf.predict_proba(X_test)[:,1])
pred_results = pd.concat([pred_results, pred], axis=0,ignore_index=True)
Y_test_df = pd.Series(Y_test,name='Y_test')
Y_true = pd.concat([Y_true,Y_test_df], axis=0,ignore_index=True)
# plotting
fpr, tpr, thresholds = metrics.roc_curve(Y_true,pred_results)
roc_auc = metrics.auc(fpr, tpr)
auc_value = metrics.roc_auc_score(Y_true, pred_results)
fig = plt.figure(figsize=(12,12/1.618))
ax1 = fig.add_subplot(111)
labl = np.linspace(0,1,6)
labels = [float("{0:.2f}".format(x)) for x in labl]
ax1.set_xticks(labels)
ax1.set_xticklabels(labels)
labels[0] = ''
ax1.set_yticklabels(labels)
plt.grid(False)
ax1.plot(fpr, tpr, lw=2, label='ROC curve (area = {:.2f})'.format(auc_value),marker='.', linestyle='-', color='b')
ax1.plot([0,1],[0,1], linestyle='--', color='k')
ax1.set_xlabel('False Positive Rate')
ax1.set_ylabel('True Positive Rate')
ax1.set_xlim(0, 1)
ax1.set_ylim(0,1)
ax1.legend(loc='lower right')
color = 'black'
plt.setp(ax1.spines.values(), color=color)
ax1.yaxis.set_visible(True)
ax1.xaxis.set_visible(True)
ax1.yaxis.set_ticks_position('left')
ax1.xaxis.set_ticks_position('bottom')
ax1.get_yaxis().set_tick_params(direction='out', width=2)
plt.show()
fig.savefig(data_handler.format_title(to_dir,title+'_ROC_curve','.png'),dpi=1000,bbox_inches="tight",pad_inches=0)
# save results to csv if true
if save_csv:
data_mat = np.array([fpr,tpr]).T
ret = pd.DataFrame(data_mat,columns=['fpr','tpr'])
data_handler.save_csv(ret,title+'_ROC_curve')
return True;
def plot_learning_curve_versus_tr_epoch(title='',ntrials=1, nfolds=10, save_csv=False,verbose=True, save_fig=False):
X_df,Y_df = data_handler.load_XY()
X = X_df.values
Y = Y_df.values
_ylabel = 'Mean AUROC'
n_jobs=4
# cross validation settup
Ntrials = ntrials
outter_nsplit = nfolds
tot_count = Ntrials * outter_nsplit
# Results store
train_mat = np.zeros((tot_count,500))
test_mat = np.zeros((tot_count,500))
for i in range(Ntrials):
init_time = time.time()
print("trial = ",i)
train_index = []
test_index = []
outer_cv = StratifiedKFold(n_splits=outter_nsplit, shuffle=True, random_state=i)
for train_ind,test_ind in outer_cv.split(X,Y):
train_index.append(train_ind.tolist())
test_index.append(test_ind.tolist())
for j in range(outter_nsplit):#outter_nsplit
count = i * outter_nsplit + j
print(str(count), " / ",str(tot_count))
X_train = X[train_index[j]]
Y_train = Y[train_index[j]]
X_test = X[test_index[j]]
Y_test = Y[test_index[j]]
eval_sets = [(X_train, Y_train), (X_test,Y_test)]
clf = XGBClassifier(objective="binary:logistic",min_child_weight=1,**{'tree_method':'exact'},silent=True,
n_jobs=4,random_state=3,seed=3,
learning_rate=0.01,
colsample_bylevel=0.9,
colsample_bytree=0.9,
n_estimators=500,
gamma=0.8,
max_depth =11,
reg_lambda = 0.8,
subsample=0.4)
clf.fit(X_train,Y_train, eval_metric=['auc'], eval_set = eval_sets, verbose=False)
results = clf.evals_result()
epochs = len(results['validation_0']['auc'])
# record results
train_mat[count] = results['validation_0']['auc']
test_mat[count] = results['validation_1']['auc']
if(verbose):
print('Iter: %d, epochs: %d'%(count, epochs))
print('training result: %.4f, testing result: %.4f'%(train_mat[count][499], test_mat[count][499]))
print('total time: %.4f mins'% ((time.time()-init_time)/60))
# Results store
epoch_lists=list(range(1,epochs+1))
train_results = pd.DataFrame(data=train_mat,columns=['epoch_'+str(i) for i in epoch_lists])
test_results = pd.DataFrame(data=test_mat,columns=['epoch_'+str(i) for i in epoch_lists])
if(save_csv):
data_handler.save_csv(train_results,title='mos2_learning_curve_train_raw')
data_handler.save_csv(test_results,title='mos2_learning_curve_test_raw')
print('end')
_ylim=(0.5, 1.01)
n_jobs=4
# create learning curve values
train_scores_mean = np.mean(train_mat, axis=0)
train_scores_std = np.std(train_mat, axis=0)
test_scores_mean = np.mean(test_mat, axis=0)
test_scores_std = np.std(test_mat, axis=0)
tr_size_df = pd.Series(epoch_lists, name='training_epoch')
tr_sc_m_df = pd.Series(train_scores_mean, name='training_score_mean')
val_sc_m_df = pd.Series(test_scores_mean, name='val_score_mean')
tr_sc_std_df = pd.Series(train_scores_std, name='training_score_std')
val_sc_std_df = pd.Series(test_scores_std, name='val_score_std')
if(save_csv):
res = | pd.concat([tr_size_df, tr_sc_m_df,val_sc_m_df,tr_sc_std_df,val_sc_std_df], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 11:41:44 2018
@author: MichaelEK
"""
import pandas as pd
import numpy as np
from pdsf import sflake as sf
from utils import json_filters, geojson_convert, process_limit_data, assign_notes, get_json_from_api
def process_limits(param):
"""
"""
run_time_start = | pd.Timestamp.today() | pandas.Timestamp.today |