| column | type |
|---|---|
| prompt | string (19 to 1.03M characters) |
| completion | string (4 to 2.12k characters) |
| api | string (8 to 90 characters) |
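Each row pairs a code prompt (source truncated at a call site), the completion that fills that call, and the fully qualified api name; in the samples below the three fields appear inline as `prompt | completion | api`. As a minimal sketch of how such rows could be consumed (an assumption: the rows are exported as JSON Lines with these three fields, under a hypothetical filename; the actual storage format is not shown here):

import pandas as pd

# Load the rows and inspect one prompt/completion/api triple.
rows = pd.read_json("prompt_completion_api.jsonl", lines=True)  # hypothetical path
first = rows.iloc[0]
print(first["api"])         # e.g. "pandas.DataFrame"
print(first["completion"])  # e.g. "pd.DataFrame(columns=columns)"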
# -*- coding: utf-8 -*-
import io
import time
import boto3
from boto3.s3.transfer import TransferConfig
from icecream import ic
import awswrangler as wr
from datetime import datetime
import pandas as pd
from pathlib_mate import Path
boto_ses = boto3.session.Session()
s3_client = boto_ses.client("s3")
class Config:
bucket = "aws-data-lab-sanhe-aws-etl-solutions"
key_prefix = "s3splitmerge/poc/merge-multiple-json-file"
n_file = 3
n_records_per_file = 150000
bucket = "aws-data-lab-sanhe-aws-etl-solutions"
key_prefix = "s3splitmerge/poc/merge-multiple-json-file"
def create_test_data():
n_file = 3
n_records_per_file = 150000
columns = ["id", "value"]
value = "<EMAIL>"
for nth_file in range(1, 1+n_file):
start_id = (nth_file - 1) * n_records_per_file + 1
end_id = start_id + n_records_per_file
df = | pd.DataFrame(columns=columns) | pandas.DataFrame |
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: all,-execution,-papermill,-trusted
# formats: ipynb,py//py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.7.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown] tags=[]
# # Description
# %% [markdown] tags=[]
# This notebook is similar to `30-gls_on_phenotypes-phenomexcan.ipynb` but for eMERGE S-MultiXcan results instead of PhenomeXcan.
#
# Since we don't have partitions/clusters of the eMERGE results, we selected the same set of LVs used in the PhenomeXcan run (`30-gls_on_phenotypes-phenomexcan.ipynb`); regarding traits, we take the top 20 traits from each LV to build a list of trait/LV pairs on which to run GLSPhenoplier.
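# %% [markdown] tags=[]
# A rough, self-contained sketch of this top-traits-per-LV selection is shown in the next cell (added illustration, not part of the original notebook); it uses toy data because the orientation and contents of the real projection matrix are not shown in this excerpt.
# %% tags=[]
# Toy example only: LVs as rows, traits as columns, top traits chosen by absolute
# score. The names, shapes and the use of absolute values are assumptions.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
toy_proj = pd.DataFrame(
    rng.normal(size=(2, 5)),
    index=["LV1", "LV2"],
    columns=[f"trait{i}" for i in range(5)],
)
n_top = 3  # the real run uses N_TOP_TRAITS_FROM_LV = 20 (defined later in this notebook)
trait_lv_pairs = [
    (trait, lv)
    for lv in toy_proj.index
    for trait in toy_proj.loc[lv].abs().nlargest(n_top).index
]
trait_lv_pairs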
# %% [markdown] tags=[]
# # Environment variables
# %% tags=[]
# %load_ext autoreload
# %autoreload 2
# %% tags=[]
import conf
# %% tags=[]
N_JOBS = conf.GENERAL["N_JOBS"]
display(N_JOBS)
# %% tags=[]
# %env MKL_NUM_THREADS=$N_JOBS
# %env OPEN_BLAS_NUM_THREADS=$N_JOBS
# %env NUMEXPR_NUM_THREADS=$N_JOBS
# %env OMP_NUM_THREADS=$N_JOBS
# %% [markdown] tags=[]
# # Modules
# %% tags=[]
from pathlib import Path
import statsmodels.api as sm
import numpy as np
import pandas as pd
from sklearn.preprocessing import scale
from tqdm import tqdm
from gls import GLSPhenoplier
# %% [markdown] tags=[]
# # Settings
# %% tags=[]
N_TOP_TRAITS_FROM_LV = 20
# %% tags=[]
OUTPUT_DIR = conf.RESULTS["GLS"]
display(OUTPUT_DIR)
OUTPUT_DIR.mkdir(exist_ok=True, parents=True)
# %% tags=[]
OUTPUT_FILENAME = OUTPUT_DIR / "gls_phenotypes-emerge.pkl"
display(OUTPUT_FILENAME)
# %% [markdown] tags=[]
# # Load data
# %% [markdown] tags=[]
# ## eMERGE traits info
# %% tags=[]
# FIXME: in the future, there will be a specific entry in config for the eMERGE directory that should be replaced here
input_filepath = Path(
conf.DATA_DIR,
"emerge",
"eMERGE_III_PMBB_GSA_v2_2020_phecode_AFR_EUR_cc50_counts_w_dictionary.txt",
).resolve()
display(input_filepath)
# %% tags=[]
emerge_traits_df = pd.read_csv(
input_filepath,
sep="\t",
dtype={"phecode": str},
usecols=["phecode", "phenotype", "category"],
)
# %% tags=[]
emerge_traits_df = emerge_traits_df.rename(
columns={
"phenotype": "phecode_phenotype",
"category": "phecode_category",
}
)
# %% tags=[]
emerge_traits_df.shape
# %% tags=[]
emerge_traits_df.head()
# %% [markdown] tags=[]
# ## eMERGE (S-MultiXcan) projection
# %% tags=[]
input_filepath = Path(
conf.RESULTS["PROJECTIONS_DIR"],
"projection-emerge-smultixcan-mashr-zscores.pkl",
).resolve()
display(input_filepath)
# %% tags=[]
emerge_projection = pd.read_pickle(input_filepath)
# %% tags=[]
emerge_projection.shape
# %% tags=[]
emerge_projection.head()
# %% [markdown] tags=[]
# ## eMERGE (S-MultiXcan)
# %% tags=[]
# FIXME: in the future, there will be a specific entry in config for the eMERGE directory that should be replaced here
emerge_smultixcan_zscores_filepath = Path(
conf.DATA_DIR,
"emerge",
"gene_assoc",
"emerge-smultixcan-mashr-zscores.pkl",
).resolve()
display(emerge_smultixcan_zscores_filepath)
# %% tags=[]
_tmp = pd.read_pickle(emerge_smultixcan_zscores_filepath)
# %% tags=[]
_tmp.shape
# %% tags=[]
_tmp.head()
# %% [markdown] tags=[]
# ## GLS results on PhenomeXcan
# %% [markdown] tags=[]
# Read results obtained with `30-gls_on_phenotypes.ipynb` (PhenomeXcan)
# %% tags=[]
input_filepath = conf.RESULTS["GLS"] / "gls_phenotypes-phenomexcan.pkl"
display(input_filepath)
# %% tags=[]
gls_phenomexcan = pd.read_pickle(input_filepath)
# %% tags=[]
gls_phenomexcan.shape
# %% tags=[]
gls_phenomexcan.head()
# %% [markdown] tags=[]
# ## MultiPLIER summary
# %% tags=[]
multiplier_model_summary = | pd.read_pickle(conf.MULTIPLIER["MODEL_SUMMARY_FILE"]) | pandas.read_pickle |
# Copyright 2020, 37.78 Tecnologia Ltda.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
import pickle
from sklearn.preprocessing import MultiLabelBinarizer
from constants import DATA_DIR
import utils
class MIMIC_Dataset:
def __init__(self):
self.name = 'MIMIC'
def load_preprocessed(self, path=DATA_DIR):
with open(f'{path}mimic3_data.pkl', 'rb') as file:
self.df = pickle.load(file)
def save_preprocessed(self, path=DATA_DIR):
pd.to_pickle(self.df, f'{path}mimic3_data.pkl')
def preprocess(self, verbose=1):
df_text = (pd.read_csv(f'{DATA_DIR}NOTEEVENTS.csv.gz')
.query("CATEGORY == 'Discharge summary'")
.drop_duplicates('TEXT')
.drop_duplicates('HADM_ID')
[['SUBJECT_ID','HADM_ID','TEXT']])
df_icds = ( | pd.read_csv(f'{DATA_DIR}DIAGNOSES_ICD.csv.gz') | pandas.read_csv |
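# --- Added illustration (not part of the MIMIC sample above) -----------------
# The preprocess step is truncated at the DIAGNOSES_ICD read, but the imported
# MultiLabelBinarizer suggests the ICD codes are later grouped per admission and
# one-hot encoded. A self-contained sketch of that pattern on toy data; column
# names such as HADM_ID and ICD9_CODE are assumptions about the MIMIC-III schema.
import pandas as pd
from sklearn.preprocessing import MultiLabelBinarizer

toy_icds = pd.DataFrame({
    "HADM_ID": [1, 1, 2],
    "ICD9_CODE": ["4019", "25000", "4019"],
})
codes_per_admission = toy_icds.groupby("HADM_ID")["ICD9_CODE"].apply(list)
mlb = MultiLabelBinarizer()
labels = mlb.fit_transform(codes_per_admission)  # shape: (admissions, unique codes)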
import os
import df2img
import disnake
import pandas as pd
import bots.config_discordbot as cfg
from bots.config_discordbot import gst_imgur, logger
from bots.helpers import save_image
from bots.menus.menu import Menu
from gamestonk_terminal.stocks.due_diligence import ark_model
def arktrades_command(ticker: str = "", num: int = 10):
"""Displays trades made by ark [cathiesark.com]"""
# Debug user input
if cfg.DEBUG:
logger.debug("dd-arktrades %s", ticker)
if ticker:
ark_holdings = ark_model.get_ark_trades_by_ticker(ticker)
if ark_holdings.empty:
raise Exception(
"Issue getting data from cathiesark.com. Likely no trades found.\n"
)
ark_holdings["Total"] = ark_holdings["Total"] / 1_000_000
ark_holdings.rename(columns={"direction": "B/S", "weight": "F %"}, inplace=True)
ark_holdings = ark_holdings.drop(
columns=["ticker", "everything.profile.companyName"]
)
ark_holdings.index = | pd.Series(ark_holdings.index) | pandas.Series |
"""SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
import csv
from datetime import date, datetime, time
from io import StringIO
import sqlite3
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.io.sql as sql
from pandas.io.sql import read_sql_query, read_sql_table
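# --- Added illustration (not one of the tests below) -------------------------
# The module docstring above describes tests of the public API; the core round
# trip those tests exercise is simply DataFrame.to_sql followed by a read back
# through read_sql_query, here against an in-memory sqlite3 connection:
def _example_public_api_roundtrip():
    conn = sqlite3.connect(":memory:")
    df = DataFrame({"a": [1, 2], "b": ["x", "y"]})
    df.to_sql("demo", conn, index=False)
    return read_sql_query("SELECT * FROM demo", conn)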
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
"create_iris": {
"sqlite": """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
"mysql": """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
"postgresql": """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)""",
},
"insert_iris": {
"sqlite": """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
"mysql": """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
"postgresql": """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);""",
},
"create_test_types": {
"sqlite": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
"mysql": """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`IntDateOnlyCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
"postgresql": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)""",
},
"insert_test_types": {
"sqlite": {
"query": """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"mysql": {
"query": """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"postgresql": {
"query": """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"DateColWithTz",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
},
"read_parameters": {
"sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
"mysql": 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
"postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
},
"read_named_parameters": {
"sqlite": """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
"mysql": """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
"postgresql": """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
""",
},
"create_view": {
"sqlite": """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
},
}
class MixInBase:
def teardown_method(self, method):
# if setup fails, there may not be a connection to close.
if hasattr(self, "conn"):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}")
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute("SHOW TABLES")
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute(
f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}"
)
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest:
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, "execute"):
return self.conn
else:
return self.conn.cursor()
@pytest.fixture(params=[("data", "iris.csv")])
def load_iris_data(self, datapath, request):
import io
iris_csv_file = datapath(*request.param)
if not hasattr(self, "conn"):
self.setup_connect()
self.drop_table("iris")
self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor])
with io.open(iris_csv_file, mode="r", newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS["insert_iris"][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table("iris_view")
self._get_exec().execute(SQL_STRINGS["create_view"][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _load_test1_data(self):
columns = ["index", "A", "B", "C", "D"]
data = [
(
"2000-01-03 00:00:00",
0.980268513777,
3.68573087906,
-0.364216805298,
-1.15973806169,
),
(
"2000-01-04 00:00:00",
1.04791624281,
-0.0412318367011,
-0.16181208307,
0.212549316967,
),
(
"2000-01-05 00:00:00",
0.498580885705,
0.731167677815,
-0.537677223318,
1.34627041952,
),
(
"2000-01-06 00:00:00",
1.12020151869,
1.56762092543,
0.00364077397681,
0.67525259227,
),
]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(
dict(
A=[4, 1, 3, 6],
B=["asd", "gsq", "ylt", "jkl"],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
)
)
df["E"] = to_datetime(df["E"])
self.test_frame2 = df
def _load_test3_data(self):
columns = ["index", "A", "B"]
data = [
("2000-01-03 00:00:00", 2 ** 31 - 1, -1.987670),
("2000-01-04 00:00:00", -29, -0.0412318367011),
("2000-01-05 00:00:00", 20000, 0.731167677815),
("2000-01-06 00:00:00", -290867, 1.56762092543),
]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table("types_test_data")
self._get_exec().execute(SQL_STRINGS["create_test_types"][self.flavor])
ins = SQL_STRINGS["insert_test_types"][self.flavor]
data = [
{
"TextCol": "first",
"DateCol": "2000-01-03 00:00:00",
"DateColWithTz": "2000-01-01 00:00:00-08:00",
"IntDateCol": 535852800,
"IntDateOnlyCol": 20101010,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": 1,
"BoolColWithNull": False,
},
{
"TextCol": "first",
"DateCol": "2000-01-04 00:00:00",
"DateColWithTz": "2000-06-01 00:00:00-07:00",
"IntDateCol": 1356998400,
"IntDateOnlyCol": 20101212,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": None,
"BoolColWithNull": None,
},
]
for d in data:
self._get_exec().execute(
ins["query"], [d[field] for field in ins["fields"]]
)
def _count_rows(self, table_name):
result = (
self._get_exec()
.execute(f"SELECT count(*) AS count_1 FROM {table_name}")
.fetchone()
)
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS["read_parameters"][self.flavor]
params = ["Iris-setosa", 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS["read_named_parameters"][self.flavor]
params = {"name": "Iris-setosa", "length": 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self, method=None):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=method)
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _to_sql_empty(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], "test_frame1")
def _to_sql_fail(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
assert self.pandasSQL.has_table("test_frame1")
msg = "Table 'test_frame1' already exists"
with pytest.raises(ValueError, match=msg):
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
self.drop_table("test_frame1")
def _to_sql_replace(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="replace")
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_append(self):
# Nuke table just in case
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="append")
assert self.pandasSQL.has_table("test_frame1")
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_method_callable(self):
check = [] # used to double check function below is really being used
def sample(pd_table, conn, keys, data_iter):
check.append(1)
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(pd_table.table.insert(), data)
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=sample)
assert self.pandasSQL.has_table("test_frame1")
assert check == [1]
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _roundtrip(self):
self.drop_table("test_frame_roundtrip")
self.pandasSQL.to_sql(self.test_frame1, "test_frame_roundtrip")
result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
result.set_index("level_0", inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _to_sql_save_index(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"]
)
self.pandasSQL.to_sql(df, "test_to_sql_saves_index")
ix_cols = self._get_index_columns("test_to_sql_saves_index")
assert ix_cols == [["A"]]
def _transaction_test(self):
with self.pandasSQL.run_transaction() as trans:
trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
class DummyException(Exception):
pass
# Make sure when transaction is rolled back, no rows get inserted
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise DummyException("error")
except DummyException:
# ignore raised exception
pass
res = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res2) == 1
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode; it should be correctly ignored.
We don't use drop_table because that isn't part of the public API.
"""
flavor = "sqlite"
mode: str
def setup_connect(self):
self.conn = self.connect()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def load_test_data_and_sql(self):
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, "test_frame1", self.conn)
assert sql.has_table("test_frame1", self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
assert sql.has_table("test_frame2", self.conn)
msg = "Table 'test_frame2' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="replace")
assert sql.has_table("test_frame3", self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame3")
assert num_rows == num_entries
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="append")
assert sql.has_table("test_frame4", self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame4")
assert num_rows == num_entries
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, "test_frame5", self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype="int64"), name="series")
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, "test_frame_roundtrip", con=self.conn)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index("level_0", inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(
self.test_frame1,
"test_frame_roundtrip",
con=self.conn,
index=False,
chunksize=2,
)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def test_date_parsing(self):
# Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["DateCol"]
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"IntDateOnlyCol": "%Y%m%d"},
)
assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64)
assert df.IntDateOnlyCol.tolist() == [
pd.Timestamp("2010-10-10"),
pd.Timestamp("2010-12-12"),
]
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
index_col="DateCol",
parse_dates=["DateCol", "IntDateCol"],
)
assert issubclass(df.index.dtype.type, np.datetime64)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql("test_timedelta", self.conn)
result = sql.read_sql_query("SELECT * FROM test_timedelta", self.conn)
tm.assert_series_equal(result["foo"], df["foo"].astype("int64"))
def test_complex_raises(self):
df = DataFrame({"a": [1 + 1j, 2j]})
msg = "Complex datatypes not supported"
with pytest.raises(ValueError, match=msg):
df.to_sql("test_complex", self.conn)
@pytest.mark.parametrize(
"index_name,index_label,expected",
[
# no index name, defaults to 'index'
(None, None, "index"),
# specifying index_label
(None, "other_label", "other_label"),
# using the index name
("index_name", None, "index_name"),
# has index name, but specifying index_label
("index_name", "other_label", "other_label"),
# index name is integer
(0, None, "0"),
# index name is None but index label is integer
(None, 0, "0"),
],
)
def test_to_sql_index_label(self, index_name, index_label, expected):
temp_frame = DataFrame({"col1": range(4)})
temp_frame.index.name = index_name
query = "SELECT * FROM test_index_label"
sql.to_sql(temp_frame, "test_index_label", self.conn, index_label=index_label)
frame = sql.read_sql_query(query, self.conn)
assert frame.columns[0] == expected
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame(
{"col1": range(4)},
index=MultiIndex.from_product([("A0", "A1"), ("B0", "B1")]),
)
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, "test_index_label", self.conn)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[0] == "level_0"
assert frame.columns[1] == "level_1"
# specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["A", "B"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# using the index name
temp_frame.index.names = ["A", "B"]
sql.to_sql(temp_frame, "test_index_label", self.conn, if_exists="replace")
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# has index name, but specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["C", "D"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["C", "D"]
msg = "Length of 'index_label' should match number of levels, which is 2"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label="C",
)
def test_multiindex_roundtrip(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")],
columns=["A", "B", "C"],
index=["A", "B"],
)
df.to_sql("test_multiindex_roundtrip", self.conn)
result = sql.read_sql_query(
"SELECT * FROM test_multiindex_roundtrip", self.conn, index_col=["A", "B"]
)
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn, if_exists="replace")
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, "test", con=self.conn)
assert "CREATE" in create_sql
def test_get_schema_dtypes(self):
float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == "sqlalchemy" else "INTEGER"
create_sql = sql.get_schema(
float_frame, "test", con=self.conn, dtype={"b": dtype}
)
assert "CREATE" in create_sql
assert "INTEGER" in create_sql
def test_get_schema_keys(self):
frame = DataFrame({"Col1": [1.1, 1.2], "Col2": [2.1, 2.2]})
create_sql = sql.get_schema(frame, "test", con=self.conn, keys="Col1")
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
assert constraint_sentence in create_sql
# multiple columns as key (GH10385)
create_sql = sql.get_schema(
self.test_frame1, "test", con=self.conn, keys=["A", "B"]
)
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
assert constraint_sentence in create_sql
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list("abcde"))
df.to_sql("test_chunksize", self.conn, index=False)
# reading the query in one time
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query(
"select * from test_chunksize", self.conn, chunksize=5
):
res2 = concat([res2, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res2)
# reading the query in chunks with read_sql_query
if self.mode == "sqlalchemy":
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn, chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{
"person_id": [1, 2, 3],
"person_name": ["<NAME>", "<NAME>", "<NAME>"],
}
)
df2 = df.copy()
df2["person_name"] = df2["person_name"].astype("category")
df2.to_sql("test_categorical", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_categorical", self.conn)
tm.assert_frame_equal(res, df)
def test_unicode_column_name(self):
# GH 11431
df = DataFrame([[1, 2], [3, 4]], columns=["\xe9", "b"])
df.to_sql("test_unicode", self.conn, index=False)
def test_escaped_table_name(self):
# GH 13206
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("d1187b08-4943-4c8d-a7f6", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM `d1187b08-4943-4c8d-a7f6`", self.conn)
tm.assert_frame_equal(res, df)
@pytest.mark.single
@pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="SQLAlchemy not installed")
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly
Tests for `read_sql_table` are included here, as this is specific for the
sqlalchemy mode.
"""
flavor = "sqlite"
mode = "sqlalchemy"
def connect(self):
return sqlalchemy.create_engine("sqlite:///:memory:")
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
cols = ["A", "B"]
result = sql.read_sql_table("test_frame", self.conn, columns=cols)
assert result.columns.tolist() == cols
def test_read_table_index_col(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
result = sql.read_sql_table("test_frame", self.conn, index_col="index")
assert result.index.names == ["index"]
result = sql.read_sql_table("test_frame", self.conn, index_col=["A", "B"])
assert result.index.names == ["A", "B"]
result = sql.read_sql_table(
"test_frame", self.conn, index_col=["A", "B"], columns=["C", "D"]
)
assert result.index.names == ["A", "B"]
assert result.columns.tolist() == ["C", "D"]
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table("iris", self.conn)
iris_frame2 = sql.read_sql("iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
sql.read_sql_table("other_table", self.conn)
sql.read_sql_query("SELECT * FROM other_table", self.conn)
# Verify some things
assert len(w) == 0
def test_warning_case_insensitive_table_name(self):
# see gh-7815
#
# We can't test that this warning is triggered, as the database
# configuration would have to be altered. But here we test that
# the warning is certainly NOT triggered in a normal case.
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# This should not trigger a Warning
self.test_frame1.to_sql("CaseSensitive", self.conn)
# Verify some things
assert len(w) == 0
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes("test_index_saved")
ixs = [i["column_names"] for i in ixs]
return ixs
def test_sqlalchemy_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
# GH 9086: TIMESTAMP is the suggested type for datetimes with timezones
assert isinstance(table.table.c["time"].type, sqltypes.TIMESTAMP)
def test_database_uri_string(self):
# Test read_sql and .to_sql method with a database URI (GH10654)
test_frame1 = self.test_frame1
# db_uri = 'sqlite:///:memory:' # raises
# sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near
# "iris": syntax error [SQL: 'iris']
with tm.ensure_clean() as name:
db_uri = "sqlite:///" + name
table = "iris"
test_frame1.to_sql(table, db_uri, if_exists="replace", index=False)
test_frame2 = sql.read_sql(table, db_uri)
test_frame3 = sql.read_sql_table(table, db_uri)
query = "SELECT * FROM iris"
test_frame4 = sql.read_sql_query(query, db_uri)
tm.assert_frame_equal(test_frame1, test_frame2)
tm.assert_frame_equal(test_frame1, test_frame3)
tm.assert_frame_equal(test_frame1, test_frame4)
# using driver that will not be installed on Travis to trigger error
# in sqlalchemy.create_engine -> test passing of this error to user
try:
# the rest of this test depends on pg8000's being absent
import pg8000 # noqa
pytest.skip("pg8000 is installed")
except ImportError:
pass
db_uri = "postgresql+pg8000://user:pass@host/dbname"
with pytest.raises(ImportError, match="pg8000"):
sql.read_sql("select * from table", db_uri)
def _make_iris_table_metadata(self):
sa = sqlalchemy
metadata = sa.MetaData()
iris = sa.Table(
"iris",
metadata,
sa.Column("SepalLength", sa.REAL),
sa.Column("SepalWidth", sa.REAL),
sa.Column("PetalLength", sa.REAL),
sa.Column("PetalWidth", sa.REAL),
sa.Column("Name", sa.TEXT),
)
return iris
def test_query_by_text_obj(self):
# WIP : GH10846
name_text = sqlalchemy.text("select * from iris where name=:name")
iris_df = sql.read_sql(name_text, self.conn, params={"name": "Iris-versicolor"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-versicolor"}
def test_query_by_select_obj(self):
# WIP : GH10846
iris = self._make_iris_table_metadata()
name_select = sqlalchemy.select([iris]).where(
iris.c.Name == sqlalchemy.bindparam("name")
)
iris_df = sql.read_sql(name_select, self.conn, params={"name": "Iris-setosa"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-setosa"}
class _EngineToConnMixin:
"""
A mixin that causes setup_connect to create a conn rather than an engine.
"""
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
super().load_test_data_and_sql()
engine = self.conn
conn = engine.connect()
self.__tx = conn.begin()
self.pandasSQL = sql.SQLDatabase(conn)
self.__engine = engine
self.conn = conn
yield
self.__tx.rollback()
self.conn.close()
self.conn = self.__engine
self.pandasSQL = sql.SQLDatabase(self.__engine)
# XXX:
# super().teardown_method(method)
@pytest.mark.single
class TestSQLApiConn(_EngineToConnMixin, TestSQLApi):
pass
@pytest.mark.single
class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):
"""
Test the public sqlite connection fallback API
"""
flavor = "sqlite"
mode = "fallback"
def connect(self, database=":memory:"):
return sqlite3.connect(database)
def test_sql_open_close(self):
# Test if the IO in the database still work if the connection closed
# between the writing and reading (as in many real situations).
with tm.ensure_clean() as name:
conn = self.connect(name)
sql.to_sql(self.test_frame3, "test_frame3_legacy", conn, index=False)
conn.close()
conn = self.connect(name)
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn)
conn.close()
tm.assert_frame_equal(self.test_frame3, result)
@pytest.mark.skipif(SQLALCHEMY_INSTALLED, reason="SQLAlchemy is installed")
def test_con_string_import_error(self):
conn = "mysql://root@localhost/pandas_nosetest"
msg = "Using URI string without sqlalchemy installed"
with pytest.raises(ImportError, match=msg):
sql.read_sql("SELECT * FROM iris", conn)
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
msg = "Execution failed on sql 'iris': near \"iris\": syntax error"
with pytest.raises(sql.DatabaseError, match=msg):
sql.read_sql("iris", self.conn)
def test_safe_names_warning(self):
# GH 6798
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b "]) # has a space
# warns on create table with spaces in names
with tm.assert_produces_warning():
sql.to_sql(df, "test_frame3_legacy", self.conn, index=False)
def test_get_schema2(self):
# without providing a connection object (available for backwards comp)
create_sql = sql.get_schema(self.test_frame1, "test")
assert "CREATE" in create_sql
def _get_sqlite_column_type(self, schema, column):
for col in schema.split("\n"):
if col.split()[0].strip('""') == column:
return col.split()[1]
raise ValueError(f"Column {column} not found")
def test_sqlite_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLiteDatabase(self.conn)
table = sql.SQLiteTable("test_type", db, frame=df)
schema = table.sql_schema()
assert self._get_sqlite_column_type(schema, "time") == "TIMESTAMP"
# -----------------------------------------------------------------------------
# -- Database flavor specific tests
class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest):
"""
Base class for testing the sqlalchemy backend.
Subclasses for specific database types are created below. Tests that
deviate for each flavor are overwritten there.
"""
flavor: str
@pytest.fixture(autouse=True, scope="class")
def setup_class(cls):
cls.setup_import()
cls.setup_driver()
conn = cls.connect()
conn.connect()
def load_test_data_and_sql(self):
self._load_raw_sql()
self._load_test1_data()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
@classmethod
def setup_import(cls):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
pytest.skip("SQLAlchemy not installed")
@classmethod
def setup_driver(cls):
raise NotImplementedError()
@classmethod
def connect(cls):
raise NotImplementedError()
def setup_connect(self):
try:
self.conn = self.connect()
self.pandasSQL = sql.SQLDatabase(self.conn)
# to test if connection can be made:
self.conn.connect()
except sqlalchemy.exc.OperationalError:
pytest.skip(f"Can't connect to {self.flavor} server")
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_to_sql_method_multi(self):
self._to_sql(method="multi")
def test_to_sql_method_callable(self):
self._to_sql_method_callable()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
def test_drop_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
pandasSQL.drop_table("temp_frame")
assert not temp_conn.has_table("temp_frame")
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_sql_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_sql_table(
"iris", con=self.conn, columns=["SepalLength", "SepalLength"]
)
tm.equalContents(iris_frame.columns.values, ["SepalLength", "SepalLength"])
def test_read_table_absent_raises(self):
msg = "Table this_doesnt_exist not found"
with pytest.raises(ValueError, match=msg):
sql.read_sql_table("this_doesnt_exist", con=self.conn)
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
assert issubclass(df.BoolCol.dtype.type, np.bool_)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA values becomes object
assert issubclass(df.BoolColWithNull.dtype.type, np.object)
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
df = DataFrame(data={"i64": [2 ** 62]})
df.to_sql("test_bigint", self.conn, index=False)
result = sql.read_sql_table("test_bigint", self.conn)
tm.assert_frame_equal(df, result)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
assert issubclass(df.DateCol.dtype.type, np.datetime64)
def test_datetime_with_timezone(self):
# edge case that converts postgresql datetime with time zone types
# to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok
# but should be more natural, so coerce to datetime64[ns] for now
def check(col):
# check that a column is either datetime64[ns]
# or datetime64[ns, UTC]
if is_datetime64_dtype(col.dtype):
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
assert col[0] == Timestamp("2000-01-01 08:00:00")
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
assert col[1] == Timestamp("2000-06-01 07:00:00")
elif is_datetime64tz_dtype(col.dtype):
assert str(col.dt.tz) == "UTC"
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
# GH 6415
expected_data = [
Timestamp("2000-01-01 08:00:00", tz="UTC"),
Timestamp("2000-06-01 07:00:00", tz="UTC"),
]
expected = Series(expected_data, name=col.name)
tm.assert_series_equal(col, expected)
else:
raise AssertionError(
f"DateCol loaded with incorrect type -> {col.dtype}"
)
# GH11216
df = pd.read_sql_query("select * from types_test_data", self.conn)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
# this is parsed on Travis (linux), but not on macosx for some reason
# even with the same versions of psycopg2 & sqlalchemy, possibly a
# Postgresql server version difference
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
df = pd.read_sql_query(
"select * from types_test_data", self.conn, parse_dates=["DateColWithTz"]
)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
check(df.DateColWithTz)
df = pd.concat(
list(
pd.read_sql_query(
"select * from types_test_data", self.conn, chunksize=1
)
),
ignore_index=True,
)
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
expected = sql.read_sql_table("types_test_data", self.conn)
col = expected.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
tm.assert_series_equal(df.DateColWithTz, expected.DateColWithTz)
# xref #7139
# this might or might not be converted depending on the postgres driver
df = sql.read_sql_table("types_test_data", self.conn)
check(df.DateColWithTz)
def test_datetime_with_timezone_roundtrip(self):
# GH 9086
# Write datetimetz data to a db and read it back
# For dbs that support timestamps with timezones, should get back UTC
# otherwise naive data should be returned
expected = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3, tz="US/Pacific")}
)
expected.to_sql("test_datetime_tz", self.conn, index=False)
if self.flavor == "postgresql":
# SQLAlchemy "timezones" (i.e. offsets) are coerced to UTC
expected["A"] = expected["A"].dt.tz_convert("UTC")
else:
# Otherwise, timestamps are returned as local, naive
expected["A"] = expected["A"].dt.tz_localize(None)
result = sql.read_sql_table("test_datetime_tz", self.conn)
tm.assert_frame_equal(result, expected)
result = sql.read_sql_query("SELECT * FROM test_datetime_tz", self.conn)
if self.flavor == "sqlite":
# read_sql_query does not return datetime type like read_sql_table
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, expected)
def test_naive_datetimeindex_roundtrip(self):
# GH 23510
# Ensure that a naive DatetimeIndex isn't converted to UTC
dates = date_range("2018-01-01", periods=5, freq="6H")
expected = DataFrame({"nums": range(5)}, index=dates)
expected.to_sql("foo_table", self.conn, index_label="info_date")
result = sql.read_sql_table("foo_table", self.conn, index_col="info_date")
# result index will gain a name from a set_index operation; the expected index has no name, hence check_names=False
tm.assert_frame_equal(result, expected, check_names=False)
def test_date_parsing(self):
# No Parsing
df = sql.read_sql_table("types_test_data", self.conn)
expected_type = object if self.flavor == "sqlite" else np.datetime64
assert issubclass(df.DateCol.dtype.type, expected_type)
df = sql.read_sql_table("types_test_data", self.conn, parse_dates=["DateCol"])
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"}
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data",
self.conn,
parse_dates={"DateCol": {"format": "%Y-%m-%d %H:%M:%S"}},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": {"unit": "s"}}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_datetime(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.to_sql("test_datetime", self.conn)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
result = result.drop("index", axis=1)
tm.assert_frame_equal(result, df)
# with read_sql -> no type information -> sqlite has no native
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
result = result.drop("index", axis=1)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_NaT(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.loc[1, "A"] = np.nan
df.to_sql("test_datetime", self.conn, index=False)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql -> no type information -> sqlite has no native
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"], errors="coerce")
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql("test_date", self.conn, index=False)
res = read_sql_table("test_date", self.conn)
result = res["a"]
expected = to_datetime(df["a"])
# comes back as datetime64
tm.assert_series_equal(result, expected)
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql("test_time", self.conn, index=False)
res = read_sql_table("test_time", self.conn)
| tm.assert_frame_equal(res, df) | pandas._testing.assert_frame_equal |
import pandas as pd
import dill as pickle
from load_test_data import write_dict_to_pkl
def load_pickled_model(filename):
'''
INPUT
- filename: str, path and name of the file
OUTPUT
- model: sklearn classifier model, fit already
Returns the unpickled model
'''
with open(filename, 'r') as f:
model = pickle.load(f)
f.close()
return model
def load_processed_csv_for_predictions(filename):
'''
INPUT
- filename: str, path and name of the processed csv file to make
predictions on
OUTPUT
- X: this is a 2d array of the features
- user_id_array: 1d array of the user ids
Returns the feature matrix and the corresponding array of user ids
'''
df = | pd.read_csv(filename) | pandas.read_csv |
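# --- Added illustration (not part of the sample above) -----------------------
# load_processed_csv_for_predictions is truncated after the read_csv call; per
# its docstring it should return a 2d feature array and a 1d array of user ids.
# A self-contained sketch of that split, assuming a hypothetical 'user_id'
# column holds the ids and the remaining columns are features:
def _split_features_and_ids_sketch(df):
    user_id_array = df['user_id'].values
    X = df.drop(columns=['user_id']).values
    return X, user_id_array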
import os, glob
import datetime
import pandas as pd
clear_last = 1
def update_csv_ts1(dataset_name, network_name, train_acc, train_loss, test_acc, test_loss, current_epoch, da1, now):
global clear_last
new_row = [dataset_name, network_name, train_loss, test_loss, train_acc, test_acc, current_epoch, da1]
save_path = './result/result_csv/' + dataset_name + '_' + network_name + '_' + da1 + '_' + now + '_result.csv'
if clear_last == 1: # for initial
clear_last = 0
csvcolumns = ['dataset_name', 'network_name', 'train_loss', 'test_loss', 'train_acc', 'test_acc', 'epoch', 'da1']
df = pd.DataFrame(columns=csvcolumns)
if not os.path.exists('./result/result_csv/'):
os.makedirs('./result/result_csv/')
df.to_csv(save_path)
df = pd.read_csv(save_path, index_col=0)
series = pd.Series(new_row, index=df.columns)
df = df.append(series, ignore_index=True)
df.to_csv(save_path)
def update_csv_ts5(dataset_name, network_name, train_acc, train_loss, test_acc, test_loss, current_epoch, da1, p1, da2, p2, da3, p3, da4, p4, da5, p5, consis_lambda, now):
global clear_last
new_row = [dataset_name, network_name, train_loss, test_loss, train_acc, test_acc, current_epoch, da1, p1, da2, p2, da3, p3, da4, p4, da5, p5, consis_lambda]
save_path = './result/result_csv/' + dataset_name + '_' + network_name + '_' + da1 +'-' + da2 +'-' + da3 +'-' + da4 +'-' + da5 + '_' + str(consis_lambda) + '_' + now + '_result.csv'
if clear_last == 1: # for initial
clear_last = 0
csvcolumns = ['dataset_name', 'network_name', 'train_loss', 'test_loss', 'train_acc', 'test_acc', 'epoch', 'da1', 'param1', 'da2', 'param2', 'da3', 'param3', 'da4', 'param4', 'da5', 'param5', 'consis_lambda']
df = pd.DataFrame(columns=csvcolumns)
if not os.path.exists('./result/result_csv/'):
os.makedirs('./result/result_csv/')
df.to_csv(save_path)
df = pd.read_csv(save_path, index_col=0)
series = | pd.Series(new_row, index=df.columns) | pandas.Series |
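# --- Added note (not part of the sample above) --------------------------------
# The update_csv_ts* helpers rely on DataFrame.append, which was deprecated in
# pandas 1.4 and removed in 2.0. An equivalent single-row append with pd.concat:
def _append_row_compat(df, new_row, save_path):
    df = pd.concat([df, pd.DataFrame([new_row], columns=df.columns)],
                   ignore_index=True)
    df.to_csv(save_path)
    return df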
#! python3
# coding: utf-8
# Script to update g values in FG-5/A-10 .project.txt files with laser drift and(or) a soil moisture correction.
#
# The script works on project.txt files in a specified directory (and subdirectories). The g value in each
# .project.txt file is updated, and a comment added that describes the magnitude of the correction. The original
# .project.txt file is copied to a new file, where 'project.txt' in the filename is replaced with 'original.txt'.
#
# Laser drift corrections are taken from an Excel workbook, specified as a parameter in the script.
#
# Soil moisture corrections are taken from a text file downloaded from the
# ORNL DAAC. 2017. Soil Moisture Visualizer. ORNL DAAC, Oak Ridge, Tennessee, USA.
# http://dx.doi.org/10.3334/ORNLDAAC/1366
#
# This service provides SMAP root zone soil moisture for a point location, from March 2015 onward. SMAP data
# represent the mean value of a 9 x 9km grid; all points within a cell have the same soil moisture time series.
#
# A csv-file summary of the corrections is written, with the filename "Corrections_YYYY-MM-DD.csv"
#
# Written for Python 2.7, updated for compatibility with Python 3.5 in 2018
#
# <NAME>
# USGS
#
# Updated 11/2/2018: Tkinter now tkinter, specify filedialog as import (lines 38-39). Updated line 61 to current tkinter syntax.
# Added lines 98-100 to check for previous correction (existing 'original.txt' file) and break loop if found.
# Changed line 101 from <if string.find(fname, 'project.txt') != -1:> to <elif fname.find('project.txt') != -1:>
# Tested on partial copy of TAMA directory on 11/2/2018. Test successful.
# Tested on a directory with some files updated and some not - by the timestamp of files
# it only applied the correction to previously uncorrected project.txt files.
#
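# --- Added illustration (not part of the original script, which begins below) ---
# The header describes two bookkeeping steps: the original file is copied with
# 'project.txt' replaced by 'original.txt', and a dated "Corrections_YYYY-MM-DD.csv"
# summary is written. A minimal self-contained sketch of that bookkeeping
# (names are hypothetical; the real logic follows):
def _backup_project_file_sketch(fname):
    import shutil
    from datetime import date
    backup_name = fname.replace('project.txt', 'original.txt')
    shutil.copy2(fname, backup_name)
    return 'Corrections_' + date.today().strftime('%Y-%m-%d') + '.csv'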
import string
import os
from tkinter import filedialog
from tkinter import *
import datetime
import pandas as pd
from time import strftime
# User-specified options
update_laser = True
laser_cal_file = "\\\\Igswztwwgszona\\Gravity Data Archive\\Absolute Data\\A-10\\Instrument Maintenance\\Calibrations\\A10-008 clock and laser calibrations.xlsx"
laser_cal_worksheet = "DRIFT LOOKUP TABLE"
update_SM = False
if update_SM:
sm_file = "BCnw_daily-smap-ORNL-DAAC-1s19jW.txt"
write_corrections_to_file = True
# Either use a specified directory, or show a GUI for the user to decide
pwd = os.getcwd()
root = Tk()
root.withdraw()
data_directory = filedialog.askdirectory(
parent=root,initialdir=pwd)
# data_directory = u'\\\\Igswztwwgszona\\Gravity Data Archive\\Absolute Data\\A-10\\Final Data\\Big Chino'
laser_corr, drift_rate, elapsed_days = -999, -999, -999
sm_at_time_of_g, sm, sm_corr = -999, -999, -999
if update_laser:
xl = pd.ExcelFile(laser_cal_file)
df_drift = xl.parse(laser_cal_worksheet)
if update_SM:
df_sm = pd.read_csv(sm_file, header=4)
sm_date = df_sm["time"]
sm = df_sm["SMAP_rootzone"]
else:
sm_corr = -999
sm_at_time_of_g
sm = | pd.DataFrame([-999]) | pandas.DataFrame |
# Copyright 2020 The Private Cardinality Estimation Framework Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analyze the simulation results."""
import os
import shutil
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from wfa_cardinality_estimation_evaluation_framework.common import plotting
from wfa_cardinality_estimation_evaluation_framework.evaluations import evaluator
from wfa_cardinality_estimation_evaluation_framework.simulations import simulator
ERROR_MARGIN_NAME = 'error_margin'
PROPORTION_OF_RUNS_NAME = 'proportion_of_runs'
ERROR_MARGIN = 0.05
PROPORTION_OF_RUNS = 0.95
RUNNING_TIME_SCALE = 3600 # Convert seconds to hour.
SKETCH_ESTIMATOR_NAME = 'sketch_estimator'
SCENARIO_NAME = 'scenario'
NUM_ESTIMABLE_SETS = 'num_estimable_sets'
RAW_RESULT_DF = 'df'
CARDINALITY_SOURCE = 'source'
CARDINALITY_VALUE = 'cardinality'
FREQUENCY_LEVEL = 'frequency_level'
# The file that summarize the maximum number of sets that can be estimated
# within 5% (or specified by the error_margin) relative error for at least 95%
# (or specified by the proportion_of_runs) runs. It has columns of estimator,
# scenario and num_estimable_sets.
NUM_ESTIMABLE_SETS_FILENAME = 'num_estimable_sets.csv'
BOXPLOT_FILENAME = 'boxplot.png'
BARPLOT_ESTIMABLE_SETS_FILENAME = 'barplot_estimable_sets.png'
BARPLOT_MAX_SETS_FILENAME = 'barplot_max_sets.png'
XLABEL_ROTATE = 'xlabel_rotate'
BOXPLOT_SIZE_WIDTH_INCH = 'boxplot_size_width_inch'
BOXPLOT_SIZE_HEIGHT_INCH = 'boxplot_size_height_inch'
BARPLOT_SIZE_WIDTH_INCH = 'barplot_size_width_inch'
BARPLOT_SIZE_HEIGHT_INCH = 'barplot_size_height_inch'
PLOT_PARAMS = {
XLABEL_ROTATE: 0,
BOXPLOT_SIZE_WIDTH_INCH: 12,
BOXPLOT_SIZE_HEIGHT_INCH: 6,
BARPLOT_SIZE_WIDTH_INCH: 12,
BARPLOT_SIZE_HEIGHT_INCH: 6,
}
# Variables related with getting analysis results.
SKETCH_ESTIMATOR_COLNAME = 'sketch_estimator'
RUNNING_TIME_COLNAME = 'running_time'
KEY_DESCRIPTION_TO_FILE_DIR = 'description_to_file_dir'
KEY_NUM_ESTIMABLE_SETS_STATS_DF = 'num_estimable_sets_stats_df'
KEY_RUNNING_TIME_DF = 'running_time_df'
def get_num_estimable_sets(df, num_sets, relative_error, error_margin,
proportion_of_runs):
"""Get the number of estimable sets.
For example, set error_margin = 0.05 and proportion_of_runs = 0.95. Then
the number of estimable sets is defined as the number of sets whose union
cardinality can be estimated such that 95% of the runs are within a 5%
relative error.
Args:
df: a pd.DataFrame that have columns of num_sets and relative_error.
num_sets: a column name in df that specifies the number of sets.
relative_error: a column name in df that specifies the relative error.
error_margin: a positive number setting the upper bound of the error. By
default, set to 0.05.
proportion_of_runs: a number between 0 and 1 that specifies the proportion
of runs. By default, set to 0.95.
Returns:
The number of estimable sets.
"""
if not set([num_sets, relative_error]).issubset(df.columns):
raise ValueError(f'{num_sets} or {relative_error} not found in df.')
def count_estimable(e):
return np.mean(np.abs(e) < error_margin) >= proportion_of_runs
df_estimable = (
df[df[num_sets] > 1].groupby(num_sets)
.agg({relative_error: count_estimable}))
df_estimable = df_estimable.rename(
columns={relative_error: 'is_estimable'})
num_of_estimable = 0
for n in df_estimable.index.values:
if df_estimable.loc[n, 'is_estimable']:
num_of_estimable = n
else:
break
return num_of_estimable
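# Illustrative example (not part of the original module): a toy data frame in which unions
# of 2 sets stay within a 5% relative error in every run, while unions of 3 sets do not,
# so the helper defined above returns 2.
def _example_get_num_estimable_sets():
  toy = pd.DataFrame({
      'num_sets': [2] * 4 + [3] * 4,
      'relative_error': [0.01, 0.02, -0.01, 0.03, 0.01, 0.20, -0.15, 0.02],
  })
  return get_num_estimable_sets(
      toy, num_sets='num_sets', relative_error='relative_error',
      error_margin=0.05, proportion_of_runs=0.95)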
class EstimatorEvaluationAnalyzer(object):
"""Analyze the estimator evaluation results."""
def __init__(self, out_dir, evaluation_directory, evaluation_run_name,
evaluation_name, estimable_criteria_list,
plot_params=None):
"""Construct an analyzer.
Args:
out_dir: the output directory of analysis results.
evaluation_directory: the output directory of evaluation results. The
analyzer will read the evaluation results and output summary tables and
plots.
evaluation_run_name: the run name of the evaluation.
evaluation_name: the name of the evaluation config.
estimable_criteria_list: a list of tuples of error_margin and
proportion_of_runs. An error_margin is a positive number setting the
upper bound of the error, and the proportion_of_runs is a number
between 0 and 1 that specifies the desired proportion of runs within
the error margin.
plot_params: a dictionary of the parameters of plot functions. If not
given, will use PLOT_PARAMS. Also see PLOT_PARAMS for how it is defined.
"""
self.estimable_criteria_list = estimable_criteria_list
if plot_params is None:
self.plot_params = PLOT_PARAMS
else:
self.plot_params = plot_params
# Get all the raw results.
self.evaluation_file_dirs = evaluator.load_directory_tree(
out_dir=evaluation_directory,
run_name=evaluation_run_name,
evaluation_name=evaluation_name)
self.raw_df = (
CardinalityEstimatorEvaluationAnalyzer
.read_evaluation_results(self.evaluation_file_dirs))
# Create the analysis directory.
if out_dir is None:
out_dir = os.getcwd()
if out_dir != evaluation_directory:
shutil.copytree(
self.evaluation_file_dirs[evaluator.KEY_RUN_DIR],
os.path.join(out_dir, evaluation_run_name))
self.analysis_file_dirs = evaluator.load_directory_tree(
out_dir=out_dir,
run_name=evaluation_run_name,
evaluation_name=evaluation_name)
def __call__(self):
num_estimable_sets_stats_df = (
self.get_relative_error_stats_of_num_of_estimable_sets())
df_filename = os.path.join(
self.analysis_file_dirs[evaluator.KEY_EVALUATION_DIR],
NUM_ESTIMABLE_SETS_FILENAME)
with open(df_filename, 'w') as f:
num_estimable_sets_stats_df.to_csv(f, index=False)
self._save_plot_num_sets_vs_metric()
@classmethod
def read_evaluation_results(cls, file_dirs):
"""Read evaluation results.
Args:
file_dirs: a dictionary of file directories of the evaluation which is
generated by the create_directory method of evaluator.Evaluation.
Returns:
A pandas.DataFrame containing columns of the estimator name, the scenario
name, and the corresponding raw evaluation result data frame.
"""
df_list = []
for estimator_name in file_dirs[evaluator.KEY_ESTIMATOR_DIRS].keys():
for scenario_name in file_dirs[estimator_name].keys():
df_file = os.path.join(
file_dirs[estimator_name][scenario_name],
evaluator.RAW_RESULT_DF_FILENAME)
with open(df_file, 'r') as f:
df = pd.read_csv(f)
df[SKETCH_ESTIMATOR_NAME] = estimator_name
df[SCENARIO_NAME] = scenario_name
df_list.append(df)
return pd.concat(df_list, ignore_index=True)
def get_num_estimable_sets_df(self):
"""Summarize the number of estimable sets by estimators and scenarios."""
def _get_num_estimable_sets_series(df, **kwargs):
return pd.Series({
NUM_ESTIMABLE_SETS: get_num_estimable_sets(df, **kwargs)})
df_list = []
for criteria in self.estimable_criteria_list:
df = self.raw_df.groupby([SKETCH_ESTIMATOR_NAME, SCENARIO_NAME]).apply(
_get_num_estimable_sets_series,
num_sets=simulator.NUM_SETS,
relative_error=self.error_metric_column,
error_margin=criteria[0],
proportion_of_runs=criteria[1]).reset_index()
df[ERROR_MARGIN_NAME] = criteria[0]
df[PROPORTION_OF_RUNS_NAME] = criteria[1]
df_list.append(df)
    return pd.concat(df_list, ignore_index=True)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@version: 1.4.0
@file: GSP_main.py
@time: 2021/1/26 10:50
@functions: graph signal processing main script
@update: support Yeo-ICN definition
@update: support ICN-level brain activity and connecitivty strength saving
"""
import numpy as np
import glob
import os
import time
import matplotlib.pyplot as plt
from pygsp import graphs, filters, plotting
from GSP_utilities import surrogate_BOLD_create, save_variable, load_variable
import pandas as pd
from dppd import dppd
dp, X = dppd()
# 1. path locations and parameters
start = time.time()
deriv_path = '/home/amax/data/cye/MScohort_BIDS_clean/derivatives'
connectome_path = os.path.join(deriv_path, 'mrtrix')
xcpengine_path = os.path.join(deriv_path, 'xcpengine')
network_assign_path = 'CAB-NP_v1.1_Labels-ReorderedbyNetworks_Yeo.csv'
num_BOLD_timepoints = 180
num_rand = 100 # number of surrogates
functional_type = 'BOLD'
tract_type = 'meanlength' # one of the following: invlength, invnodevol, level-participant_connectome, meanlength
ICN_type = 'Yeo' # one of the following: 'Yeo', 'Cole'
normalize_type = 'both' # 'W': normalize W; 'L': normalize Laplacian (Preti method); 'both': normalize both W and Laplacian
# 2. read network assignment for hcpmmp
network_assign_csv = pd.read_csv(network_assign_path)
network_assign_csv = dp(network_assign_csv).mutate(NETWORK=X.Yeo_NETWORK).pd
network_assign_csv = dp(network_assign_csv).mutate(NETWORKKEY=X.Yeo_NETWORKKEY).pd
num_network_df = dp(network_assign_csv).summarise((X.NETWORKKEY, np.max, 'hp_max')).pd
num_network = num_network_df.iloc[0,0]
network_rowindex_ls = []
for network_i in range(1,num_network+1):
df_network = dp(network_assign_csv).filter_by(X.NETWORKKEY == network_i).pd
network_rowindex_ls.append(df_network.index.values)
network_unique_df = dp(network_assign_csv).distinct('NETWORKKEY').pd
network_unique_df = network_unique_df.sort_values(by='NETWORKKEY',ascending = True)
network_unique_df = dp(network_unique_df).filter_by(-X.NETWORK.isin(['Undefine'])).pd # remove undefined ICN
network_unique_df = network_unique_df.reset_index()
# 3. define group of interests
cohort1 = 'ms'
cohort2 = 'nc'
cohort3 = 'nmo'
cohort4 = 'cis'
cohort1_connectome_ls = glob.glob(os.path.join(connectome_path, 'sub-' + cohort1 + '*'))
cohort2_connectome_ls = glob.glob(os.path.join(connectome_path, 'sub-' + cohort2 + '*'))
cohort3_connectome_ls = glob.glob(os.path.join(connectome_path, 'sub-' + cohort3 + '*'))
cohort4_connectome_ls = glob.glob(os.path.join(connectome_path, 'sub-' + cohort4 + '*'))
cohort_connectome_ls = cohort1_connectome_ls + cohort2_connectome_ls + cohort3_connectome_ls + cohort4_connectome_ls
cohort_connectome_ls.sort()
cohort1_fmri_ls = glob.glob(os.path.join(xcpengine_path, 'sub-' + cohort1 + '*'))
cohort2_fmri_ls = glob.glob(os.path.join(xcpengine_path, 'sub-' + cohort2 + '*'))
cohort3_fmri_ls = glob.glob(os.path.join(xcpengine_path, 'sub-' + cohort3 + '*'))
cohort4_fmri_ls = glob.glob(os.path.join(xcpengine_path, 'sub-' + cohort4 + '*'))
cohort_fmri_ls = cohort1_fmri_ls + cohort2_fmri_ls + cohort3_fmri_ls + cohort4_fmri_ls
cohort_name_ls = [os.path.basename(item) for item in cohort_connectome_ls]
remove_name_ls = ['sub-nc011','sub-nc039', 'sub-nmo002', 'sub-nmo019', 'sub-cis002','sub-cis015', 'sub-ms015'] # problematic cases
cohort_name_ls = list(set(cohort_name_ls) - set(remove_name_ls)) # remove problematic cases
for i in remove_name_ls: # remove problematic cases
cohort_connectome_ls = [x for x in cohort_connectome_ls if i not in x]
cohort_fmri_ls = [x for x in cohort_fmri_ls if i not in x]
cohort_name_ls.sort()
cohort_connectome_ls.sort()
cohort_fmri_ls.sort()
if len(cohort_connectome_ls) != len(cohort_fmri_ls):
print('Number of connectome and xcpengine results not matched')
# 4. create a dataframe to store individual filepath
path_dict = {'subname':cohort_name_ls, 'mrtrix_path': cohort_connectome_ls, 'xcp_path':cohort_fmri_ls}
path_df = pd.DataFrame(path_dict, columns=['subname','mrtrix_path','xcp_path'])
path_df = dp(path_df).mutate(connectome_path=X.mrtrix_path + '/connectome/' + X.subname +'_parc-hcpmmp1_' + tract_type + '.csv').pd
path_df = dp(path_df).mutate(BOLD_series_path=X.xcp_path + '/fcon/hcpmmp/hcpmmp.1D').pd
path_df = dp(path_df).mutate(fmri_map_path=X.xcp_path + '/roiquant/hcpmmp/' + X.subname +'_hcpmmp_mean.csv').pd
print('finished step 4')
# 5. load individual connectome as ndarray
num_parcels = len(network_assign_csv)
num_sub = len(path_df)
path_df_nc = dp(path_df).filter_by(X.subname.str.contains('nc')).pd
num_nc = len(path_df_nc)
nc_idx = path_df_nc.index
connectome_array = np.zeros(shape=(num_parcels, num_parcels, num_sub))
for sub_idx in range(len(path_df)):
    individual_connectome = np.genfromtxt(path_df.loc[sub_idx, 'connectome_path'], delimiter=',')
    connectome_array[:,:,sub_idx] = individual_connectome
# 6. load individual BOLD series and fill missing part according to /fcon/hcpmmp/missing.txt
BOLD_series_3D = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub))
for sub_idx in range(len(path_df)):
BOLD_series = np.genfromtxt(path_df.loc[sub_idx, 'BOLD_series_path'])
BOLD_series = BOLD_series.T
missing_path = os.path.join(path_df.loc[sub_idx, 'xcp_path'], 'fcon', 'hcpmmp', 'hcpmmp_missing.txt')
if os.path.exists(missing_path):
missing_parcel_id = np.genfromtxt(missing_path, dtype=int)
if missing_parcel_id.size == 1: # only one parcel missing
if BOLD_series[missing_parcel_id-1,:].sum() != 0:
print("missing parcel not match for subject {}".format(sub_idx))
network_key = network_assign_csv.loc[missing_parcel_id-1,'NETWORKKEY']
network_parcel_idx = network_rowindex_ls[network_key-1]
BOLD_series[missing_parcel_id-1,:] = np.mean(BOLD_series[network_parcel_idx,:])
else: # multiple parcels missing
for missing_idx in missing_parcel_id:
network_key = network_assign_csv.loc[missing_idx-1,'NETWORKKEY']
network_parcel_idx = network_rowindex_ls[network_key-1]
BOLD_series[missing_idx-1,:] = np.mean(BOLD_series[network_parcel_idx,:])
BOLD_series_3D[:,:,sub_idx] = BOLD_series
print('finished loading individual BOLD series and filling missing part')
# 7. load fmri parametric map and fill missing part according to /fcon/hcpmmp/missing.txt
fmri_paramap = np.zeros(shape=(num_parcels, num_sub))
paramap_str = 'mean_alffZ'
for sub_idx in range(len(path_df)):
fmri_map = pd.read_csv(path_df.loc[sub_idx, 'fmri_map_path'],index_col=0)
fmri_map = fmri_map.loc[:,paramap_str]
missing_path = os.path.join(path_df.loc[sub_idx, 'xcp_path'], 'fcon', 'hcpmmp', 'hcpmmp_missing.txt')
if os.path.exists(missing_path):
missing_parcel_id = np.genfromtxt(missing_path, dtype=int)
if missing_parcel_id.size == 1: # only one parcel missing
if not np.isnan(fmri_map[missing_parcel_id]):
print("missing parcel not match for subject {}".format(sub_idx))
network_key = network_assign_csv.loc[missing_parcel_id-1,'NETWORKKEY']
network_parcel_idx = network_rowindex_ls[network_key-1]
fmri_map[int(missing_parcel_id)] = np.mean(fmri_map[network_parcel_idx])
fmri_map = fmri_map.to_numpy()
else: # multiple parcels missing
network_key = network_assign_csv.loc[missing_parcel_id-1,'NETWORKKEY']
network_rowindex_ls = np.array(network_rowindex_ls, dtype=object)
network_parcel_idx = network_rowindex_ls[network_key-1]
for parcel_i in range(missing_parcel_id.size):
fmri_map[int(missing_parcel_id[parcel_i])] = np.mean(fmri_map[network_parcel_idx[parcel_i]])
fmri_map = fmri_map.to_numpy()
fmri_paramap[:,sub_idx] = fmri_map
print('finished loading fmri parametric map and fill missing part')
# 8. load connectome and functional signal and do GSP
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
func_sig = BOLD_series_3D
s_head_cohort = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub))
s_rand_cohort = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub, num_rand))
else:
raise ValueError('undefined functional signal')
G_U_cohort = np.zeros(shape=(num_parcels, num_parcels, num_sub))
for sub_idx in range(len(path_df)):
W = np.genfromtxt(path_df.loc[sub_idx, 'connectome_path'], delimiter=',')
# Symmetric Normalization of adjacency matrix
D = np.diag(np.sum(W,1)) #degree
D_power = np.power(D, (-1/2))
D_power[np.isinf(D_power)] = 0
Wsymm = D_power @ W @ D_power
#The eigenvector matrix G.U is used to define the Graph Fourier Transform of the graph signal S
if normalize_type == 'W':
G = graphs.Graph(Wsymm)
G.compute_fourier_basis()
G_U_cohort[:,:,sub_idx] = G.U
U = G.U
elif normalize_type == 'L':
G = graphs.Graph(W, lap_type = 'normalized')
G.compute_fourier_basis()
G_U_cohort[:,:,sub_idx] = G.U
U = G.U
elif normalize_type == 'both':
Wsymm = np.triu(Wsymm) + np.triu(Wsymm).T - np.diag(np.triu(Wsymm).diagonal()) # force symmetric
G = graphs.Graph(Wsymm, lap_type = 'normalized')
G.compute_fourier_basis()
G_U_cohort[:,:,sub_idx] = G.U
U = G.U
# L = np.eye(len(Wsymm)) - Wsymm
# lamda, U = np.linalg.eig(L)
# U = U[:, np.argsort(lamda)]
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
s_head = U.T @ func_sig[:,:,sub_idx]
s_head_cohort[:,:,sub_idx] = s_head
# calcualte surrogate for individual
s_rand_cohort[:,:,sub_idx,:] = surrogate_BOLD_create(U, func_sig[:,:,sub_idx], num_rand)
print('finished Graph Fourier Transform')
# save_variable(G_U_cohort, 'G_U_cohort.pkl')
# save_variable(s_head_cohort, 's_head_cohort.pkl')
# save_variable(s_rand_cohort, 's_rand_cohort.pkl')
# G_U_cohort = load_variable('G_U_cohort.pkl')
# s_head_cohort = load_variable('s_head_cohort.pkl')
# s_rand_cohort = load_variable('s_rand_cohort.pkl')
# 8.5(optional). plot Sihag2020 plot
# take nc001 as example
nc001_idx = path_df.subname[path_df.subname == 'sub-nc001'].index.tolist()[0]
s_low = G_U_cohort[:,0:4, nc001_idx] @ s_head_cohort[0:4,:,nc001_idx]
s_high = G_U_cohort[:,-55:-51, nc001_idx] @ s_head_cohort[-55:-51,:,nc001_idx]
np.savetxt("nc001_s_low_both.csv", s_low, delimiter=",")
np.savetxt("nc001_s_high_both.csv", s_high, delimiter=",")
# 9. calculate the median-split threshold
NC_index = [cohort_name_ls.index(x) for x in cohort_name_ls if 'nc' in x]
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
s_head_NC = s_head_cohort[:,:,NC_index]
s_head_NC_square = np.power(s_head_NC, 2)
#s_head_NC_square = np.power(s_head_NC_square, 1/2)
s_head_NC_square_mean = np.mean(s_head_NC_square, (1,2)) # average for each timepoint and each subject
s_head_NC_AUCTOT = np.trapz(s_head_NC_square_mean)
i=0
AUC=0
while AUC < s_head_NC_AUCTOT/2:
AUC = np.trapz(s_head_NC_square_mean[:i])
i = i + 1
cutoff = i-1
print('finished calculating the median-split threshold')
print('cutoff = {}'.format(cutoff))
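# Illustrative cross-check (not executed; not part of the original analysis): the same
# cutoff can be obtained without the while-loop by accumulating trapezoid areas, e.g.
#   running_auc = np.array([np.trapz(s_head_NC_square_mean[:i])
#                           for i in range(len(s_head_NC_square_mean) + 1)])
#   cutoff_alt = int(np.argmax(running_auc >= s_head_NC_AUCTOT / 2))
# which returns the smallest index whose cumulative AUC reaches half of the total.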
# 10. calculate decoupling index for empirical data
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
s_aligned_cohort = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub))
s_liberal_cohort = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub))
for sub_idx in range(len(path_df)):
s_aligned_cohort[:,:,sub_idx] = G_U_cohort[:,0:cutoff, sub_idx] @ s_head_cohort[0:cutoff,:,sub_idx]
s_liberal_cohort[:,:,sub_idx] = G_U_cohort[:,cutoff-1:-1, sub_idx] @ s_head_cohort[cutoff-1:-1,:,sub_idx]
s_aligned_individual = np.linalg.norm(s_aligned_cohort, ord=2, axis=1)
s_liberal_individual = np.linalg.norm(s_liberal_cohort, ord=2, axis=1)
s_deCoupIdx_individual = s_liberal_individual / s_aligned_individual
s_aligned = np.mean(s_aligned_individual[:,nc_idx], axis=1)
s_liberal = np.mean(s_liberal_individual[:,nc_idx], axis=1)
s_deCoupIdx_node = s_liberal/s_aligned # only for NC
print('finished calculating decoupling index for empirical data')
# 11. calculate decoupling index for surrogate data only for NC
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
s_aligned_cohort_rand = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_nc, num_rand))
s_liberal_cohort_rand = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_nc, num_rand))
for i, sub_idx in enumerate(nc_idx):
for rand_idx in range(num_rand):
s_aligned_cohort_rand[:,:,i,rand_idx] = G_U_cohort[:,0:cutoff, sub_idx] @ s_rand_cohort[0:cutoff,:,sub_idx,rand_idx]
s_liberal_cohort_rand[:,:,i,rand_idx] = G_U_cohort[:,cutoff-1:-1, sub_idx] @ s_rand_cohort[cutoff-1:-1,:,sub_idx,rand_idx]
# norm for BOLD timepoints
s_aligned_norm_rand = np.linalg.norm(s_aligned_cohort_rand, ord=2, axis=1)
s_liberal_norm_rand = np.linalg.norm(s_liberal_cohort_rand, ord=2, axis=1)
# average for cohorts
s_aligned_rand = np.mean(s_aligned_norm_rand, axis=1)
s_liberal_rand = np.mean(s_liberal_norm_rand, axis=1)
# decoupling index
s_deCoupIdx_node_rand = s_liberal_rand/s_aligned_rand
print('finished calculating decoupling index for surrogate data')
# 12. network-level harmonics for emperical and surrogate data
s_aligned_network = np.zeros(shape=(num_network))
s_liberal_network = np.zeros(shape=(num_network))
s_aligned_network_individual = np.zeros(shape=(num_network, num_sub))
s_liberal_network_individual = np.zeros(shape=(num_network, num_sub))
s_aligned_network_rand = np.zeros(shape=(num_network, num_rand))
s_liberal_network_rand = np.zeros(shape=(num_network, num_rand))
for i in range(num_network):
s_aligned_network[i] = np.mean(s_aligned[network_rowindex_ls[i]])
s_liberal_network[i] = np.mean(s_liberal[network_rowindex_ls[i]])
s_aligned_network_individual[i,:] = np.mean(s_aligned_individual[network_rowindex_ls[i],:], axis=0)
s_liberal_network_individual[i,:] = np.mean(s_liberal_individual[network_rowindex_ls[i],:], axis=0)
s_aligned_network_rand[i,:] = np.mean(s_aligned_rand[network_rowindex_ls[i],:], axis=0)
s_liberal_network_rand[i,:] = np.mean(s_liberal_rand[network_rowindex_ls[i],:], axis=0)
s_deCoupIdx_network = s_liberal_network/s_aligned_network
s_deCoupIdx_network_individual = s_liberal_network_individual/s_aligned_network_individual
s_deCoupIdx_network_rand = s_liberal_network_rand/s_aligned_network_rand
# 13. brain-level harmonics for emperical and surrogate data
s_aligned_brain = np.mean(s_aligned)
s_liberal_brain = np.mean(s_liberal)
s_deCoupIdx_brain = s_liberal_brain/s_aligned_brain
s_aligned_brain_individual = np.mean(s_aligned_individual, axis=0)
s_liberal_brain_individual = np.mean(s_liberal_individual, axis=0)
s_deCoupIdx_brain_individual = s_liberal_brain_individual/s_aligned_brain_individual
s_aligned_brain_rand = np.mean(s_aligned_rand, axis=0)
s_liberal_brain_rand = np.mean(s_liberal_rand, axis=0)
s_deCoupIdx_brain_rand = s_liberal_brain_rand/s_aligned_brain_rand
print('s_deCoupIdx_brain = {}'.format(s_deCoupIdx_brain))
# 14. significance of surrogate for plot
# node-level
s_deCoupIdx_node_significance = np.logical_or((np.percentile(s_deCoupIdx_node_rand, 5, axis=1) >= s_deCoupIdx_node), (np.percentile(s_deCoupIdx_node_rand, 95, axis=1) <= s_deCoupIdx_node))
s_deCoupIdx_node_significance = s_deCoupIdx_node_significance.astype(int)
# network-level
s_deCoupIdx_network_significance = np.logical_or((np.percentile(s_deCoupIdx_network_rand, 5, axis=1) >= s_deCoupIdx_network), (np.percentile(s_deCoupIdx_network_rand, 95, axis=1) <= s_deCoupIdx_network))
s_deCoupIdx_network_significance = s_deCoupIdx_network_significance.astype(int)
# brain-level
s_deCoupIdx_brain_significance = np.logical_or((np.percentile(s_deCoupIdx_brain_rand, 5, axis=0) >= s_deCoupIdx_brain), (np.percentile(s_deCoupIdx_brain_rand, 95, axis=0) <= s_deCoupIdx_brain))
# 15. save results to csv
if normalize_type == 'W':
normalize_str = '_W'
elif normalize_type == 'L':
normalize_str = '_L'
elif normalize_type == 'both':
normalize_str = '_both'
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
csv_folder = 'BOLD_4D_' + tract_type + '_' + normalize_str
if not os.path.exists(os.path.abspath(csv_folder)):
os.mkdir(os.path.abspath(csv_folder))
# save surrogate (ndarray with num_rand × num_region)
s_deCoupIdx_node_rand_df = pd.DataFrame(data = s_deCoupIdx_node_rand.T, columns = network_assign_csv.loc[:,'LABEL'])
s_deCoupIdx_network_rand_df = pd.DataFrame(data = s_deCoupIdx_network_rand.T, columns = network_unique_df.loc[:,'NETWORK'])
s_deCoupIdx_brain_rand_df = pd.DataFrame(data = s_deCoupIdx_brain_rand)
s_deCoupIdx_node_rand_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_node_rand_df.csv'))
s_deCoupIdx_network_rand_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_' + '-network_rand_df.csv'))
s_deCoupIdx_brain_rand_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_brain_rand_df.csv'))
# save surrogate significance (ndarray with 1 × num_region)
s_deCoupIdx_node_significance_df = pd.DataFrame(data = np.expand_dims(s_deCoupIdx_node_significance, axis=0), columns = network_assign_csv.loc[:,'LABEL'])
s_deCoupIdx_network_significance_df = pd.DataFrame(data = np.expand_dims(s_deCoupIdx_network_significance, axis=0), columns = network_unique_df.loc[:,'NETWORK'])
s_deCoupIdx_node_significance_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_node_significance_df.csv'))
s_deCoupIdx_network_significance_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_' + '-network_significance_df.csv'))
with open(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_brain_significance.txt'), 'w') as output_file:
output_file.write(str(s_deCoupIdx_brain_significance))
# save empirical harmonics for NC cohort (for plot usage, ndarray with 1 × num_region)
s_deCoupIdx_node_empirical_df = pd.DataFrame(data = np.expand_dims(s_deCoupIdx_node, axis=0), columns = network_assign_csv.loc[:,'LABEL'])
s_deCoupIdx_network_empirical_df = pd.DataFrame(data = np.expand_dims(s_deCoupIdx_network, axis=0), columns = network_unique_df.loc[:,'NETWORK'])
s_deCoupIdx_node_empirical_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_node_empirical_df.csv'))
s_deCoupIdx_network_empirical_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_' +'-network_empirical_df.csv'))
with open(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_brain_empirical.txt'), 'w') as output_file:
output_file.write(str(s_deCoupIdx_brain))
# save subject-level harmonics (ndarray with num_sub × num_region)
s_deCoupIdx_node_individual_df = pd.DataFrame(data = s_deCoupIdx_individual.T, columns = network_assign_csv.loc[:,'LABEL'])
s_deCoupIdx_network_individual_df = pd.DataFrame(data = s_deCoupIdx_network_individual.T, columns = network_unique_df.loc[:,'NETWORK'])
s_deCoupIdx_brain_individual_df = pd.DataFrame(data = s_deCoupIdx_brain_individual)
s_deCoupIdx_node_individual_df = pd.concat([path_df.loc[:,'subname'],s_deCoupIdx_node_individual_df],axis=1)
s_deCoupIdx_network_individual_df = pd.concat([path_df.loc[:,'subname'],s_deCoupIdx_network_individual_df],axis=1)
s_deCoupIdx_brain_individual_df = pd.concat([path_df.loc[:,'subname'],s_deCoupIdx_brain_individual_df],axis=1)
s_deCoupIdx_node_individual_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_node_individual_df.csv'))
s_deCoupIdx_network_individual_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_' + '-network_individual_df.csv'))
s_deCoupIdx_brain_individual_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_brain_individual_df.csv'))
# 16.(optional) save connectivity strength
# parcel-level
connectome_parcel_individual = np.zeros(shape=(num_sub, num_parcels))
# mean of nonzero
def non_zero_mean(np_arr):
exist = (np_arr != 0)
num = np_arr.sum(axis=1)
den = exist.sum(axis=1)
return num/den
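# For example (illustrative): non_zero_mean(np.array([[0., 2.], [4., 6.]])) returns
# array([2., 5.]) - zeros are excluded from each row's denominator.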
for sub_idx in range(num_sub):
connectome_parcel_individual[sub_idx,:] = non_zero_mean(connectome_array[:,:,sub_idx])
connectome_parcel_individual_df = pd.DataFrame(data = connectome_parcel_individual, columns = network_assign_csv.loc[:,'LABEL'])
connectome_parcel_individual_df = pd.concat([path_df.loc[:,'subname'], connectome_parcel_individual_df],axis=1)
connectome_parcel_individual_df.to_csv(os.path.join(os.path.abspath(csv_folder), 'connectome_' + '-parcel_individual_df.csv'))
# ICN-level
connectome_network_individual = np.zeros(shape=(num_network, num_sub))
for i in range(num_network):
network_i = network_unique_df.loc[i,'NETWORK']
parcel_network_df = dp(network_assign_csv).filter_by(X.NETWORK.isin([network_i])).pd
parcel_network_id = parcel_network_df.loc[:,'INDEX'].to_numpy()
connectome_network_individual[i,:] = np.mean(connectome_array[np.ix_(parcel_network_id-1,parcel_network_id-1)], axis=(0,1))
connectome_network_individual_df = pd.DataFrame(data = connectome_network_individual.T, columns = network_unique_df.loc[:,'NETWORK'])
from __future__ import division
from functools import wraps
import pandas as pd
import numpy as np
import time
import csv, sys
import os.path
import logging
from .ted_functions import TedFunctions
from .ted_aggregate_methods import TedAggregateMethods
from base.uber_model import UberModel, ModelSharedInputs
class TedSpeciesProperties(object):
"""
Listing of species properties that will eventually be read in from a SQL db
"""
def __init__(self):
"""Class representing Species properties"""
super(TedSpeciesProperties, self).__init__()
self.sci_name = pd.Series([], dtype='object')
self.com_name = pd.Series([], dtype='object')
self.taxa = pd.Series([], dtype='object')
self.order = pd.Series([], dtype='object')
self.usfws_id = pd.Series([], dtype='object')
self.body_wgt = pd.Series([], dtype='object')
self.diet_item = pd.Series([], dtype='object')
self.h2o_cont = pd.Series([], dtype='float')
def read_species_properties(self):
# this is a temporary method to initiate the species/diet food items lists (this will be replaced with
# a method to access a SQL database containing the properties
#filename = './ted/tests/TEDSpeciesProperties.csv'
filename = os.path.join(os.path.dirname(__file__),'tests/TEDSpeciesProperties.csv')
try:
with open(filename,'rt') as csvfile:
# csv.DictReader uses first line in file for column headings by default
dr = pd.read_csv(csvfile) # comma is default delimiter
except csv.Error as e:
            sys.exit('file: %s, %s' % (filename, e))
print(dr)
        self.sci_name = dr.loc[:,'Scientific Name']
        self.com_name = dr.loc[:,'Common Name']
        self.taxa = dr.loc[:,'Taxa']
        self.order = dr.loc[:,'Order']
        self.usfws_id = dr.loc[:,'USFWS Species ID (ENTITY_ID)']
        self.body_wgt = dr.loc[:,'BW (g)']
        self.diet_item = dr.loc[:,'Food item']
        self.h2o_cont = dr.loc[:,'Water content of diet']
class TedInputs(ModelSharedInputs):
"""
Required inputs class for Ted.
"""
def __init__(self):
"""Class representing the inputs for Ted"""
super(TedInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Pandas DataFrame
self.chemical_name = pd.Series([], dtype="object", name="chemical_name")
# application parameters for min/max application scenarios
self.crop_min = pd.Series([], dtype="object", name="crop")
self.app_method_min = pd.Series([], dtype="object", name="app_method_min")
self.app_rate_min = pd.Series([], dtype="float", name="app_rate_min")
self.num_apps_min = pd.Series([], dtype="int", name="num_apps_min")
self.app_interval_min = pd.Series([], dtype="int", name="app_interval_min")
self.droplet_spec_min = pd.Series([], dtype="object", name="droplet_spec_min")
        self.boom_hgt_min = pd.Series([], dtype="object", name="boom_hgt_min")
        self.pest_incorp_depth_min = pd.Series([], dtype="object", name="pest_incorp_depth_min")
        self.crop_max = pd.Series([], dtype="object", name="crop")
        self.app_method_max = pd.Series([], dtype="object", name="app_method_max")
        self.app_rate_max = pd.Series([], dtype="float", name="app_rate_max")
        self.num_apps_max = pd.Series([], dtype="int", name="num_apps_max")
        self.app_interval_max = pd.Series([], dtype="int", name="app_interval_max")
        self.droplet_spec_max = pd.Series([], dtype="object", name="droplet_spec_max")
        self.boom_hgt_max = pd.Series([], dtype="object", name="boom_hgt_max")
        self.pest_incorp_depth_max = pd.Series([], dtype="object", name="pest_incorp_depth_max")
# physical, chemical, and fate properties of pesticide
self.foliar_diss_hlife = pd.Series([], dtype="float", name="foliar_diss_hlife")
self.aerobic_soil_meta_hlife = pd.Series([], dtype="float", name="aerobic_soil_meta_hlife")
self.frac_retained_mamm = pd.Series([], dtype="float", name="frac_retained_mamm")
self.frac_retained_birds = pd.Series([], dtype="float", name="frac_retained_birds")
self.log_kow = pd.Series([], dtype="float", name="log_kow")
self.koc = pd.Series([], dtype="float", name="koc")
self.solubility = pd.Series([], dtype="float", name="solubility")
self.henry_law_const = pd.Series([], dtype="float", name="henry_law_const")
# bio concentration factors (ug active ing/kg-ww) / (ug active ing/liter)
self.aq_plant_algae_bcf_mean = pd.Series([], dtype="float", name="aq_plant_algae_bcf_mean")
self.aq_plant_algae_bcf_upper = pd.Series([], dtype="float", name="aq_plant_algae_bcf_upper")
self.inv_bcf_mean = pd.Series([], dtype="float", name="inv_bcf_mean")
self.inv_bcf_upper = pd.Series([], dtype="float", name="inv_bcf_upper")
self.fish_bcf_mean = pd.Series([], dtype="float", name="fish_bcf_mean")
self.fish_bcf_upper = pd.Series([], dtype="float", name="fish_bcf_upper")
# bounding water concentrations (ug active ing/liter)
self.water_conc_1 = pd.Series([], dtype="float", name="water_conc_1") # lower bound
self.water_conc_2 = pd.Series([], dtype="float", name="water_conc_2") # upper bound
# health value inputs
# naming convention (based on listing from OPP TED Excel spreadsheet 'inputs' worksheet):
# dbt: dose based toxicity
# cbt: concentration-based toxicity
# arbt: application rate-based toxicity
# 1inmill_mort: 1/million mortality (note initial character is numeral 1, not letter l)
# 1inten_mort: 10% mortality (note initial character is numeral 1, not letter l)
# others are self explanatory
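        # worked example of the naming convention (illustrative): dbt_bird_1inmill_mort is the
        # dose-based bird toxicity endpoint for 1-in-a-million mortality (mg-pest/kg-bw), and
        # dbt_bird_1inmill_mort_wgt is the body weight (grams) of the test animal behind it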
# dose based toxicity(dbt): mammals (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort")
self.dbt_mamm_1inten_mort = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_low_ld50 = pd.Series([], dtype="float", name="dbt_mamm_low_ld50")
        self.dbt_mamm_rat_oral_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50")
self.dbt_mamm_rat_derm_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50")
self.dbt_mamm_rat_inhal_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50")
self.dbt_mamm_sub_direct = pd.Series([], dtype="float", name="dbt_mamm_sub_direct")
self.dbt_mamm_sub_indirect = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect")
self.dbt_mamm_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort_wgt")
self.dbt_mamm_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_low_ld50_wgt")
        self.dbt_mamm_rat_oral_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50_wgt")
self.dbt_mamm_rat_derm_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50_wgt")
self.dbt_mamm_rat_inhal_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50_wgt")
self.dbt_mamm_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_direct_wgt")
self.dbt_mamm_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect_wgt")
# dose based toxicity(dbt): birds (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_bird_1inmill_mort = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort")
self.dbt_bird_1inten_mort = pd.Series([], dtype="float", name="dbt_bird_1inten_mort")
self.dbt_bird_low_ld50 = pd.Series([], dtype="float", name="dbt_bird_low_ld50")
self.dbt_bird_hc05 = pd.Series([], dtype="float", name="dbt_bird_hc05")
self.dbt_bird_hc50 = pd.Series([], dtype="float", name="dbt_bird_hc50")
self.dbt_bird_hc95 = pd.Series([], dtype="float", name="dbt_bird_hc95")
self.dbt_bird_sub_direct = pd.Series([], dtype="float", name="dbt_bird_sub_direct")
self.dbt_bird_sub_indirect = pd.Series([], dtype="float", name="dbt_bird_sub_indirect")
self.mineau_sca_fact = pd.Series([], dtype="float", name="mineau_sca_fact")
self.dbt_bird_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort_wgt")
self.dbt_bird_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inten_mort_wgt")
self.dbt_bird_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_bird_low_ld50_wgt")
self.dbt_bird_hc05_wgt = pd.Series([], dtype="float", name="dbt_bird_hc05_wgt")
self.dbt_bird_hc50_wgt = pd.Series([], dtype="float", name="dbt_bird_hc50_wgt")
self.dbt_bird_hc95_wgt = pd.Series([], dtype="float", name="dbt_bird_hc95_wgt")
self.dbt_bird_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_direct_wgt")
self.dbt_bird_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_indirect_wgt")
self.mineau_sca_fact_wgt = pd.Series([], dtype="float", name="mineau_sca_fact_wgt")
# dose based toxicity(dbt): reptiles, terrestrial-phase amphibians (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort")
self.dbt_reptile_1inten_mort = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort")
self.dbt_reptile_low_ld50 = pd.Series([], dtype="float", name="dbt_reptile_low_ld50")
self.dbt_reptile_sub_direct = pd.Series([], dtype="float", name="dbt_reptile_sub_direct")
self.dbt_reptile_sub_indirect = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect")
self.dbt_reptile_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort_wgt")
self.dbt_reptile_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort_wgt")
self.dbt_reptile_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_reptile_low_ld50_wgt")
self.dbt_reptile_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_direct_wgt")
self.dbt_reptile_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect_wgt")
# concentration-based toxicity (cbt) : mammals (mg-pest/kg-diet food)
self.cbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="cbt_mamm_1inmill_mort")
self.cbt_mamm_1inten_mort = pd.Series([], dtype="float", name="cbt_mamm_1inten_mort")
self.cbt_mamm_low_lc50 = pd.Series([], dtype="float", name="cbt_mamm_low_lc50")
self.cbt_mamm_sub_direct = pd.Series([], dtype="float", name="cbt_mamm_sub_direct")
self.cbt_mamm_grow_noec = pd.Series([], dtype="float", name="cbt_mamm_grow_noec")
self.cbt_mamm_grow_loec = pd.Series([], dtype="float", name="cbt_mamm_grow_loec")
self.cbt_mamm_repro_noec = pd.Series([], dtype="float", name="cbt_mamm_repro_noec")
self.cbt_mamm_repro_loec = pd.Series([], dtype="float", name="cbt_mamm_repro_loec")
self.cbt_mamm_behav_noec = pd.Series([], dtype="float", name="cbt_mamm_behav_noec")
self.cbt_mamm_behav_loec = pd.Series([], dtype="float", name="cbt_mamm_behav_loec")
self.cbt_mamm_sensory_noec = pd.Series([], dtype="float", name="cbt_mamm_sensory_noec")
self.cbt_mamm_sensory_loec = pd.Series([], dtype="float", name="cbt_mamm_sensory_loec")
self.cbt_mamm_sub_indirect = pd.Series([], dtype="float", name="cbt_mamm_sub_indirect")
# concentration-based toxicity (cbt) : birds (mg-pest/kg-diet food)
self.cbt_bird_1inmill_mort = pd.Series([], dtype="float", name="cbt_bird_1inmill_mort")
self.cbt_bird_1inten_mort = pd.Series([], dtype="float", name="cbt_bird_1inten_mort")
self.cbt_bird_low_lc50 = pd.Series([], dtype="float", name="cbt_bird_low_lc50")
self.cbt_bird_sub_direct = pd.Series([], dtype="float", name="cbt_bird_sub_direct")
self.cbt_bird_grow_noec = pd.Series([], dtype="float", name="cbt_bird_grow_noec")
self.cbt_bird_grow_loec = pd.Series([], dtype="float", name="cbt_bird_grow_loec")
self.cbt_bird_repro_noec = pd.Series([], dtype="float", name="cbt_bird_repro_noec")
self.cbt_bird_repro_loec = pd.Series([], dtype="float", name="cbt_bird_repro_loec")
self.cbt_bird_behav_noec = pd.Series([], dtype="float", name="cbt_bird_behav_noec")
self.cbt_bird_behav_loec = pd.Series([], dtype="float", name="cbt_bird_behav_loec")
self.cbt_bird_sensory_noec = pd.Series([], dtype="float", name="cbt_bird_sensory_noec")
self.cbt_bird_sensory_loec = pd.Series([], dtype="float", name="cbt_bird_sensory_loec")
self.cbt_bird_sub_indirect = pd.Series([], dtype="float", name="cbt_bird_sub_indirect")
# concentration-based toxicity (cbt) : reptiles, terrestrial-phase amphibians (mg-pest/kg-diet food)
self.cbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="cbt_reptile_1inmill_mort")
self.cbt_reptile_1inten_mort = pd.Series([], dtype="float", name="cbt_reptile_1inten_mort")
self.cbt_reptile_low_lc50 = pd.Series([], dtype="float", name="cbt_reptile_low_lc50")
self.cbt_reptile_sub_direct = pd.Series([], dtype="float", name="cbt_reptile_sub_direct")
self.cbt_reptile_grow_noec = pd.Series([], dtype="float", name="cbt_reptile_grow_noec")
self.cbt_reptile_grow_loec = pd.Series([], dtype="float", name="cbt_reptile_grow_loec")
self.cbt_reptile_repro_noec = pd.Series([], dtype="float", name="cbt_reptile_repro_noec")
self.cbt_reptile_repro_loec = pd.Series([], dtype="float", name="cbt_reptile_repro_loec")
self.cbt_reptile_behav_noec = pd.Series([], dtype="float", name="cbt_reptile_behav_noec")
self.cbt_reptile_behav_loec = pd.Series([], dtype="float", name="cbt_reptile_behav_loec")
self.cbt_reptile_sensory_noec = pd.Series([], dtype="float", name="cbt_reptile_sensory_noec")
self.cbt_reptile_sensory_loec = pd.Series([], dtype="float", name="cbt_reptile_sensory_loec")
self.cbt_reptile_sub_indirect = pd.Series([], dtype="float", name="cbt_reptile_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates body weight (mg-pest/kg-bw(ww))
self.cbt_inv_bw_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_bw_1inmill_mort")
self.cbt_inv_bw_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_bw_1inten_mort")
self.cbt_inv_bw_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_bw_low_lc50")
self.cbt_inv_bw_sub_direct = pd.Series([], dtype="float", name="cbt_inv_bw_sub_direct")
self.cbt_inv_bw_grow_noec = pd.Series([], dtype="float", name="cbt_inv_bw_grow_noec")
self.cbt_inv_bw_grow_loec = pd.Series([], dtype="float", name="cbt_inv_bw_grow_loec")
self.cbt_inv_bw_repro_noec = pd.Series([], dtype="float", name="cbt_inv_bw_repro_noec")
self.cbt_inv_bw_repro_loec = pd.Series([], dtype="float", name="cbt_inv_bw_repro_loec")
self.cbt_inv_bw_behav_noec = pd.Series([], dtype="float", name="cbt_inv_bw_behav_noec")
self.cbt_inv_bw_behav_loec = pd.Series([], dtype="float", name="cbt_inv_bw_behav_loec")
self.cbt_inv_bw_sensory_noec = pd.Series([], dtype="float", name="cbt_inv_bw_sensory_noec")
self.cbt_inv_bw_sensory_loec = pd.Series([], dtype="float", name="cbt_inv_bw_sensory_loec")
self.cbt_inv_bw_sub_indirect = pd.Series([], dtype="float", name="cbt_inv_bw_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates body diet (mg-pest/kg-food(ww))
self.cbt_inv_food_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_food_1inmill_mort")
self.cbt_inv_food_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_food_1inten_mort")
self.cbt_inv_food_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_food_low_lc50")
self.cbt_inv_food_sub_direct = pd.Series([], dtype="float", name="cbt_inv_food_sub_direct")
self.cbt_inv_food_grow_noec = pd.Series([], dtype="float", name="cbt_inv_food_grow_noec")
self.cbt_inv_food_grow_loec = pd.Series([], dtype="float", name="cbt_inv_food_grow_loec")
self.cbt_inv_food_repro_noec = pd.Series([], dtype="float", name="cbt_inv_food_repro_noec")
self.cbt_inv_food_repro_loec = pd.Series([], dtype="float", name="cbt_inv_food_repro_loec")
self.cbt_inv_food_behav_noec = pd.Series([], dtype="float", name="cbt_inv_food_behav_noec")
self.cbt_inv_food_behav_loec = pd.Series([], dtype="float", name="cbt_inv_food_behav_loec")
self.cbt_inv_food_sensory_noec = pd.Series([], dtype="float", name="cbt_inv_food_sensory_noec")
self.cbt_inv_food_sensory_loec = pd.Series([], dtype="float", name="cbt_inv_food_sensory_loec")
self.cbt_inv_food_sub_indirect = pd.Series([], dtype="float", name="cbt_inv_food_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates soil (mg-pest/kg-soil(dw))
        self.cbt_inv_soil_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_soil_1inmill_mort")
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 17 09:22:24 2020
@author: tjcombs
The data is from the following source:
https://www.kaggle.com/anikannal/solar-power-generation-data
"""
import pandas as pd
def get_generation_data():
'''
Returns a DataFrame of the solar power generation data.
DATE_TIME: Date and time for each observation. Observations recorded at 15
minute intervals.
PLANT_ID:
PLANT_ID = 4135001 ==> PLANT = 1
PLANT_ID = 4136001 ==> PLANT = 2
SOURCE_KEY: The inverter id.
DC_POWER: Amount of DC power generated by the inverter (source_key) in this
15 minute interval. Units - kW.
AC_POWER: Amount of AC power generated by the inverter (source_key) in this
15 minute interval. Units - kW.
DAILY_YIELD: Daily yield is a cumulative sum of power generated on that
day, till that point in time.
TOTAL_YIELD: This is the total yield for the inverter till that point in
time.
'''
plant1_generation = pd.read_csv('archive/Plant_1_Generation_Data.csv')
plant1_generation['DATE_TIME'] = pd.to_datetime(plant1_generation['DATE_TIME'])
plant2_generation = pd.read_csv('archive/Plant_2_Generation_Data.csv')
plant2_generation['DATE_TIME'] = pd.to_datetime(plant2_generation['DATE_TIME'])
# The two data sets have the same columns
assert(set(plant1_generation) == set(plant2_generation))
# Combine datasets
    generation = pd.concat((plant1_generation, plant2_generation))
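# Example usage (illustrative; assumes the Kaggle CSV files are available under archive/):
# generation = get_generation_data()
# generation.groupby('PLANT_ID')['DC_POWER'].mean()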
import QUANTAXIS as QA
import numpy as np
import talib
import pandas as pd
import scipy.signal as signal
import matplotlib.pyplot as plt
# Define the MACD function
def TA_MACD(prices, fastperiod=12, slowperiod=26, signalperiod=9):
'''
    Parameter settings:
        fastperiod = 12
        slowperiod = 26
        signalperiod = 9
    Returns: macd - dif, signal - dea, hist * 2 - bar, delta
'''
macd, signal, hist = talib.MACD(prices,
fastperiod=fastperiod,
slowperiod=slowperiod,
signalperiod=signalperiod)
delta = np.r_[np.nan, np.diff(hist * 2)]
return np.c_[macd, signal, hist * 2, delta]
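# Example usage (illustrative only; the input is a 1-D array of close prices and the
# returned columns are [dif, dea, bar, delta]):
# close = np.asarray(data.close, dtype=float)
# dif, dea, bar, delta = TA_MACD(close).T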
# Define the MA (moving average) function
def TA_MA(prices, timeperiod=5):
'''
    Parameter settings:
        timeperiod = 5
    Returns: ma
'''
ma = talib.MA(prices, timeperiod=timeperiod)
return ma
# Define the RSI function
def TA_RSI(prices, timeperiod=12):
'''
    Parameter settings:
        timeperiod = 12
    Returns: rsi, delta
'''
rsi = talib.RSI(prices, timeperiod=timeperiod)
delta = np.r_[np.nan, np.diff(rsi)]
return np.c_[rsi, delta]
# Define the BBANDS (Bollinger Bands) function
def TA_BBANDS(prices, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0):
'''
    Parameter settings:
        timeperiod = 5
        nbdevup = 2
        nbdevdn = 2
    Returns: up, middle, low, ch, ch_ma30, delta, padding_delta
'''
up, middle, low = talib.BBANDS(prices, timeperiod, nbdevup, nbdevdn, matype)
ch = (up - low) / low
delta = np.r_[np.nan, np.diff(ch)]
ma30 = TA_MA(prices, timeperiod=30)
boll_band_channel_padding = (ma30 - low) / low
padding_delta = np.r_[np.nan, np.diff(boll_band_channel_padding)]
ch_ma30 = talib.MA(ch, timeperiod=30)
return np.c_[up, middle, low, ch, ch_ma30, delta, padding_delta]
def TA_KDJ(high, low, close, fastk_period=9, slowk_matype=0, slowk_period=3, slowd_period=3):
    '''
    Parameter settings:
        fastk_period = 9
        slowk_matype = 0
        slowk_period = 3
        slowd_period = 3
    Returns: K, D, J, delta
    '''
    K, D = talib.STOCH(high, low, close, fastk_period=fastk_period, slowk_matype=slowk_matype, slowk_period=slowk_period, slowd_period=slowd_period)
J = 3 * K - 2 * D
delta = np.r_[np.nan, np.diff(J)]
return np.c_[K, D, J, delta]
def TA_CCI(high, low, close, timeperiod=14):
"""
    Name: Commodity Channel Index (CCI)
    Description: measures how far price deviates from its moving average; extreme values flag overbought/oversold conditions.
    """
    real = talib.CCI(high, low, close, timeperiod=timeperiod)
delta = np.r_[np.nan, np.diff(real)]
return np.c_[real, delta]
# Writing an indicator is straightforward
def ifup20(data):
    # QA built-in indicator calculation (native Python code)
return (QA.MA(data.close, 5)-QA.MA(data.close, 20)).dropna() > 0
def ifup20_TA(data):
    # TA-Lib based calculation
return (TA_MA(data.close, 5)-TA_MA(data.close, 20)).dropna() > 0
# Apply to a QADataStruct
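# For example (illustrative; assumes QUANTAXIS's QADataStruct interface):
# data = QA.QA_fetch_stock_day_adv('000001', '2019-01-01', '2020-01-01').to_qfq()
# ind = data.add_func(ifup20)   # evaluate the indicator per stock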
# Custom indicator MAX_FACTOR using QA built-in indicators (native Python code)
def ifmaxfactor_greater(data):
RSI = QA.QA_indicator_RSI(data)
CCI = QA.QA_indicator_CCI(data)
KDJ = QA.QA_indicator_KDJ(data)
MAX_FACTOR = CCI['CCI'] + (RSI['RSI1'] - 50) * 4 + (KDJ['KDJ_J'] - 50) * 4
MAX_FACTOR_delta = np.r_[np.nan, np.diff(MAX_FACTOR)]
REGRESSION_BASELINE = (RSI['RSI1'] - 50) * 4
return ((MAX_FACTOR+MAX_FACTOR_delta)-(REGRESSION_BASELINE-133)).dropna() > 0
# Custom indicator MAX_FACTOR using TA-Lib
def ifmaxfactor_greater_TA(data):
RSI = TA_RSI(data.close)
CCI = TA_CCI(data.high, data.low, data.close)
KDJ = TA_KDJ(data.high, data.low, data.close)
MAX_FACTOR = CCI[:,0] + (RSI[:,0] - 50) * 4 + (KDJ[:,2] - 50) * 4
MAX_FACTOR_delta = np.r_[np.nan, np.diff(MAX_FACTOR)]
REGRESSION_BASELINE = (RSI[:,0] - 50) * 4
    return pd.DataFrame(((MAX_FACTOR+MAX_FACTOR_delta)-(REGRESSION_BASELINE-133)), index=data.index)
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import backend as K
from sklearn.preprocessing import StandardScaler
class ModelNBEATS():
    def __init__(self, y, **kwargs):
        """Creates an NBEATS model object.
        Params: y - pandas DataFrame/Series with all time series required for modeling purposes.
        **kwargs:
            xreg - pandas DataFrame with all external regressors (for prediction purposes, additional periods must be included)
            scale_data - bool, standardize the data (zero mean, unit variance) with StandardScaler; default True
            num_layers - int, number of dense layers to use in NBEATS block; default 4
            num_neurons - int, number of neurons within each dense layer; default 128
            stacks - list/str, list or comma separated str of values for stacks; default: 'generic, trend, seasonality, generic'
            activation - str; default 'relu'
            optimizer - str/keras optimizer object; default 'adam'
            num_epoch - int, number of epochs for training; default 100
            loss - str/keras loss object, used for loss evaluation in training; default 'mse'
            verbose - parameter to control log/print when fitting the model
        """
        if isinstance(y, pd.Series):
            self.id = y.name
            self.out = 1
        else:
            self.id = y.columns
            self.out = len(y.columns)
        self.xreg = kwargs.get('xreg', pd.DataFrame())
        if self.xreg.shape[0] > 0:
            self.y = y.join(self.xreg).replace([np.inf, -np.inf], 0).fillna(0)
        else:
            self.y = y.replace([np.inf, -np.inf], 0).fillna(0)
        self.y_norm = self.y.copy()
        self.scale_data = kwargs.get('scale_data', True)
        if self.scale_data:
            self.scaler = StandardScaler().fit(y)
            self.y_norm[self.id] = self.scaler.transform(y)
        self.num_layers = kwargs.get('num_layers', 4)
        self.num_neurons = kwargs.get('num_neurons', 128)
        self.stacks = kwargs.get('stacks', 'generic, trend, seasonality, generic')
        if type(self.stacks) == str:
            # strip whitespace so 'generic, trend, ...' matches the stack-type checks below
            self.stacks = [s.strip() for s in self.stacks.split(',')]
        self.activation = kwargs.get('activation', 'relu')
        self.optimizer = kwargs.get('optimizer', 'adam')
        self.num_epoch = kwargs.get('num_epoch', 100)
        self.loss = kwargs.get('loss', 'mse')
        self.model = None
        self.verbose = kwargs.get('verbose', 1)
class NBeatsBlock(tf.keras.layers.Layer):
def __init__(self, input_size: int, theta_size: int, horizon: int, n_neurons: int, n_layers: int, stack_type: str, **kwargs):
super().__init__(**kwargs)
self.input_size = input_size
self.theta_size = theta_size
self.horizon = horizon
self.n_neurons = n_neurons
self.n_layers = n_layers
self.stack_type = stack_type
# by default block contains stack of 4 fully connected layers each has ReLU activation
self.hidden = [tf.keras.layers.Dense(n_neurons, activation="relu") for _ in range(n_layers)]
# Output of block is a theta layer with linear activation
self.theta_layer = tf.keras.layers.Dense(theta_size, activation="linear", name="theta")
def linear_space(self, backcast_length, forecast_length, is_forecast=True):
ls = K.arange(-float(backcast_length), float(forecast_length), 1) / forecast_length
return ls[backcast_length:] if is_forecast else K.abs(K.reverse(ls[:backcast_length], axes=0))
def seasonality_model(self, thetas, backcast_length, forecast_length, is_forecast):
p = thetas.get_shape().as_list()[-1]
p1, p2 = (p // 2, p // 2) if p % 2 == 0 else (p // 2, p // 2 + 1)
t = self.linear_space(backcast_length, forecast_length, is_forecast=is_forecast)
s1 = K.stack([K.cos(2 * np.pi * i * t) for i in range(p1)])
s2 = K.stack([K.sin(2 * np.pi * i * t) for i in range(p2)])
if p == 1:
s = s2
else:
s = K.concatenate([s1, s2], axis=0)
s = K.cast(s, np.float32)
return K.dot(thetas, s)
def trend_model(self, thetas, backcast_length, forecast_length, is_forecast):
p = thetas.shape[-1] # take time dimension
t = self.linear_space(backcast_length, forecast_length, is_forecast=is_forecast)
t = K.transpose(K.stack([t for i in range(p)]))
t = K.cast(t, np.float32)
return K.dot(thetas, K.transpose(t))
def call(self, inputs):
x = inputs
for layer in self.hidden:
x = layer(x)
theta = self.theta_layer(x)
if self.stack_type == 'generic':
backcast, forecast = theta[:, :self.input_size], theta[:, -self.horizon:]
            elif self.stack_type in ('seasonal', 'seasonality'):
backcast = tf.keras.layers.Lambda(self.seasonality_model, arguments={'is_forecast': False, 'backcast_length': self.theta_size, 'forecast_length': self.horizon}, name='seasonal')(theta[:, :self.input_size])
forecast = tf.keras.layers.Lambda(self.seasonality_model, arguments={'is_forecast': True, 'backcast_length': self.theta_size, 'forecast_length': self.horizon}, name='seasonal')(theta[:, -self.horizon:])
else:
backcast = tf.keras.layers.Lambda(self.trend_model, arguments={'is_forecast': False, 'backcast_length': self.theta_size, 'forecast_length': self.horizon}, name='trend')(theta[:, :self.input_size])
forecast = tf.keras.layers.Lambda(self.trend_model, arguments={'is_forecast': True, 'backcast_length': self.theta_size, 'forecast_length': self.horizon}, name='trend')(theta[:, -self.horizon:])
return backcast, forecast
def _define_model_object(self):
"""
Build NBEATS model arhitecture
"""
if self.model is not None:
return self.model
else:
shape_t, shape_f = len(self.y.index)//2, self.y_norm.shape[1]
inputs = tf.keras.layers.Input(shape=(shape_t, shape_f))
initial_block = self.NBeatsBlock(input_size=shape_t, theta_size=shape_f, horizon=1, n_neurons=self.num_neurons, n_layers=self.num_layers, stack_type=self.stacks[0])
residuals, forecast = initial_block(inputs)
for i in range(1, len(self.stacks)):
backcast, block_forecast = self.NBeatsBlock(input_size=shape_t, theta_size=shape_f, horizon=1, n_neurons=self.num_neurons, n_layers=self.num_layers, stack_type=self.stacks[i])(residuals)
residuals = tf.keras.layers.subtract([residuals, backcast], name=f"subtract_{i}")
forecast = tf.keras.layers.add([forecast, block_forecast], name=f"add_{i}")
model = tf.keras.Model(inputs=inputs, outputs=forecast[0])
return model
def fit(self):
"""
Fit model to the provided data
"""
model = self._define_model_object()
generator = tf.keras.preprocessing.sequence.TimeseriesGenerator(self.y_norm.values, self.y_norm[self.id].values, length=model.input.get_shape()[1], batch_size=1)
model.compile(optimizer=self.optimizer, loss=self.loss)
model.fit(generator, steps_per_epoch=1, epochs=self.num_epoch, shuffle=False, verbose=self.verbose)
self.model = model
return self.model
def save(self, path):
"""
Save model object - provide full path, for example: '~/usr/models/mymodel.h5'
"""
self.model.save(path)
def load(self, path):
"""
Load model object - provide full path, for example: '~/usr/models/mymodel.h5'
"""
self.model = tf.keras.models.load_model(path)
def predict(self, h):
"""
Generate predictions for h steps ahead
Params: h - number of steps to forecast
If xreg data was used during the training, it must be included for next h periods in the future
"""
periods=pd.date_range(start=max(self.y.index), periods=h+1, freq=self.y.index.freq)[1:]
pred = pd.DataFrame(data=[], columns=self.y.columns, index=periods)
if self.xreg.shape[0]>0:
pred[self.xreg.columns] = self.xreg[self.xreg.index.isin(pred.index)].values
tmp_pred = self.y_norm[-self.model.input.get_shape()[1]:]
for i in range(h):
inp = np.asarray(tmp_pred[-self.model.input.get_shape()[1]:].values.reshape((1, self.model.input.get_shape()[1], self.y_norm.shape[1]))).astype(np.float32)
p = self.model.predict(inp, verbose=0)
pred.loc[pred.index[i], self.id] = p
            tmp_pred = pd.concat([tmp_pred, pred.iloc[[i]]])
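# Example usage (sketch; assumes `y_series` is a pandas Series with a DatetimeIndex that has
# an explicit freq, which predict() relies on through self.y.index.freq):
# model = ModelNBEATS(y_series, num_epoch=10)
# model.fit()
# forecast = model.predict(h=12)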
import pandas as pd
import numpy as np
def read_market_value_csv(location='../data/data_money/', season_start=2005, season_end=2020,
col_names=['club', 'squad_size', 'avg_age', 'num_foreigners', 'market_value',
'av_market_value']):
"""
    Returns a DataFrame containing the estimated market value of the clubs per season.
    :param location: folder holding the transfermarkt CSV files (tm-<start>-<start+1>.csv)
    :param season_start: first season (start year) to read
    :param season_end: last season (start year) to read
    :param col_names: column names of the resulting DataFrame
    :return: DataFrame with one row per club and season
"""
market_value = pd.DataFrame(columns=col_names)
for season in range(season_start, season_end + 1):
temp_df = pd.read_csv(location + 'tm-' + str(season) + '-' + str(season + 1) + '.csv')
temp_df['season'] = season
market_value = pd.concat(
[market_value, temp_df])
return market_value
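# Example usage (illustrative; assumes CSVs named tm-<year>-<year+1>.csv exist under ../data/data_money/):
# market_value = read_market_value_csv(season_start=2010, season_end=2015)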
teams = pd.read_csv('../data/teams.csv')
import numpy as np
import pandas as pd
from IPython.display import display, Markdown as md, clear_output
from datetime import datetime, timedelta
import plotly.figure_factory as ff
import qgrid
import re
from tqdm import tqdm
class ProtectListener():
def __init__(self, pp_log, lng):
"""
Class to analyse protection information.
...
Attributes:
-----------
df (pd.DataFrame): raw data extracted from Wikipedia API.
        lng (str): language, one of {'en', 'de'}
        inf_str / exp_str (str): "indefinite" / "expires" for English,
                                 "unbeschränkt" / "bis" for German
"""
self.lng = lng
self.df = pp_log
if self.lng == "en":
self.inf_str = "indefinite"
self.exp_str = "expires"
elif self.lng == "de":
self.inf_str = "unbeschränkt"
self.exp_str = "bis"
else:
display(md("This language is not supported yet."))
self.inf_str = "indefinite"
self.exp_str = "expires"
def get_protect(self, level="semi_edit"):
"""
Main function of ProtectListener.
...
Parameters:
-----------
level (str): select one from {"semi_edit", "semi_move", "fully_edit", "fully_move", "unknown"}
...
Returns:
-----------
final_table (pd.DataFrame): detailed dataframe containing protection records for a particular type/level.
plot_table (pd.DataFrame): dataframe for further Gantt Chart plotting.
"""
if len(self.df) == 0:
display(md(f"No {level} protection records!"))
return None, pd.DataFrame(columns=["Task", "Start", "Finish", "Resource"])
else:
self.df = self.df.drop(self.df[self.df["action"] == "move_prot"].index).reset_index(drop=True)
if len(self.df) == 0:
display(md(f"No {level} protection records!"))
return None, | pd.DataFrame(columns=["Task", "Start", "Finish", "Resource"]) | pandas.DataFrame |
import numpy as np
import pandas as pd
import sys, os
import urllib.request
import socket
import datetime
import json
import constants
import conversion
import re
PASTAVAILABILITYOUTFILE = 'prediction/pastavailabilities-' + datetime.datetime.now().replace(microsecond=0).isoformat() + '.csv'
CURRENTPASTAVAILABILITYFILE = 'prediction/pastavailabilities-current.csv'
BASEURL = 'https://dev.hsl.fi/citybike/stations/'
def fetchAndWriteCurrentAvailability():
"""Fetch the citybike availability from HSL API for the last 12 hours.
Note: the availabilities are the spot availabilities at the even hours.
Write the availabilities in the folder /prediction.
"""
timeNowAsDateTime = datetime.datetime.utcnow().replace(microsecond=0)
timeNow = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
timeStr = formatTimeStringToHSL(timeNow)
times = []
timeStrs = []
for x in range(13):
times.append((timeNowAsDateTime - datetime.timedelta(hours=x)).isoformat())
timeStrs.append(formatTimeStringToHSL(times[x]))
df = fetchSeriesOfFiles(timeStrs)
df['time'] = df['time'].apply(lambda t: conversion.getUTCTimeStampFromTimeStampString(str(t)))
# Write to csv
# Disabled: write out timestamped past availability file. Uncomment next line to enable.
#df.to_csv(PASTAVAILABILITYOUTFILE, index=False)
df.to_csv(CURRENTPASTAVAILABILITYFILE, index=False)
def formatTimeStringToHSL(time):
timeStr = str(time)
timeStr = timeStr.translate({ord(c): None for c in '-:'})
timeStr = timeStr[:11] + '0001Z'
return timeStr
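# Worked example of the conversion above (illustrative, not from the source):
#   '2020-05-01T13:45:12'  ->  '20200501T134512'  ->  '20200501T130001Z'
# i.e. dashes and colons are stripped, then minutes/seconds are replaced by
# '0001Z' so the string matches the file-name pattern used by the HSL endpoint.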
def formatTimeStringToData(timeStr):
timeStr = timeStr[:11] + '0000Z'
time = datetime.datetime.strptime(timeStr, "%Y%m%dT%H%M%SZ")
return time
def parseAndWriteWeatherForecast(response):
"""
# Write to csv, both timestamped version and replace current
df_weatherPred.to_csv(WEATHERFORECASTOUTFILE, index=False)
df_weatherPred.to_csv(CURRENTWEATHERFORECASTFILE, index=False)
print('Latest weather forecast fetched.') """
def fetchSingleFile(timeStr):
reqURL = BASEURL + 'stations_' + timeStr + '.json'
print('Sending request to: ', reqURL)
req = urllib.request.Request(reqURL)
try:
response = urllib.request.urlopen(req, timeout=10)
#parseAndWriteWeatherForecast(response)
return json.load(response)
except socket.timeout:
print('Bike availability data could not be fetched.')
        print(' Socket timed out - URL %s' % req)
return False
except urllib.error.URLError as e:
print('Bike availability data could not be fetched.')
if hasattr(e, 'reason'):
print(' Program failed to reach HSL server.')
print(' Reason: ', e.reason)
elif hasattr(e, 'code'):
print('Bike availability data could not be fetched.')
print(' The HSL server couldn\'t fulfill the request.')
print(' Error code: ', e.code)
return False
def fetchSeriesOfFiles(timeStrs):
aggregateDf = | pd.DataFrame(columns=['stationid', 'time', 'avlbikes']) | pandas.DataFrame |
def meanOrderFrequency(path_to_dataset):
"""
Displays the mean order frequency by utilizing the orders table.
:param path_to_dataset: this path should have all the .csv files for the dataset
:type path_to_dataset: str
"""
assert isinstance(path_to_dataset, str)
import pandas as pd
order_file_path = path_to_dataset + '/orders.csv'
orders = pd.read_csv(order_file_path)
    print('On average, people order once every', orders['days_since_prior_order'].mean(), 'days')
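# Usage sketch (hypothetical dataset location, not from the original source;
# the directory is expected to contain the Instacart CSV files, e.g. orders.csv):
#
#   meanOrderFrequency('./instacart_2017_05_01')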
def numOrdersVsDays(path_to_dataset):
"""
Displays the number of orders and how this number varies with change in days since last order.
:param path_to_dataset: this path should have all the .csv files for the dataset
:type path_to_dataset: str
"""
assert isinstance(path_to_dataset, str)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
order_file_path = path_to_dataset + '/orders.csv'
orders = pd.read_csv(order_file_path)
order_by_date = orders.groupby(by='days_since_prior_order').count()
fig = plt.figure(figsize = [15, 7.5])
ax = fig.add_subplot()
order_by_date['order_id'].plot.bar(color = '0.75')
ax.set_xticklabels(ax.get_xticklabels(), fontsize= 15)
plt.yticks(fontsize=16)
ax.get_xaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x))))
ax.get_yaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x/1000))))
ax.set_xlabel('Days since previous order', fontsize=16)
ax.set_ylabel('Number of orders / 1000', fontsize=16)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_children()[7].set_color('0.1')
ax.get_children()[14].set_color('0.1')
ax.get_children()[21].set_color('0.1')
ax.get_children()[30].set_color('0.1')
my_yticks = ax.get_yticks()
plt.yticks([my_yticks[-2]], visible=True)
plt.xticks(rotation = 'horizontal');
def numOrderDaysSizeBubble(path_to_dataset):
"""
Plots a bubble plot in which:
x: Days since Previous Order
y: Number of orders/1000
size: Average Size of order given it was placed on x
:param path_to_dataset: this path should have all the .csv files for the dataset
:type path_to_dataset: str
"""
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
assert isinstance(path_to_dataset, str)
order_file_path = path_to_dataset + '/orders.csv'
order_product_prior_file_path = path_to_dataset + '/order_products__prior.csv'
orders = pd.read_csv(order_file_path)
order_products_prior = pd.read_csv(order_product_prior_file_path)
order_id_count_products = order_products_prior.groupby(by='order_id').count()
orders_with_count = order_id_count_products.merge(orders, on='order_id')
order_by_date = orders.groupby(by='days_since_prior_order').count()
# take above table and group by days_since_prior_order
df_mean_order_size = orders_with_count.groupby(by='days_since_prior_order').mean()['product_id']
df_mean_order_renamed = df_mean_order_size.rename('average_order_size')
bubble_plot_dataframe = pd.concat([order_by_date['order_id'], df_mean_order_renamed], axis=1)
bubble_plot_dataframe['average_order_size'].index.to_numpy()
fig = plt.figure(figsize=[15,7.5])
ax = fig.add_subplot()
plt.scatter(bubble_plot_dataframe['average_order_size'].index.to_numpy(), bubble_plot_dataframe['order_id'].values, s=((bubble_plot_dataframe['average_order_size'].values/bubble_plot_dataframe['average_order_size'].values.mean())*10)**3.1, alpha=0.5, c = '0.5')
plt.xticks(np.arange(0, 31, 1.0));
ax.xaxis.grid(True)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_xlabel('Days since previous order', fontsize=16)
ax.set_ylabel('Number of orders / 1000', fontsize=16)
ax.get_xaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x))))
ax.get_yaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x/1000))))
my_yticks = ax.get_yticks()
plt.yticks([my_yticks[-2], my_yticks[0]], visible=True);
fig = plt.figure(figsize=[10,9])
ax = fig.add_subplot()
plt.scatter(bubble_plot_dataframe['average_order_size'].index.to_numpy()[:8], bubble_plot_dataframe['order_id'].values[:8], s=((bubble_plot_dataframe['average_order_size'].values[:8]/bubble_plot_dataframe['average_order_size'].values.mean())*10)**3.1, alpha=0.5, c = '0.5')
plt.xticks(np.arange(0, 8, 1.0));
ax.xaxis.grid(True)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_xlabel('Days since previous order', fontsize=16)
ax.set_ylabel('Number of orders / 1000', fontsize=16)
ax.get_xaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x))))
ax.get_yaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x/1000))))
my_yticks = ax.get_yticks()
plt.yticks([my_yticks[-2], my_yticks[0]], visible=True);
def orderTimeHeatMaps(path_to_dataset):
"""
Plots the distribution of order with respect to hour of day and day of the week.
:param path_to_dataset: this path should have all the .csv files for the dataset
:type path_to_dataset: str
"""
assert isinstance(path_to_dataset, str)
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
order_file_path = path_to_dataset + '/orders.csv'
orders = pd.read_csv(order_file_path)
grouped_data = orders.groupby(["order_dow", "order_hour_of_day"])["order_number"].aggregate("count").reset_index()
grouped_data = grouped_data.pivot('order_dow', 'order_hour_of_day', 'order_number')
grouped_data.index = pd.CategoricalIndex(grouped_data.index, categories=[0,1,2,3,4,5,6])
grouped_data.sort_index(level=0, inplace=True)
plt.figure(figsize=(12,6))
hour_of_day = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14','15','16', '17', '18', '19','20', '21', '22', '23']
dow = [ 'SUN', 'MON', 'TUES', 'WED', 'THUR','FRI','SAT']
ax = sns.heatmap(grouped_data, xticklabels=hour_of_day,yticklabels=dow,cbar_kws={'label': 'Number Of Orders Made/1000'})
cbar = ax.collections[0].colorbar
cbar.set_ticks([0, 10000, 20000, 30000, 40000, 50000])
cbar.set_ticklabels(['0','10.0','20.0','30.0','40.0','50.0'])
ax.figure.axes[-1].yaxis.label.set_size(15)
ax.figure.axes[0].yaxis.label.set_size(15)
ax.figure.axes[0].xaxis.label.set_size(15)
ax.set(xlabel='Hour of Day', ylabel= "Day of the Week")
ax.set_title("Number of orders made by Day of the Week vs Hour of Day", fontsize=15)
plt.show()
grouped_data = orders.groupby(["order_dow", "order_hour_of_day"])["order_number"].aggregate("count").reset_index()
grouped_data = grouped_data.pivot('order_dow', 'order_hour_of_day', 'order_number')
grouped_data.index = pd.CategoricalIndex(grouped_data.index, categories=[0,1,2,3,4,5,6])
grouped_data.sort_index(level=0, inplace=True)
plt.figure(figsize=(12,6))
hour_of_day = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14','15','16', '17', '18', '19','20', '21', '22', '23']
dow = [ 'SUN', 'MON', 'TUES', 'WED', 'THUR','FRI','SAT']
ax = sns.heatmap(np.log(grouped_data), xticklabels=hour_of_day,yticklabels=dow,cbar=False)
cbar = ax.collections[0].colorbar
ax.figure.axes[-1].yaxis.label.set_size(15)
ax.figure.axes[0].yaxis.label.set_size(15)
ax.figure.axes[0].xaxis.label.set_size(15)
ax.set(xlabel='Hour of Day', ylabel= "Day of the Week")
ax.set_title("Number of orders made by Day of the Week vs Hour of Day (Log Scale)", fontsize=15)
plt.show()
def generateWordCloud(path_to_dataset):
"""
Generates word cloud.
:param path_to_dataset: path to dataset
:type path_to_dataset: str
"""
assert isinstance(path_to_dataset, str)
from wordcloud import WordCloud
import pandas as pd
import matplotlib.pyplot as plt
product_path = path_to_dataset + "/products.csv"
aisles_path = path_to_dataset + "/aisles.csv"
departments_path = path_to_dataset + "/departments.csv"
order_product_prior_path = path_to_dataset + "/order_products__prior.csv"
df_products = pd.read_csv(product_path)
df_aisles = pd.read_csv(aisles_path)
df_departments = | pd.read_csv(departments_path) | pandas.read_csv |
# -*- coding: utf-8 -*-
# Copyright © 2021 by <NAME>. All rights reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Mix of utilities."""
import argparse
import json
import logging
from pathlib import Path
from typing import Any, Callable, Dict, List
import pandas as pd
import yaml
from ndj_pipeline import config, model, post
def clean_column_names(column_list: List[str]) -> Dict[str, str]:
"""Simple string cleaning rules for columns.
Args:
column_list: Column names to be cleaned
Returns:
A dict mapping old and cleaned column names.
"""
new_column_list = [
(
col.lower()
.strip()
.replace(" ", "_")
.replace(r"/", "_")
.replace(r"\n", "_")
.replace(r"\\", "_")
.replace(r"\t", "_")
.replace(" ", "_")
.replace("^", "")
)
for col in column_list
]
return dict(zip(column_list, new_column_list))
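# Illustrative example of the mapping produced above (not from the source):
#
#   clean_column_names(["First Name", "Height/Weight", "Score^2"])
#   # -> {"First Name": "first_name",
#   #     "Height/Weight": "height_weight",
#   #     "Score^2": "score2"}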
def get_model(function: str) -> Callable:
"""Simple redirection to get named function from model.py."""
return getattr(model, function)
def get_post(function: str) -> Callable:
"""Simple redirection to get named function from post.py."""
return getattr(post, function)
def load_model_config(model_config_path: str) -> Dict[str, Any]:
"""Loads model config, either from yaml or json format."""
config_path = Path(model_config_path)
if config_path.suffix == ".yaml":
return yaml.safe_load(config_path.open())
elif config_path.suffix == ".json":
return json.load(config_path.open())
else:
raise ValueError(f"Unsupported config file type {model_config_path}")
def get_model_path(model_config: Dict[str, Any]) -> Path:
"""Returns the model path from config file."""
return Path(config.default_model_folder, model_config["run_name"])
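# Usage sketch (hypothetical config path, not from the original source):
#
#   model_config = load_model_config("configs/run_01.yaml")
#   model_path = get_model_path(model_config)   # <default_model_folder>/<run_name>
#   create_model_folder(model_config)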
def create_model_folder(model_config: Dict[str, Any]) -> None:
"""Create model asset folder and write config if it doesn't exist."""
model_path = get_model_path(model_config)
if model_path.exists():
return None
else:
model_path.mkdir()
config = Path(model_path, "config.json")
with open(config, "w") as f:
json.dump(model_config, f, indent=4)
def create_tables_html() -> None:
"""Scan schemas directory to create HTML page for data documentation."""
schema_paths = Path("schemas").glob("*.yaml")
html_list = []
for schema_path in schema_paths:
logging.info(f"Loading schema from {schema_path}")
with open(schema_path, "r") as f:
schema = yaml.safe_load(f)
table_name = f"<h1>{schema_path.stem.title()}</h1>"
html_list.append(table_name)
table_comment = schema.get("comment", "")
html_list.append(table_comment)
table_html = parse_schema_to_table(schema)
html_list.append(table_html)
output_path = Path("docs", "data_dictionary.html")
logging.info(f"Saving data dictionary to {output_path}")
html = "\n<p>\n".join(html_list)
with open(output_path, "w") as f:
f.write(html)
def parse_schema_to_table(schema: Dict[str, Any]) -> str:
"""Parses a table schema into a HTML table for use in documentation."""
data = | pd.DataFrame.from_dict(schema["columns"], orient="index") | pandas.DataFrame.from_dict |
import re
import time
from functools import partial
from itertools import product
from multiprocessing import Manager, Pool
import pandas as pd
from numpy.random import shuffle
from toolz import partition_all
from toolz.dicttoolz import valfilter
from cnswd.mongodb import get_db
from cnswd.setting.constants import MARKET_START, MAX_WORKER
from cnswd.utils import make_logger
from cnswd.websource.wy import fetch_company_info
from .base import get_stock_status
logger = make_logger('网易公司资料')
NAMES = ['公司简介', 'IPO资料']
START = MARKET_START.tz_localize(None)
def create_index_for(collection):
# 不存在索性信息时,创建索引
if not collection.index_information():
collection.create_index([("股票代码", 1)])
def need_refresh(collection2, code):
"""是否需要刷新
简单规则:
如果已经存在IPO日期,且24小时内已经刷新 ❌ 否则 ✔
"""
now = pd.Timestamp('now')
doc = collection2.find_one({'股票代码': code})
if doc:
        # Refresh only when more than one day has passed since the last update and the listing date is still empty
cond1 = now - doc['更新时间'] >= pd.Timedelta(days=1)
cond2 = pd.isnull(doc.get('上市日期', None))
return cond1 and cond2
else:
return True
def _droped_null(doc):
res = {}
for k, v in doc.items():
if not | pd.isnull(doc[k]) | pandas.isnull |
from datetime import (
datetime,
timedelta,
timezone,
)
import numpy as np
import pytest
import pytz
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesFillNA:
def test_fillna_nat(self):
series = Series([0, 1, 2, NaT.value], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="pad")
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
series = Series([NaT.value, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="bfill")
filled2 = df.fillna(value=series[1])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
def test_fillna_value_or_method(self, datetime_series):
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
datetime_series.fillna(value=0, method="ffill")
def test_fillna(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method="ffill"))
ts[2] = np.NaN
exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="ffill"), exp)
exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="backfill"), exp)
exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
ts.fillna()
def test_fillna_nonscalar(self):
# GH#5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.0])
tm.assert_series_equal(result, expected)
result = s1.fillna({})
tm.assert_series_equal(result, s1)
result = s1.fillna(Series((), dtype=object))
tm.assert_series_equal(result, s1)
result = s2.fillna(s1)
tm.assert_series_equal(result, s2)
result = s1.fillna({0: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna({1: 1})
tm.assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
tm.assert_series_equal(result, s1)
def test_fillna_aligns(self):
s1 = Series([0, 1, 2], list("abc"))
s2 = Series([0, np.nan, 2], list("bac"))
result = s2.fillna(s1)
expected = Series([0, 0, 2.0], list("bac"))
tm.assert_series_equal(result, expected)
def test_fillna_limit(self):
ser = Series(np.nan, index=[0, 1, 2])
result = ser.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
result = ser.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
def test_fillna_dont_cast_strings(self):
# GH#9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ["0", "1.5", "-0.3"]
for val in vals:
ser = Series([0, 1, np.nan, np.nan, 4], dtype="float64")
result = ser.fillna(val)
expected = Series([0, 1, val, val, 4], dtype="object")
tm.assert_series_equal(result, expected)
def test_fillna_consistency(self):
# GH#16402
# fillna with a tz aware to a tz-naive, should result in object
ser = Series([Timestamp("20130101"), NaT])
result = ser.fillna(Timestamp("20130101", tz="US/Eastern"))
expected = Series(
[Timestamp("20130101"), Timestamp("2013-01-01", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(result, expected)
msg = "The 'errors' keyword in "
with tm.assert_produces_warning(FutureWarning, match=msg):
# where (we ignore the errors=)
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
# with a non-datetime
result = ser.fillna("foo")
expected = Series([Timestamp("20130101"), "foo"])
tm.assert_series_equal(result, expected)
# assignment
ser2 = ser.copy()
ser2[1] = "foo"
tm.assert_series_equal(ser2, expected)
def test_fillna_downcast(self):
# GH#15277
# infer int64 from float64
ser = Series([1.0, np.nan])
result = ser.fillna(0, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
ser = Series([1.0, np.nan])
result = ser.fillna({1: 0}, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
def test_timedelta_fillna(self, frame_or_series):
# GH#3371
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
td = ser.diff()
obj = frame_or_series(td)
# reg fillna
result = obj.fillna(Timedelta(seconds=0))
expected = Series(
[
timedelta(0),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# interpreted as seconds, no longer supported
msg = "value should be a 'Timedelta', 'NaT', or array of those. Got 'int'"
with pytest.raises(TypeError, match=msg):
obj.fillna(1)
result = obj.fillna(Timedelta(seconds=1))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(timedelta(days=1, seconds=1))
expected = Series(
[
timedelta(days=1, seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(np.timedelta64(10 ** 9))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(NaT)
expected = Series(
[
NaT,
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
],
dtype="m8[ns]",
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# ffill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.ffill()
expected = td.fillna(Timedelta(seconds=0))
expected[0] = np.nan
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# bfill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.bfill()
expected = td.fillna(Timedelta(seconds=0))
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
def test_datetime64_fillna(self):
ser = Series(
[
Timestamp("20130101"),
| Timestamp("20130101") | pandas.Timestamp |
##########################################################################
## Summary
##########################################################################
'''
Runs a Flask-based web server that can dynamically run our models for us.
Warning - overall pretty quick and dirty - not great attention payed to consistent
or short variable names, or the ability to reuse code sections.
Just getting a website up in time for the presentation.
'''
##########################################################################
## Imports & Configuration
##########################################################################
from flask import Flask
from flask import render_template
import pickle
import pandas
import json
from sklearn.ensemble import RandomForestClassifier
import numpy
from sklearn.preprocessing import StandardScaler, Imputer, LabelEncoder, MinMaxScaler, OneHotEncoder
from sklearn.pipeline import Pipeline
def clean_dataframe(dataframe, debug=False):
'''
A modified version of clean_dataframe copied from the
prediction/data_utilities.py, edited so that it can operate
standalone (had conflicts with Flask)
'''
#Convert all the categorical names to numbers.
with open('meta.json', 'r') as f:
meta = json.load(f)
categorical_features = meta['categorical_features']
for column_name in categorical_features:
if column_name in dataframe.columns:
categories = categorical_features[column_name]
categories_map = {x:i for i,x in enumerate(categories)}
dataframe[column_name] = dataframe[column_name].map(categories_map)
#Replacing string values in rent
replace_mapping = { 'median_rent': {'-': numpy.nan,'100-': 100, '2,000+': 2000}}
try:
dataframe.replace(to_replace=replace_mapping, inplace=True)
dataframe['median_rent'] = | pandas.to_numeric(dataframe['median_rent'], errors='ignore') | pandas.to_numeric |
# coding: utf-8
# Create input features for the boosted decision tree model.
import os
import sys
import math
import datetime
import pandas as pd
from sklearn.pipeline import Pipeline
from common.features.lag import LagFeaturizer
from common.features.rolling_window import RollingWindowFeaturizer
from common.features.stats import PopularityFeaturizer
from common.features.temporal import TemporalFeaturizer
# Append TSPerf path to sys.path
tsperf_dir = os.getcwd()
if tsperf_dir not in sys.path:
sys.path.append(tsperf_dir)
# Import TSPerf components
from utils import df_from_cartesian_product
import retail_sales.OrangeJuice_Pt_3Weeks_Weekly.common.benchmark_settings as bs
pd.set_option("display.max_columns", None)
def oj_preprocess(df, aux_df, week_list, store_list, brand_list, train_df=None):
df["move"] = df["logmove"].apply(lambda x: round(math.exp(x)))
df = df[["store", "brand", "week", "move"]].copy()
# Create a dataframe to hold all necessary data
d = {"store": store_list, "brand": brand_list, "week": week_list}
data_grid = df_from_cartesian_product(d)
data_filled = pd.merge(data_grid, df, how="left", on=["store", "brand", "week"])
# Get future price, deal, and advertisement info
data_filled = pd.merge(data_filled, aux_df, how="left", on=["store", "brand", "week"])
# Fill missing values
if train_df is not None:
data_filled = | pd.concat(train_df, data_filled) | pandas.concat |
import os
import zipfile
import pandas as pd
# import flirt
import flirt.reader.empatica
def get_features_for_empatica_archive(zip_file_path: str,
window_length: int = 180,
window_step_size: int = 1,
hrv_features: bool = True,
eda_features: bool = True,
acc_features: bool = True,
debug: bool = False) -> pd.DataFrame:
"""
This function provides a standard set of HRV, EDA, ACC features for a given Empatica archive \
(e.g. downloaded from E4 connect)
Parameters
----------
zip_file_path : str
path to the Empatica zip file
window_length : int
the epoch width (window size) in seconds, for which features should be calculated
window_step_size : int
the step size for the sliding window in seconds
hrv_features : bool
whether HRV features should be calculated from the archive
eda_features : bool
whether EDA features should be calculated from the archive
acc_features : bool
whether ACC features should be calculated from the archive
debug : bool
whether debug output should be printed
Returns
-------
pd.DataFrame
a pandas DataFrame containing a standard set of features for Empatica
Examples
--------
>>> import flirt.simple
>>> features = flirt.simple.get_features_for_empatica_archive("1560460372_A12345.zip")
"""
if zip_file_path is None or not os.path.isfile(zip_file_path) or not zipfile.is_zipfile(zip_file_path):
        raise ValueError('zip file does not exist: %s' % zip_file_path)
if debug:
print("Reading files")
df_hrv_features = pd.DataFrame()
df_eda_features = pd.DataFrame()
df_acc_features = pd.DataFrame()
with zipfile.ZipFile(zip_file_path) as zip_file:
if hrv_features:
with zip_file.open("IBI.csv") as f:
ibi_data = flirt.reader.empatica.read_ibi_file_into_df(f)
if debug:
print("Calculating HRV features")
df_hrv_features = flirt.hrv.get_hrv_features(ibi_data.iloc[:, 0], window_length=window_length,
window_step_size=window_step_size)
if eda_features:
with zip_file.open("EDA.csv") as f:
eda_data = flirt.reader.empatica.read_eda_file_into_df(f)
if debug:
print("Calculating EDA features")
df_eda_features = flirt.eda.get_eda_features(eda_data.iloc[:, 0], window_length=window_length,
window_step_size=window_step_size).add_prefix('eda_')
if acc_features:
with zip_file.open("ACC.csv") as f:
acc_data = flirt.reader.empatica.read_acc_file_into_df(f)
if debug:
print("Calculating ACC features")
df_acc_features = flirt.acc.get_acc_features(acc_data[:], window_length=window_length,
window_step_size=window_step_size).add_prefix('acc_')
return __merge_features(df_hrv_features, df_eda_features, df_acc_features, freq='%ds' % window_step_size)
def __merge_features(hrv_features: pd.DataFrame, eda_features: pd.DataFrame, acc_features: pd.DataFrame,
freq: str = '1s') -> pd.DataFrame:
if hrv_features.empty and eda_features.empty and acc_features.empty:
print("Received empty input features, returning empty df")
return | pd.DataFrame() | pandas.DataFrame |
# -*- coding: UTF-8 -*-
"""
@CreateDate: 2020/07/18
@Author: <NAME>
@File: process.py
@Project: stagewiseNN
"""
import os
import sys
from pathlib import Path
from typing import Sequence, Mapping, Optional, Union, Callable
import logging
import pandas as pd
import numpy as np
import scanpy as sc
from scipy import sparse
from sklearn.preprocessing import label_binarize
from ._scale import wrapper_scale
def check_dirs(path):
if os.path.exists(path):
print('already exists:\n\t%s' % path)
else:
os.makedirs(path)
print('a new directory made:\n\t%s' % path)
def reverse_dict(d: dict, ):
"""
the values of the dict must be list-like type
"""
d_rev = {}
for k in d.keys():
vals = d[k]
_d = dict.fromkeys(vals, k)
d_rev.update(_d)
return d_rev
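# Illustrative example (not from the source): every element of each value-list
# is mapped back to its key, e.g.
#   reverse_dict({"A": ["a1", "a2"], "B": ["b1"]})
#   # -> {"a1": "A", "a2": "A", "b1": "B"}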
def describe_dataframe(df: pd.DataFrame, **kwargs):
for c in df.columns:
print(c.center(40, '-'), **kwargs)
print(describe_series(df[c], asstr=True), **kwargs)
def describe_series(
srs: Sequence, max_cats: int = 100,
asstr: bool = False,
):
""" inspect data-structure """
srs = pd.Series(srs)
if len(srs.unique()) <= max_cats:
desc_type = 'Value counts'
result = srs.value_counts(dropna=False)
elif isinstance(srs[0], (int, float)):
desc_type = 'Numerical summary'
result = srs.describe()
else:
desc_type = 'Header lines'
result = srs.head()
if asstr:
return f'{desc_type}:\n{result}'
else:
return result, desc_type
def make_binary(mat):
mat_bin = mat.copy()
mat_bin[mat_bin > 0] = 1.
return mat_bin
def set_adata_hvgs(
adata: sc.AnnData,
gene_list: Optional[Sequence] = None,
indicator: Optional[Sequence[bool]] = None,
slim: bool = True,
copy: bool = False,
):
"""
Setting the given (may be pre-computed) set of genes as highly variable,
if `copy` is False, changes will be made to the input adata.
if slim is True and adata.raw is None, raw data will be backup.
"""
if copy:
adata = adata.copy()
logging.info(
'Setting the given set of %d genes as highly variable' % len(gene_list))
if (indicator is None) and (gene_list is not None):
indicator = [g in gene_list for g in adata.var_names]
adata.var['highly_variable'] = indicator
if slim:
if adata.raw is None:
adata.raw = adata
logging.info('slimming adata to contain only HVGs')
adata = adata[:, adata.var['highly_variable']].copy()
return adata
def change_names(
seq: Sequence,
mapping: Optional[Mapping] = None,
**kwmaps
) -> list:
mapping = {} if mapping is None else mapping
mapping.update(kwmaps)
func = lambda x: mapping.get(x, x)
return list(map(func, seq))
def normalize_default(
adata, target_sum=None,
copy=False, log_only=False,
):
"""Normalizing datasets with default settings (total-counts normalization
followed by log(x+1) transform).
Parameters
----------
adata
``AnnData`` object
target_sum
scale factor of total-count normalization
copy
whether to copy the dataset
log_only
whether to skip the "total-counts normalization" and only perform
log(x+1) transform
Returns
-------
``AnnData`` or None
"""
if copy:
adata = adata.copy()
logging.info('A copy of AnnData made!')
else:
logging.info('No copy was made, the input AnnData will be changed!')
logging.info('normalizing datasets with default settings.')
if not log_only:
logging.info(
f'performing total-sum normalization, target_sum={target_sum}...')
sc.pp.normalize_total(adata, target_sum=target_sum)
else:
logging.info('skipping total-sum normalization')
sc.pp.log1p(adata)
return adata
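# Usage sketch (assumes `adata` holds raw counts; the example dataset below is
# only an assumption for illustration, not from the original source):
#
#   adata = sc.datasets.pbmc3k()
#   adata = normalize_default(adata, target_sum=1e4, copy=True)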
def normalize_log_then_total(
adata, target_sum=None,
copy=False,
):
""" For SplitSeq data, performing log(x+1) BEFORE total-sum normalization
    will result in a better UMAP visualization (e.g. clusters would be less
    confounded by differences in total counts).
"""
if copy:
adata = adata.copy()
logging.info('A copy of AnnData made!')
sc.pp.log1p(adata, )
sc.pp.normalize_total(adata, target_sum=target_sum, )
return adata
def groupwise_hvgs_freq(
adata,
groupby='batch',
return_hvgs: bool = True,
**hvg_kwds,
):
""" Separately compute highly variable genes (HVGs) for each group, and
count the frequencies of genes being selected as HVGs among those groups.
Parameters
----------
adata
the ``AnnData`` object
groupby
a column name in ``adata.obs`` specifying batches or groups that you
would like to independently compute HVGs.
return_hvgs
whether to return the computed dict of HVG-lists for each group
hvg_kwds
Other Parameters for ``sc.pp.highly_variable_genes``
Returns
-------
hvg_freq: dict
the HVG frequencies
hvg_dict: dict
returned only if ``return_hvgs`` is True
"""
from collections import Counter
hvg_dict = {}
hvg_freq = Counter()
group_labels = adata.obs[groupby]
for g in group_labels.unique():
_adt = adata[group_labels == g].copy()
sc.pp.highly_variable_genes(_adt, **hvg_kwds)
_hvgs = _adt.var[_adt.var['highly_variable']].index
hvg_freq += Counter(_hvgs)
if return_hvgs:
hvg_dict[g] = list(_hvgs)
hvg_freq = dict(hvg_freq)
if return_hvgs:
return hvg_freq, hvg_dict
return hvg_freq
def take_high_freq_elements(
freq: Mapping,
min_freq: int = 3):
return list(filter(lambda x: freq[x] >= min_freq, freq.keys()))
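# Usage sketch tying the two helpers above together (assumed workflow, not from
# the original source; `n_top_genes` is forwarded to sc.pp.highly_variable_genes):
#
#   hvg_freq, hvg_dict = groupwise_hvgs_freq(adata, groupby='batch',
#                                            n_top_genes=2000)
#   shared_hvgs = take_high_freq_elements(hvg_freq, min_freq=3)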
def set_precomputed_neighbors(
adata,
distances,
connectivities,
n_neighbors=15,
metric='cosine', # pretended parameter
method='umap', # pretended parameter
metric_kwds=None, # pretended parameter
use_rep=None, # pretended parameter
n_pcs=None, # pretended parameter
key_added=None, #
):
if key_added is None:
key_added = 'neighbors'
conns_key = 'connectivities'
dists_key = 'distances'
else:
conns_key = key_added + '_connectivities'
dists_key = key_added + '_distances'
if connectivities is None:
connectivities = distances.copy().tocsr()
connectivities[connectivities > 0] = 1
adata.obsp[dists_key] = distances
adata.obsp[conns_key] = connectivities
adata.uns[key_added] = {}
neighbors_dict = adata.uns[key_added]
neighbors_dict['connectivities_key'] = conns_key
neighbors_dict['distances_key'] = dists_key
neighbors_dict['params'] = {'n_neighbors': n_neighbors, 'method': method}
neighbors_dict['params']['metric'] = metric
if metric_kwds is not None:
neighbors_dict['params']['metric_kwds'] = metric_kwds
if use_rep is not None:
neighbors_dict['params']['use_rep'] = use_rep
if n_pcs is not None:
neighbors_dict['params']['n_pcs'] = n_pcs
return adata
def quick_preprocess_raw(
adata: sc.AnnData,
target_sum: Optional[int] = None,
hvgs: Optional[Sequence] = None,
batch_key=None,
copy=True,
log_first: bool = False,
**hvg_kwds
) -> sc.AnnData:
"""
Go through the data-analysis pipeline, including normalization, HVG
selection, and z-scoring (centering and scaling)
Parameters
----------
adata
the ``Anndata`` object
target_sum
the target total counts after normalization.
If `None`, after normalization, each observation (cell) has a total
count equal to the median of total counts for observations (cells)
before normalization.
hvgs
highly variable genes to be used for dimensionality reduction
(centering and PCA)
batch_key
a column name in ``adata.obs`` specifying the batch labels
copy
        whether to make a copy of the input data. If `False`, the data object
        will be changed in place.
log_first
for some data distributions, perform log(x+1) before total-count
normalization might give a better result (e.g. clustering results
may be less affected by the sequencing depths)
hvg_kwds
other key-word parameters for ``sc.pp.highly_variable_genes``
Returns
    -------
    sc.AnnData
        the preprocessed data: normalized, subset to the HVGs, and z-scored
"""
if copy:
_adata = adata.copy()
logging.info('A copy of AnnData made!')
else:
_adata = adata
logging.info('No copy was made, the input AnnData will be changed!')
# 1: normalization
if log_first:
normalize_log_then_total(_adata, target_sum=target_sum)
else:
normalize_default(_adata, target_sum=target_sum)
# 2: HVG selection (skipped if `hvgs` is given)
if hvgs is None:
sc.pp.highly_variable_genes(
_adata, batch_key=batch_key, **hvg_kwds)
indicator = _adata.var['highly_variable']
# _adata = _adata[:, _adata.var['highly_variable']]
else:
indicator = None
_adata = set_adata_hvgs(_adata, gene_list=hvgs, indicator=indicator, )
# 3: z-score
wrapper_scale(_adata, groupby=batch_key)
return _adata
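# Usage sketch (assumed workflow, not from the original source; extra keyword
# arguments such as `n_top_genes` are forwarded to sc.pp.highly_variable_genes):
#
#   adata_pp = quick_preprocess_raw(adata, batch_key='batch', n_top_genes=2000)
#   sc.tl.pca(adata_pp)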
def label_binarize_each(labels, classes, sparse_out=True):
lb1hot = label_binarize(labels, classes=classes, sparse_output=sparse_out)
if len(classes) == 2:
lb1hot = lb1hot.toarray()
lb1hot = np.c_[1 - lb1hot, lb1hot]
if sparse_out:
lb1hot = sparse.csc_matrix(lb1hot)
return lb1hot
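# Illustrative example (not from the source): with only two classes the sklearn
# binarizer returns a single column, so the helper expands it to one column per
# class before (optionally) converting back to a sparse matrix:
#
#   label_binarize_each(["a", "b", "a"], classes=["a", "b"]).toarray()
#   # -> [[1, 0],
#   #     [0, 1],
#   #     [1, 0]]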
def group_mean(X, labels,
binary=False, classes=None, features=None,
print_groups=True):
"""
    This function may be more efficient than `df.groupby().mean()`
    when handling a sparse matrix.
Parameters
----------
X: shape (n_samples, n_features)
labels: shape (n_samples, )
classes: optional
names of groups
features: optional
names of features
print_groups: bool
whether to inspect the groups
"""
classes = np.unique(labels, ) if classes is None else classes
if binary:
X = (X > 0) # .astype('float')
print('Binarized...the results will be the expression proportions.')
if len(classes) == 1:
grp_mean = X.mean(axis=0).T
else:
lb1hot = label_binarize_each(labels, classes=classes, sparse_out=True)
print(f'Calculating feature averages for {len(classes)} groups')
if print_groups:
print(classes)
grp_mean = X.T.dot(lb1hot) / lb1hot.sum(axis=0)
grp_mean = | pd.DataFrame(grp_mean, columns=classes, index=features) | pandas.DataFrame |
import argparse
import logging
from keras.models import Sequential
from keras.layers import *
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import xarray as xr
# ----------------------------------------------------------------------------------------------------------------------
# set up a basic, global _logger which will write to the console as standard error
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
_logger = logging.getLogger(__name__)
# ----------------------------------------------------------------------------------------------------------------------
def pull_vars_into_dataframe(dataset,
variables,
level,
hemisphere=None):
"""
Create a pandas DataFrame from the specified variables of an xarray DataSet.
:param dataset: xarray.DataSet
:param variables: list of variables to be extracted from the DataSet and included in the resulting DataFrame
:param level: the level index (all times, lats, and lons included from this indexed level)
:param hemisphere: 'north', 'south', or None
:return:
"""
# the dataframe we'll populate and return
df = | pd.DataFrame() | pandas.DataFrame |
# =============================================================================
# File: DocSim_class.py
# Author: <NAME>, <NAME>
# Functions:
# init: (defining the class object)
# input: dataframe: data,
# string: text, name of the column in data that contains text as a single string
# Additional features:
# factors: defined in preprocessing function. Retreived in getFeatureName function
# vectorized_documents: defined in preprocessing function. Retreived in getFeatureName function
# similarity_scores: defined in doc_sim function. Retreived in getSimalarityScore function
# preprocessing:
# input: optional boolean parameters: remove_stopwords, stem, tfidf, LSA. Default False
# optional list of additional stopwords: filler_words. Default empty list
# optional int number of LSA topics: lsa_n_components. Default 2
# output: Returns a dataframe with an additional column "cleaned_vectorized_document"
# that contains the preprocessed and vectorized documents.
# Note: Preprocessing does not change the dataframe used in defining the class.
# It returns a copy of the preprocessed dataframe and saves the list of words
# corresponding to the word vectors to the object.
# get_feature_names:
# input: None
# output: Returns the list of words that corresponds to the numeric vectors
# get_preprocessed_text:
# input: None
# output: Returns the series 'clean_processed_text'.
# =============================================================================
#%%
# Standard Library Modules
import os
import re
import string
import datetime
#Related 3rd Party Modules
import nltk
nltk.download('wordnet')
import pandas
import numpy
import scipy
import string
#sklearn does not automatically import its subpackages
import sklearn
import sklearn.metrics
import sklearn.decomposition
import sklearn.feature_extraction
#%%
# DocSim Class
class DocSim: #defining the class
"""
Parameters
----------
data : dataframe
Dataframe where one column contains the text from the documents.
skill : String
Specify the name of the column that contains the skill information.
study : String
Specify the name of the column that contains the study information.
doc_type : String
Specify the name of the column that contains the document type.
e.g. "script" or "transcript"
text : String
Specify the name of the column that contains the document text.
"""
def __init__(self, data, skill, study, doc_type, doc_id, text):
# Initialize the attributes
self.data = data.fillna("NA")
self.doc_id = doc_id
self.skill = skill
self.study = study
self.doc_type = doc_type
self.text = text
# The column of preprocessed numeric vectors
self.vectorized_documents = "Apply function 'preprocessing' before \
'get_preprocessed_text' to get the column 'clean_vectorized_document'"
# The word vector corresponding to the numeric vector
self.tfidf_factors = "Apply function 'preprocessing' before \
'get_tfidf_feature_names' to get the feature names associated \
with the numeric vectors in 'clean_vectorized_document'"
self.lsa_factors = "Apply function 'preprocessing' before \
'get_lsa_feature_names' to get the feature names associated \
with the numeric vectors in 'clean_vectorized_document'"
# Check if column name specified is correct
if self.text not in self.data.columns:
raise SystemExit(f"Incorrect 'text' used. \
Cannot find {self.text} in data")
if self.skill not in self.data.columns:
raise SystemExit(f"Incorrect 'skill' used. \
Cannot find {self.skill} in data")
if self.study not in self.data.columns:
raise SystemExit(f"Incorrect 'study' used. \
Cannot find {self.study} in data")
if self.doc_type not in self.data.columns:
raise SystemExit(f"Incorrect 'doc_type' used. \
Cannot find {self.doc_type} in data")
# clean and convert text to numeric vector
def preprocessing(self,
remove_stopwords = False,
filler_words = [],
stem = False,
lemm = False,
tfidf = False,
tfidf_level = 'skill',
lsa = False,
lsa_n_components = 2,
ngram = 1,
remove_punctuation = None):
"""
Parameters
----------
remove_stopwords : Boolean, optional
Remove stopwords and punctuation from the Natural Language Toolkit's
(NLTK) pre-specified list. The default is False.
filler_words : List of strings, optional
Specify any additional stop words to be removed. The default is [].
stem : Boolean, optional
Replace all word derivatives with a single stem. The default is False.
tfidf : Boolean, optional
Weight each word frequency using term frequency–inverse document
frequency (tf-idf) weighting. The default is False.
tfidf_level: string, ['full', 'study', 'skill', 'document']
Specify the level of hierarchy to apply tf-idf
lsa : Boolean, optional
            Apply the dimensionality reduction technique Latent Semantic
Analysis (LSA).
The default is False.
lsa_n_components : Int, optional
The number of topics in the output data. The default is 2.
Returns
-------
df : Dataframe
A copy data with an additional column containing the preprocessed
and vectorized documents.
Package Dependencies
------
nltk
string
sklearn
"""
# Make a copy of the data
df = self.data.copy()
# Isolate text column as lowercase
text = self.data[self.text].str.lower()
# Convert filler words from list to a set
filler_words = set(filler_words)
# Define a set of stopwords
if remove_stopwords:
filler_words = filler_words.union(\
set(nltk.corpus.stopwords.words('english')))
# Add punctuation to stopwords, if applicable
if (remove_punctuation is None and remove_stopwords) \
or remove_punctuation:
filler_words = filler_words.union(set(string.punctuation))
# If no items have been added to filler words,
# define stopwords as None
if not filler_words:
filler_words = None
# Set aside ngram filler words to remove later
ngram_fillers = []
if not filler_words is None and len(filler_words) > 0:
ngram_fillers = [x for x in filler_words if " " in x]
# print(filler_words)
# Stem words by cutting off at the root of the word
if stem:
if filler_words == None:
filler_words = [filler_words]
# Tokenize
# text = text.apply(lambda x: nltk.tokenize.casual.casual_tokenize(x))
text = text.apply(lambda x: nltk.tokenize.word_tokenize(x))
# Set up the stemmer and apply stemming
stemmer = nltk.stem.SnowballStemmer('english')
text = text.apply(lambda x: [stemmer.stem(item) for item in x])
# Remove filler words if possible, and join together
text = text.apply(lambda x: \
' '.join([y for y in x if y not in filler_words]))
# Ensure filler words are not applied multiple times
filler_words = None
# Lemmantize by converting words to a simpler form
elif lemm:
if filler_words == None:
filler_words = [filler_words]
# text = text.apply(lambda x: nltk.tokenize.casual.casual_tokenize(x))
text = text.apply(lambda x: nltk.tokenize.word_tokenize(x))
# Set up the lemmatizer and apply
wnl = nltk.stem.WordNetLemmatizer()
text = text.apply(lambda x: [wnl.lemmatize(item) for item in x])
# Remove filler words if possible, and join together
text = text.apply(lambda x: \
' '.join([y for y in x if y not in filler_words]))
# Ensure filler words are not applied multiple times
filler_words = None
# Remove ngram > 1 stop words, filler words,
# and/or punctuation that are in filler_words
if ngram_fillers:
for nf in ngram_fillers:
text = text.apply(lambda x: x.lower().replace(nf.lower(), " "))
# Vectorize the text: using tf-idf weights
if tfidf:
# Create default TFIDFVectorizer input parameters
tf_params = {'lowercase': True,
'stop_words': filler_words,
'ngram_range': (1, ngram)}
# Lookup dictionary for alternative TF-IDF parameter settings
tfopts = {"bool": ["binary", True],
"l1": ["norm", "l1"],
"l2": ["norm", "l2"],
"logn": ["smooth_idf", True],
"sub": ["sublinear_tf", True],
"tf_only": ["use_idf", False]}
# Handle alternative tf-idf options using lookup dict
if tfidf in tfopts.keys():
b = tfopts[tfidf]
tf_params[b[0]] = b[1]
# Handle different level of TF-IDF
if tfidf_level == 'full':
# Tfidf Vectorizer
# vectorizer = sklearn.feature_extraction.text.\
# TfidfVectorizer(lowercase = True,
# stop_words = filler_words,
# ngram_range = (1, ngram))
vectorizer = sklearn.feature_extraction.text.\
TfidfVectorizer(**tf_params)
vectors = vectorizer.fit_transform(text.tolist())
# Save feature names
self.tfidf_factors = [('full', vectorizer.get_feature_names())]
# TF-IDF at each appropriate Level
else:
level_opts = {'skill': self.skill,
'study': self.study,
'document': self.doc_id}
df = df.sort_values(level_opts[tfidf_level])
vectors = self.tfidf_preprocessing(level = level_opts[tfidf_level],
text = text,
tfidf_params = tf_params)
# filler_words = filler_words,
# ngram = ngram,
# If tf-idf is not enabled, vectorize the text using word counts
else:
# Count vectorize
vectorizer = sklearn.feature_extraction.text.\
CountVectorizer(lowercase = True,
stop_words = filler_words,
ngram_range = (1, ngram))
vectors = vectorizer.fit_transform(text.tolist())
# Save feature names
self.tfidf_factors = [('full', vectorizer.get_feature_names())]
# Apply LSA using the vectorized text
if lsa:
# Check if the number of LSA components is >= the number of features
if lsa_n_components >= min([len(self.tfidf_factors[x][1]) \
for x in range(0, len(self.tfidf_factors))]):
raise SystemExit("lsa_n_components is too large for this set of documents.\
lsa_n_components must be less than " +
str(min([len(self.tfidf_factors[x][1])
for x in range(0, len(self.tfidf_factors))])))
# Rename the vectors
vectorized_text = vectors
# Define the LSA function
lsa_function = sklearn.decomposition.TruncatedSVD( \
n_components = lsa_n_components, random_state = 100)
# Convert text to vectors
vectors = lsa_function.fit_transform(vectorized_text).tolist()
# Define list of topic numbers
self.lsa_factors = ['topic ' + str(i) for i in range(1, lsa_n_components + 1)]
# Save the vectorized documents to the class
self.vectorized_documents = vectors
df["cleaned_vectorized_document"] = vectors
return df
# Append preprocessed text to the dataframe and return
denselist = vectors.todense().tolist()
# # Save the vectorized documents to the class
self.vectorized_documents = denselist
df["cleaned_vectorized_document"] = denselist
return df
def tfidf_preprocessing(self, level, text, tfidf_params = None):
#filler_words, ngram = 1,
"""
Apply TF-IDF at different level of hierarchy
Parameters
----------
level: string
Column name of the level of hierarchy in self.data
text: Pandas Series
A Pandas series of raw text for each document
        tfidf_params: dict
            Keyword arguments passed on to sklearn's TfidfVectorizer (and to the
            CountVectorizer used to build the shared vocabulary)
"""
less_params = tfidf_params.copy()
leaveout = ['binary', 'norm', 'smooth_idf', 'sublinear_tf', 'use_idf']
less_params = {k: less_params[k] for k in less_params.keys() if k not in leaveout}
# Get all unique words for mapping
vectorizer = sklearn.feature_extraction.text.\
CountVectorizer(**less_params)
vectors = vectorizer.fit_transform(text.tolist())
# Save feature names as a dictionary of Data Frame
unique_words = vectorizer.get_feature_names()
df_all = pandas.DataFrame(index = unique_words)
# Write the text back to self.data and sort by skill
# so that the ordering is correct
self.data = self.data.sort_values(level)
self.data[self.text] = text
# Create empty list to store results
vectors = list()
self.tfidf_factors = list()
for index in self.data[level].unique():
# if not index or index is numpy.nan:
# continue
# Extract the raw text for this study group
tmp_text = self.data.loc[
self.data[level] == index, [self.doc_id, self.text]]
# print(index, len(tmp_text.index))
# Train and Fit TF-IDF
vectorizer = sklearn.feature_extraction.text.\
TfidfVectorizer(**tfidf_params)
tmp_vectors = vectorizer.fit_transform(
tmp_text[self.text].tolist())
# Get the TF-IDF weights and feature names
tmp_weights = tmp_vectors.todense().tolist()
tmp_factors = vectorizer.get_feature_names()
# Store weights as a data frame
df_tmp = pandas.DataFrame(numpy.transpose(tmp_weights),
index = tmp_factors)
# Match with all unique words for identical structure
vectors2 = df_all \
.join(df_tmp, how = "left") \
.fillna(0)
vectors += vectors2.to_numpy() \
.T \
.tolist()
# # each row is a unique word, each column is a unique document
# vocab = pandas.DataFrame.from_dict(vectorizer.vocabulary_,
# orient="index", columns=["count"])
# vectors2.index.name = "i"
# vocab.index.name = "i"
# vocab = vocab.merge(vectors2, how="outer", on='i')
# print(vocab.head())
# Store features
self.tfidf_factors += [(index, tmp_factors)]
# Convert vectors back to sparse matrix
return scipy.sparse.csr_matrix(vectors)
# Get the series containing the vectorized documents
def get_preprocessed_text(self):
"""
Get the series containing the vectorized documents
"""
return self.vectorized_documents
# Get the features names for the numeric vector
def get_tfidf_feature_names(self):
"""
Get the features names for the tfidf feature names
"""
return self.tfidf_factors
def get_lsa_feature_names(self):
"""
Get the features names for the lsa feature names
"""
return self.lsa_factors
def get_skill(self):
"""
Get the series containing the unique skills
"""
return self.data[self.skill].unique()
def get_doc_type(self):
"""
Get the series containing the unique document type
"""
return self.data[self.doc_type].unique()
def get_study(self, skill_id = []):
"""
Get the series containing the unique study information by skill
Parameters
----------
skill_id: list
Put in the skill id that you want to look at.
"""
if len(skill_id) == 0:
skill_id = self.get_skill()
return self.data[self.data[self.skill] \
.isin(skill_id)][self.study].unique()
def check_preprocessing_input(self,
remove_stopwords,
filler_words,
stem,
lemm,
tfidf,
tfidf_level,
lsa,
lsa_n_components,
ngram,
remove_punctuation):
# Check if the method is coded correctly
# if method not in ('cosine'):
# raise SystemExit("Incorrect 'method' used. Use 'cosine'")
# Check preprocessing settings:
if remove_stopwords not in (True, False):
raise SystemExit("Incorrect 'remove_stopwords' used. \
Use True or False")
if stem not in (True, False):
raise SystemExit("Incorrect 'stem' used. Use True or False")
if lemm not in (True, False):
raise SystemExit("Incorrect 'lemm' used. Use True or False")
if stem and lemm:
raise SystemExit("Stemmed text may not also be lemmatized.")
if remove_punctuation not in (True, False, None):
raise SystemExit("Incorrect 'remove_punctuation' used. Use True or False")
if tfidf not in (True, False, "bool", "l1", "l2", "logn", "sub", "tf_only"):
raise SystemExit("Incorrect 'tfidf' used. Use True or False")
if tfidf_level not in ('full', 'skill', 'study', 'document'):
raise SystemExit("Incorrect 'tfidf_level' used. Use \
'full', 'skill', 'study' or 'document'. ")
if lsa not in (True, False):
raise SystemExit("Incorrect 'lsa' used. Use True or False")
if type(lsa_n_components) is not int:
raise SystemExit("Incorrect 'LSA_n_components' used. \
LSA_n_components must be an integer")
elif lsa_n_components < 2:
raise SystemExit("Incorrect 'LSA_n_components' used. \
Set LSA_n_components as an int that is greater than or equal to 2")
def create_sparse_matrix(self, data, col = 'cleaned_vectorized_document'):
"""
Convert the vectorized column to a sparse matrix
        A sparse matrix is a matrix in which the majority of elements are zero.
        To save memory and increase computational efficiency, we convert the
        vectorized column to a sparse matrix.
`cleaned_vectorized_document` is the column generated by the
preprocessing() function above. It is the reserved column name that
specifies the output of preprocessing.
Parameter
----------
data: Data Frame
Data Frame object that contains the column of the vectorized text
col: String
Column name of the column that contains vectorized text
"""
return scipy.sparse.csr_matrix([i for i in data[col]])
# Scenario #1: Normal
def normal_comparison(self,
method = 'cosine',
remove_stopwords = False,
filler_words = [],
stem = False,
lemm = False,
tfidf = False,
tfidf_level = 'skill',
lsa = False,
lsa_n_components = 2,
ngram = 1,
remove_punctuation = None):
"""
        Get the cosine similarity between each transcript and the benchmark
        script for each skill
"""
# Check Input Value
self.check_preprocessing_input(remove_stopwords = remove_stopwords,
filler_words = filler_words,
stem = stem,
lemm = lemm,
tfidf = tfidf,
tfidf_level = tfidf_level,
lsa = lsa,
lsa_n_components = lsa_n_components,
ngram = ngram,
remove_punctuation = remove_punctuation)
# NLP Preprocessing:
self.document_matrix = self.preprocessing(remove_stopwords = remove_stopwords,
filler_words = filler_words,
stem = stem,
lemm = lemm,
tfidf = tfidf,
tfidf_level = tfidf_level,
lsa = lsa,
lsa_n_components = lsa_n_components,
ngram = ngram,
remove_punctuation = remove_punctuation)
# Sufficiency Check
if self.document_matrix.empty:
raise SystemExit("Insufficient data to process.")
# Collect the transcripts and sort by skill
transcript = self.document_matrix.\
loc[self.document_matrix[self.doc_type] == 'transcript'].\
sort_values(self.skill)
# Calculate the Similarity Score
if method == 'cosine':
# Create an empty list to store the similarity scores
similarity_score = numpy.array([])
# iterate over different skills
for skills in self.get_skill():
# Extract the script for this skill
tmp_script = self.document_matrix.\
loc[(self.document_matrix[self.doc_type] == 'script') & \
(self.document_matrix[self.skill] == skills)]
# Extract the transcript for this skill
                tmp_transcript = transcript.\
                    loc[transcript[self.skill] == skills]
# Calculate the similarity score and store it
similarity_score = numpy.concatenate([similarity_score,
sklearn.metrics.pairwise.\
cosine_similarity(self.create_sparse_matrix(tmp_script),
self.create_sparse_matrix(tmp_transcript),
dense_output = True). \
reshape(1, -1)[0]])
            # Write the similarity score back to the original DF
transcript['similarity_score'] = \
similarity_score.reshape(-1, 1) #.round(6)
# Return the output data frame
return(transcript)
else:
# Create an empty list to store the similarity scores
similarity_score = numpy.array([])
# iterate over different skills
for skills in self.get_skill():
# Extract the script for this skill
tmp_script = self.document_matrix.\
loc[(self.document_matrix[self.doc_type] == 'script') & \
(self.document_matrix[self.skill] == skills)]
# Extract the transcript for this skill
                tmp_transcript = transcript.\
                    loc[transcript[self.skill] == skills]
primary_distance = sklearn.metrics.pairwise_distances(
X=self.create_sparse_matrix(tmp_transcript),
Y=self.create_sparse_matrix(tmp_script),
metric = method, #"cosine", "euclidean", 'manhattan'
force_all_finite = False) #,
#dense_output = True)
# Calculate the similarity score and store it
similarity_score = numpy.concatenate([similarity_score,
primary_distance.reshape(1, -1)[0]])
            # Write the similarity score back to the original DF
transcript['similarity_score'] = \
similarity_score.reshape(-1, 1) #.round(6)
# Return the output data frame
return(transcript)
# Scenario #2: Pairwise
def pairwise_comparison(self,
method = 'cosine',
remove_stopwords = False,
filler_words = [],
stem = False,
lemm = False,
tfidf = False,
tfidf_level = 'skill',
lsa = False,
lsa_n_components = 2,
ngram = 1,
remove_punctuation = None):
"""
        Get the pairwise cosine similarity among transcripts within each skill
"""
# Check Input Value
self.check_preprocessing_input(remove_stopwords = remove_stopwords,
filler_words = filler_words,
stem = stem,
lemm = lemm,
tfidf = tfidf,
tfidf_level = tfidf_level,
lsa = lsa,
lsa_n_components = lsa_n_components,
ngram = ngram,
remove_punctuation = remove_punctuation
)
# NLP Preprocessing:
self.document_matrix = self.preprocessing(remove_stopwords = remove_stopwords,
filler_words = filler_words,
stem = stem,
lemm = lemm,
tfidf = tfidf,
tfidf_level = tfidf_level,
lsa = lsa,
lsa_n_components = lsa_n_components,
ngram = ngram,
remove_punctuation = remove_punctuation)
# Sufficiency Check
if self.document_matrix.empty:
raise SystemExit("Insufficient data to process.")
# Collect the transcripts and sort by skill
transcript = self.document_matrix.\
loc[self.document_matrix[self.doc_type] == 'transcript'].\
sort_values(self.skill)
# Calculate the Similarity Score
if method == 'cosine':
# Create an empty list to store the similarity scores
similarity_score = numpy.array([])
# iterate over different skills
for skills in self.get_skill():
# Extract the transcript for this skill
                tmp_transcript = transcript.\
                    loc[transcript[self.skill] == skills]
# Calculate the similarity score and store it
# Steps:
# 1. Calculate the pairwise similarity
# 2. Apply the average function along the axis 0
# 3. Store the values within similarity_score array
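                # Note: each transcript's self-similarity is 1, so subtracting 1
                # from the column sum and dividing by (n - 1) gives the mean
                # similarity to the other transcripts in the skill.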
similarity_score = numpy.concatenate([similarity_score,
numpy.apply_along_axis(lambda x: (sum(x) - 1) / (len(x) - 1),
0, sklearn.metrics.pairwise.cosine_similarity(
X = self.create_sparse_matrix(tmp_transcript),
dense_output = True))])
            # Write the similarity score back to the original DF
transcript['similarity_score'] = \
similarity_score.reshape(-1, 1) #.round(6)
# Return the output data frame
return(transcript)
# Scenario 3
def within_study_normal_average(self,
method = 'cosine',
remove_stopwords = False,
filler_words = [],
stem = False,
lemm = False,
tfidf = False,
tfidf_level = 'skill',
lsa = False,
lsa_n_components = 2,
ngram = 1,
remove_punctuation = None):
"""
Get the average similarity score for each study
"""
output = self.normal_comparison(method = method,
remove_stopwords = remove_stopwords,
filler_words = filler_words,
stem = stem,
lemm = lemm,
tfidf = tfidf,
tfidf_level = tfidf_level,
lsa = lsa,
lsa_n_components = lsa_n_components,
ngram = ngram,
remove_punctuation = remove_punctuation)
return output[[self.study, 'similarity_score']].\
groupby([self.study]).\
mean().\
reset_index()
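    # Illustrative output of the method above (values are made up): one row per
    # study with columns [self.study, 'similarity_score'] holding the mean
    # similarity of that study's transcripts.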
# Scenario 4
def across_study_within_skill_normal_average(self,
method = 'cosine',
remove_stopwords = False,
filler_words = [],
stem = False,
lemm = False,
tfidf = False,
tfidf_level = 'skill',
lsa = False,
lsa_n_components = 2,
ngram = 1,
remove_punctuation = None):
"""
        Get each transcript's average similarity to transcripts from other
        studies within the same skill
"""
# Check Input Value
self.check_preprocessing_input(remove_stopwords = remove_stopwords,
filler_words = filler_words,
stem = stem,
lemm = lemm,
tfidf = tfidf,
tfidf_level = tfidf_level,
lsa = lsa,
lsa_n_components = lsa_n_components,
ngram = ngram,
remove_punctuation = remove_punctuation)
# NLP Preprocessing:
self.document_matrix = self.preprocessing(remove_stopwords = remove_stopwords,
filler_words = filler_words,
stem = stem,
lemm = lemm,
tfidf = tfidf,
tfidf_level = tfidf_level,
lsa = lsa,
lsa_n_components = lsa_n_components,
ngram = ngram,
remove_punctuation = remove_punctuation)
# Remove non-transcripts, sort by skill, study
tmp_data = self.document_matrix.copy().\
loc[self.document_matrix[self.doc_type] == 'transcript'].\
sort_values([self.skill, self.study])
# Create an empty list to store the similarity scores
similarity_score = list()
# iterate over different skills
for skills in self.get_skill():
# Within each skill, iterate over different study
for studies in self.get_study(skill_id = [skills]):
# doc_type = 'script' will have nan for study
if pandas.isnull(studies):
pass
else:
# tmp_script will be the study we want to compare
tmp_script = tmp_data.\
loc[(tmp_data[self.skill] == skills) &
(tmp_data[self.study] == studies)]. \
reset_index()
# tmp_transcript will be all other studies within the skill
tmp_transcript = tmp_data.\
loc[(tmp_data[self.skill] == skills) &
(tmp_data[self.study] != studies)].\
reset_index()
# Iterate over each transcripts
for index, _ in tmp_script.iterrows():
similarity_score += [sklearn.metrics.pairwise.cosine_similarity(
self.create_sparse_matrix(tmp_script.iloc[index,]),
self.create_sparse_matrix(tmp_transcript),
dense_output = True).mean()] # Average of all tmp_transcripts
        # Write the similarity score back to the original DF
tmp_data['similarity_score'] = numpy.asarray(similarity_score).\
reshape(-1, 1) #.round(6)
# Return the output data frame
return(tmp_data)
# Scenario 5
def across_study_across_skill_normal_average(self,
method = 'cosine',
remove_stopwords = False,
filler_words = [],
stem = False,
lemm = False,
tfidf = False,
tfidf_level = 'skill',
lsa = False,
lsa_n_components = 2,
ngram = 1,
remove_punctuation = None):
"""
        Get each transcript's average similarity to transcripts from other
        studies across all skills
"""
# Check Input Value
self.check_preprocessing_input(remove_stopwords = remove_stopwords,
filler_words = filler_words,
stem = stem,
lemm = lemm,
tfidf = tfidf,
tfidf_level = tfidf_level,
lsa = lsa,
lsa_n_components = lsa_n_components,
ngram = ngram,
remove_punctuation = remove_punctuation)
# NLP Preprocessing:
self.document_matrix = self.preprocessing(remove_stopwords = remove_stopwords,
filler_words = filler_words,
stem = stem,
lemm = lemm,
tfidf = tfidf,
tfidf_level = tfidf_level,
lsa = lsa,
lsa_n_components = lsa_n_components,
ngram = ngram,
remove_punctuation = remove_punctuation)
# Remove non-transcripts, sort by skill, study
tmp_data = self.document_matrix.copy().\
loc[self.document_matrix[self.doc_type] == 'transcript'].\
sort_values([self.skill, self.study])
# Create an empty list to store the similarity scores
similarity_score = list()
# Within each skill, iterate over different study
for studies in self.get_study():
# doc_type = 'script' will have nan for study
if pandas.isnull(studies):
pass
else:
# tmp_script will be the study we want to compare
tmp_script = tmp_data.loc[tmp_data[self.study] == studies]. \
reset_index()
# tmp_transcript will be all other studies within the skill
tmp_transcript = tmp_data.loc[tmp_data[self.study] != studies]. \
reset_index()
# Iterate over each transcripts
for index, _ in tmp_script.iterrows():
similarity_score += [sklearn.metrics.pairwise.cosine_similarity(
self.create_sparse_matrix(tmp_script.iloc[index,]),
self.create_sparse_matrix(tmp_transcript),
dense_output = True).mean()] # Average of all tmp_transcripts
        # Write the similarity score back to the original DF
tmp_data['similarity_score'] = numpy.asarray(similarity_score).\
reshape(-1, 1) #.round(6)
# Return the output data frame
return(tmp_data)
class PreprocessCorpusText:
"""
Parameters
----------
    data_source : String or pandas.DataFrame
        May be either:
        a directory address containing .txt files of corpus documents,
        or a dataframe where each row contains the text of a corpus document.
"""
def collect_directory(self, source_dir, recursive=False):
"""
Extract each line of each file in a directory [source_dir]
of text documents. Return a single dataframe of
labeled lines from documents.
"""
# columns of final DF output
dfcolumns=['doc_id', 'source_dir', 'subdir', 'filename', \
"collected", "rtlen", "rawtext"]
collect_df = pandas.DataFrame()
# if seeking recursive search, look in all subfolders as default
# otherwise, return first result of os.walk(), i.e. base folder only
        if recursive:
            # enumerate all .txt files in the full directory tree
            f = [os.path.join(root, name)
                 for root, _, files in os.walk(source_dir)
                 for name in files if ".txt" in name]
        else:
            # enumerate .txt files in the base folder only
            root, _, files = next(os.walk(source_dir))
            f = [os.path.join(root, name) for name in files if ".txt" in name]
if not f:
raise SystemExit("The target directory must contain .txt files.")
# for each text file identified, extract text lines
for i, file in enumerate(f):
#open source file
# sometimes problems with utf-8 or latin-1 encodings,
# cp1252 appeared to work consistently
            with open(file, encoding="cp1252") as fh:
                #remove all non-ascii characters by encoding ascii
                # then decode again
                # strip lines of extra whitespace
                lines = [line.encode("ascii", "ignore")\
                    .decode().rstrip('\n') for line in fh]
# create dataframe of lines to modify
df = pandas.DataFrame(lines,columns=["rawtext"])
# strip any remaining extremity whitespace
df['rawtext'] = df['rawtext'].str.strip()
# add line length
df['rtlen'] = df.rawtext.str.len()
# remove any zero length or NA lines
df = df[df.rtlen > 0]
df = df.dropna()
# add incrementing ID per file
df['doc_id'] = i
#fs = file.split("\\")[-3:]
splitpath = list(os.path.split(file))
splitpath[0] = splitpath[0].replace(source_dir, "")
splitpath = [source_dir] + splitpath
df[['source_dir', 'subdir', 'filename']] = splitpath
#df['dir_path'], df['filename']
df['collected'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
df = df[dfcolumns]
#df['dir_path'] =
# df = df.assign(**{'parentdir1':fs[0],
# 'parentdir2':fs[1],
# 'filename':fs[2]})
collect_df = collect_df.append(df)
collect_df['line_id'] = collect_df.index
return collect_df
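    # Illustrative usage sketch (the directory path is a made-up example): when
    # the class is constructed with a folder of .txt files, collect_directory()
    # builds the line-level dataframe used below.
    #
    #   corpus = PreprocessCorpusText("transcripts/", recursive=True)
    #   corpus.df[["doc_id", "filename", "rtlen", "rawtext"]].head()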
def explode_lines(self, col_name):
"""
Given a column named [col_name] containing line breaks,
explode the dataset so that every single line is a separate row.
returns new instance of the class object
"""
if col_name not in self.df.columns:
raise SystemExit("Input [col_name] must be a column in this dataset.")
# remove unhelpful \r character
obj = self.copy()
obj.df = obj.df.replace("\r", "")
# separate multi-line text to individual rows
obj.df[col_name] = obj.df[col_name].str.split("\n")
obj.df = obj.df.explode(col_name)
# clean up any resulting empty rows
obj.df = obj.df.dropna(subset=[col_name])
return obj
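    # Illustrative effect of explode_lines (values are made up): a row whose
    # [col_name] text is "line one\nline two" becomes two rows, one per line,
    # with all other columns duplicated.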
def copy(self):
"""
create a new instance of PreprocessCorpusText,
with the same data as this instance
"""
return PreprocessCorpusText(self.df, text=self.curr_txt)
def __init__(self, data_source, recursive=False, text=None): #, directory=None, document_dataframe=None
validentry = "Some data source must be added, either a directory of txt files, or an existing Pandas dataframe of documents."
if isinstance(data_source, pandas.DataFrame):
if not text:
raise SystemExit("This class requires input dataframes to" + \
" specify the column containing document" + \
" text with [text].")
self.df = data_source.copy()
cols_check = {'doc_id': self.df.index,
'source_dir': pandas.NA,
'subdir': pandas.NA,
'filename': pandas.NA,
'collected': datetime.datetime.now()\
.strftime('%Y-%m-%d %H:%M:%S'),
'rawtext': self.df[text]
}
checkcols = self.df.columns
for cc in cols_check.keys():
if cc not in checkcols:
self.df[cc] = cols_check[cc]
self.curr_txt = text
self.data_sources = "dataframe"
elif os.path.isdir(str(data_source)):
self.data_sources = data_source
self.df = self.collect_directory(data_source, recursive)
self.curr_txt = "rawtext"
else:
raise SystemExit(validentry)
self.__name__ = "PreprocessCorpusText"
def group_by_speaker(self, speaker):
"""
Returns dataframe where text has been joined by speaker.
Accepts source [dataframe],
and column names for speaker [speaker],
text to be concatenated [text],
and ID to identify each unique document [doc_id].
returns new instance of the class object
"""
obj = self.copy()
stablecols = ["doc_id", "source_dir", "subdir", \
"filename", "collected", speaker]
#select columns not abstractable to documents, remove duplicates
limdf = self.df.copy()
limdf = limdf[["doc_id", "period",
"filename", "collected"]].drop_duplicates()
errmsg = "[speaker] must all be a column name in the given dataset"
if not isinstance(speaker, str) or \
speaker not in obj.df.columns:
raise SystemExit(errmsg)
if not isinstance(obj.df, pandas.DataFrame):
raise SystemExit(errmsg)
# print(obj.curr_txt)
# concatenate text by speaker
speak_red = obj.df.groupby([speaker, "doc_id"])\
[obj.curr_txt].apply(' '.join).reset_index()
obj.df = pandas.merge(limdf, speak_red, \
how="inner", on="doc_id")
print("Speaker names extracted")
return obj
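    # Illustrative usage sketch (the "speaker" column name is an assumption):
    #
    #   corpus = corpus.group_by_speaker("speaker")
    #
    # after which each row of corpus.df holds the concatenated text of one
    # speaker within one document.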
def extr(self, x, pattern, mult):
"""
Function for Pandas Apply vectorizing.
Extract from src text [x] to add to a separate column
if any match of the given regex [pattern].
If [mult]=True then extract multiple regex pattern group matches.
"""
if not x or x is numpy.nan:
return numpy.nan
out = re.findall(pattern, x)
if not out: #exit if no matches at all
return numpy.nan
if type(out[0]) == tuple:
out = [x.strip() for x in list(out[0]) if x]
elif len(out) > 1:
out = [x.strip() for x in out if x]
if not out: #exit if matches are all empty
return numpy.nan
if type(out) == list and not mult:
out = out[0]
return out
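    # Illustrative behaviour of extr() (the strings and pattern are assumptions):
    #
    #   self.extr("MODERATOR: welcome everyone", r"^([A-Z]+):", mult=False)
    #       -> "MODERATOR"
    #   self.extr("no speaker tag here", r"^([A-Z]+):", mult=False)
    #       -> numpy.nan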
def add_col_from_extract(self, df1, colfrom, newcolname, regex, \
mult=False, from_prev_row=False):
"""
Return the original given dataframe [df1] with a
new column [newcolname] created from matches returned from
the given regex pattern [regex] applied to a src column [colfrom].
If [mult]=True, returns list of all matches, not just first.
If from_prev_row, returns [regex] match from previous instead of
current row.
returns new instance of the class object
"""
# obj = self.copy()
# df1 = obj.df
# colfrom = self.curr_txt
# create empty column
df1[newcolname] = numpy.nan
        # if from_prev_row, match [regex] against the previous row's text instead
if from_prev_row:
df1['prevrow'] = df1[colfrom].shift(1, axis = 0)
df1[newcolname] = df1.apply( \
lambda x: self.extr(x['prevrow'], regex, mult), axis=1)
# df1[newcolname] = numpy.where( \
# df1[colfrom].shift(1, axis = 0) == regex,
# df1[colfrom], numpy.nan)
# remove extracted text from src column
df1[colfrom] = numpy.where(~df1[newcolname].isnull(), "",df1[colfrom])
df1 = df1.drop(columns=["prevrow"])
else:
# otherwise, add regex match to new column
df1[newcolname] = df1[colfrom].apply( \
lambda x: self.extr(x, regex, mult))
# remove extracted text from src column
df1[colfrom] = df1[colfrom].apply(lambda x: \
re.sub(regex, "", x).strip())
# clean up beginning/end of reduced src column
df1[colfrom] = df1[colfrom].str.lstrip(": ").str.strip()
df1[colfrom] = df1[colfrom].str.lstrip("-")
# return output
print(f"{newcolname} extracted into a new column")
return df1
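    # Illustrative usage sketch (the 'speaker' column name and regex are
    # assumptions for demonstration only):
    #
    #   obj.df = obj.add_col_from_extract(obj.df, colfrom=obj.curr_txt,
    #                                     newcolname="speaker",
    #                                     regex=r"^([A-Z][a-z]+):")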
def addumn(self, colname, contents):
"""
Add a new column to the dataset, named [colname],
and the values should be [contents].
If [contents] is a string and the name of an existing column,
copy existing column [contents] to the new column.
"""
if isinstance(contents, str) and contents in self.df.columns:
self.df[colname] = self.df[contents].copy()
else:
self.df[colname] = contents
def new_text_column(self, new_text_name):
"""
create a new column of text to process named [new_text_name],
automatically updates internal text col tracking
returns new instance of the class object
"""
obj = self.copy()
obj.addumn(new_text_name, obj.curr_txt)
obj.curr_txt = new_text_name
return obj
def join_dataset(self, newdf, join_on, assign_text):
"""
join current dataset with new dataset [newdf],
assuming inner join,
join on the column named [join_on] which must exist
in both datasets
for the benefit of the object,
set column named [assign_text] as text analysis target
returns new instance of the class object
"""
obj = self.copy()
obj.df = pandas.merge(obj.df, newdf,
how="inner", on=join_on)
obj.curr_txt = assign_text
return obj
# convert timestamp to numeric second counter
def colon_delim_timestamp_to_second(self, x):
"""
Apply vectorizer function, accepts raw text like timestamp,
returns number of hours, minutes, and seconds converted to
a single numeric seconds value.
"""
if | pandas.isna(x) | pandas.isna |
import pandas as pd
import numpy as np
import pytest
import re
import tubular
import tubular.testing.helpers as h
import tubular.testing.test_data as data_generators_p
import input_checker
from input_checker._version import __version__
from input_checker.checker import InputChecker
from input_checker.exceptions import InputCheckerError
class TestInit(object):
"""Tests for InputChecker.init()."""
def test_super_init_called(self, mocker):
"""Test that init calls BaseTransformer.init."""
expected_call_args = {0: {"args": (), "kwargs": {"columns": ["a", "b"]}}}
with h.assert_function_call(
mocker, tubular.base.BaseTransformer, "__init__", expected_call_args
):
InputChecker(columns=["a", "b"])
def test_inheritance(self):
"""Test that InputChecker inherits from tubular.base.BaseTransformer."""
x = InputChecker()
h.assert_inheritance(x, tubular.base.BaseTransformer)
def test_arguments(self):
"""Test that InputChecker init has expected arguments."""
h.test_function_arguments(
func=InputChecker.__init__,
expected_arguments=[
"self",
"columns",
"categorical_columns",
"numerical_columns",
"datetime_columns",
"skip_infer_columns",
],
expected_default_values=(None, None, None, None, None),
)
def test_version_attribute(self):
"""Test that __version__ attribute takes expected value."""
x = InputChecker(columns=["a"])
h.assert_equal_dispatch(
expected=__version__,
actual=x.version_,
msg="__version__ attribute",
)
def test_columns_attributes_generated(self):
"""Test all columns attributes are saved with InputChecker init"""
x = InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["b"],
datetime_columns=["d"],
skip_infer_columns=["c"],
)
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
assert hasattr(x, "columns") is True, "columns attribute not present after init"
assert (
hasattr(x, "numerical_columns") is True
), "numerical_columns attribute not present after init"
assert (
hasattr(x, "categorical_columns") is True
), "categorical_columns attribute not present after init"
assert (
hasattr(x, "datetime_columns") is True
), "datetime_columns attribute not present after init"
assert (
hasattr(x, "skip_infer_columns") is True
), "skip_infer_columns attribute not present after init"
def test_check_type_called(self, mocker):
"""Test all check type is called by the init method."""
spy = mocker.spy(input_checker.checker.InputChecker, "_check_type")
x = InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["b"],
datetime_columns=["d"],
skip_infer_columns=["c"],
)
assert (
spy.call_count == 5
), "unexpected number of calls to InputChecker._check_type with init"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
call_1_args = spy.call_args_list[1]
call_1_pos_args = call_1_args[0]
call_2_args = spy.call_args_list[2]
call_2_pos_args = call_2_args[0]
call_3_args = spy.call_args_list[3]
call_3_pos_args = call_3_args[0]
call_4_args = spy.call_args_list[4]
call_4_pos_args = call_4_args[0]
expected_pos_args_0 = (
x,
["a", "b", "c", "d"],
"input columns",
[list, type(None), str],
)
expected_pos_args_1 = (
x,
["b"],
"categorical columns",
[list, str, type(None)],
)
expected_pos_args_2 = (
x,
["a"],
"numerical columns",
[list, dict, str, type(None)],
)
expected_pos_args_3 = (
x,
["d"],
"datetime columns",
[list, dict, str, type(None)],
)
expected_pos_args_4 = (
x,
["c"],
"skip infer columns",
[list, type(None)],
)
assert (
expected_pos_args_0 == call_0_pos_args
), "positional args unexpected in _check_type call for columns argument"
assert (
expected_pos_args_1 == call_1_pos_args
), "positional args unexpected in _check_type call for categorical columns argument"
assert (
expected_pos_args_2 == call_2_pos_args
), "positional args unexpected in _check_type call for numerical columns argument"
assert (
expected_pos_args_3 == call_3_pos_args
), "positional args unexpected in _check_type call for datetime columns argument"
assert (
expected_pos_args_4 == call_4_pos_args
), "positional args unexpected in _check_type call for skip infer columns argument"
def test_check_is_string_value_called(self, mocker):
"""Test all check string is called by the init method when option set to infer."""
spy = mocker.spy(input_checker.checker.InputChecker, "_is_string_value")
x = InputChecker(
numerical_columns="infer",
categorical_columns="infer",
datetime_columns="infer",
)
assert (
spy.call_count == 3
), "unexpected number of calls to InputChecker._is_string_value with init"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
call_1_args = spy.call_args_list[1]
call_1_pos_args = call_1_args[0]
call_2_args = spy.call_args_list[2]
call_2_pos_args = call_2_args[0]
expected_pos_args_0 = (x, x.categorical_columns, "categorical columns", "infer")
expected_pos_args_1 = (x, x.numerical_columns, "numerical columns", "infer")
expected_pos_args_2 = (x, x.datetime_columns, "datetime columns", "infer")
assert (
expected_pos_args_0 == call_0_pos_args
), "positional args unexpected in _is_string_value call for numerical columns argument"
assert (
expected_pos_args_1 == call_1_pos_args
), "positional args unexpected in _is_string_value call for categorical columns argument"
assert (
expected_pos_args_2 == call_2_pos_args
), "positional args unexpected in _is_string_value call for categorical columns argument"
def test_check_is_empty_called(self, mocker):
"""Test all check is empty is called by the init method."""
spy = mocker.spy(input_checker.checker.InputChecker, "_is_empty")
x = InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["b", "c"],
datetime_columns=["d"],
)
assert (
spy.call_count == 4
), "unexpected number of calls to InputChecker._is_empty with init"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
call_1_args = spy.call_args_list[1]
call_1_pos_args = call_1_args[0]
call_2_args = spy.call_args_list[2]
call_2_pos_args = call_2_args[0]
call_3_args = spy.call_args_list[3]
call_3_pos_args = call_3_args[0]
expected_pos_args_0 = (x, "input columns", ["a", "b", "c", "d"])
expected_pos_args_1 = (x, "categorical columns", ["b", "c"])
expected_pos_args_2 = (x, "numerical columns", ["a"])
expected_pos_args_3 = (x, "datetime columns", ["d"])
assert (
expected_pos_args_0 == call_0_pos_args
), "positional args unexpected in _is_empty call for categorical columns argument"
assert (
expected_pos_args_1 == call_1_pos_args
), "positional args unexpected in _is_empty call for numerical columns argument"
assert (
expected_pos_args_2 == call_2_pos_args
), "positional args unexpected in _is_empty call for numerical columns argument"
assert (
expected_pos_args_3 == call_3_pos_args
), "positional args unexpected in _is_empty call for numerical columns argument"
def test_check_is_listed_in_columns_called(self, mocker):
spy = mocker.spy(input_checker.checker.InputChecker, "_is_listed_in_columns")
InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["b", "c"],
datetime_columns=["d"],
)
assert (
spy.call_count == 1
), "unexpected number of calls to InputChecker._is_listed_in_columns with init"
class TestConsolidateInputs(object):
def test_arguments(self):
"""Test that _consolidate_inputs has expected arguments."""
h.test_function_arguments(
func=InputChecker._consolidate_inputs,
expected_arguments=["self", "X"],
expected_default_values=None,
)
def test_infer_datetime_columns(self):
"""Test that _consolidate_inputs infers the correct datetime columns"""
x = InputChecker(datetime_columns="infer")
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
df["e"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08-04-2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
assert x.datetime_columns == [
"d",
"e",
], "infer datetime not finding correct columns"
def test_infer_datetime_dict(self):
"""Test that _consolidate_inputs infers the correct datetime dict"""
x = InputChecker(datetime_columns="infer")
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
assert (
x.datetime_dict["d"]["maximum"] is False
        ), "infer datetime not specifying maximum value check as false"
assert (
x.datetime_dict["d"]["minimum"] is True
        ), "infer datetime not specifying minimum value check as true"
def test_infer_categorical_columns(self):
"""Test that _consolidate_inputs infers the correct categorical columns"""
x = InputChecker(categorical_columns="infer")
df = data_generators_p.create_df_2()
df["d"] = [True, True, False, True, True, False, np.nan]
df["d"] = df["d"].astype("bool")
x.fit(df)
assert x.categorical_columns == [
"b",
"c",
"d",
], "infer categorical not finding correct columns"
def test_infer_numerical_columns(self):
"""Test that _consolidate_inputs infers the correct numerical columns"""
x = InputChecker(numerical_columns="infer")
df = data_generators_p.create_df_2()
x.fit(df)
assert x.numerical_columns == [
"a"
], "infer numerical not finding correct columns"
def test_infer_numerical_skips_infer_columns(self):
"""Test that _consolidate_inputs skips right columns when inferring numerical"""
x = InputChecker(numerical_columns="infer", skip_infer_columns=["a"])
df = data_generators_p.create_df_2()
df["d"] = df["a"]
x.fit(df)
assert x.numerical_columns == [
"d"
], "infer numerical not finding correct columns when skipping infer columns"
def test_infer_categorical_skips_infer_columns(self):
"""Test that _consolidate_inputs skips right columns when inferring categorical"""
x = InputChecker(categorical_columns="infer", skip_infer_columns=["b"])
df = data_generators_p.create_df_2()
x.fit(df)
assert x.categorical_columns == [
"c"
], "infer categorical not finding correct columns when skipping infer columns"
def test_infer_datetime_skips_infer_columns(self):
"""Test that _consolidate_inputs skips right columns when inferring datetime"""
x = InputChecker(datetime_columns="infer", skip_infer_columns=["d"])
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
df["a"] = df["d"]
x.fit(df)
assert x.datetime_columns == [
"a"
], "infer datetime not finding correct columns when skipping infer columns"
def test_infer_numerical_dict(self):
"""Test that _consolidate_inputs infers the correct numerical dict"""
x = InputChecker(numerical_columns="infer")
df = data_generators_p.create_df_2()
x.fit(df)
assert (
x.numerical_dict["a"]["maximum"] is True
), "infer numerical not specifying maximum value check as true"
assert (
x.numerical_dict["a"]["minimum"] is True
), "infer numerical not specifying minimum value check as true"
def test_datetime_type(self):
"""Test that datetime columns is a list after calling _consolidate_inputs"""
x = InputChecker(datetime_columns="infer")
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
assert (
type(x.datetime_columns) is list
), f"incorrect datetime_columns type returned from _consolidate_inputs - expected: list but got: {type(x.datetime_columns)} "
def test_categorical_type(self):
"""Test that categorical columns is a list after calling _consolidate_inputs"""
x = InputChecker(categorical_columns="infer")
df = data_generators_p.create_df_2()
x.fit(df)
assert (
type(x.categorical_columns) is list
), f"incorrect categorical_columns type returned from _consolidate_inputs - expected: list but got: {type(x.categorical_columns)} "
def test_numerical_type(self):
"""Test that numerical columns and dict are a list and dict after calling _consolidate_inputs"""
x = InputChecker(numerical_columns="infer")
df = data_generators_p.create_df_2()
x.fit(df)
assert (
type(x.numerical_columns) is list
), f"incorrect numerical_columns type returned from _consolidate_inputs - expected: list but got: {type(x.numerical_columns)} "
assert (
type(x.numerical_dict) is dict
), f"incorrect numerical_dict type returned from _consolidate_inputs - expected: dict but got: {type(x.numerical_dict)} "
def test_check_is_subset_called(self, mocker):
"""Test all check _is_subset is called by the _consolidate_inputs method."""
x = InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["c"],
datetime_columns=["d"],
skip_infer_columns=["b"],
)
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
spy = mocker.spy(input_checker.checker.InputChecker, "_is_subset")
x.fit(df)
assert (
spy.call_count == 5
), "unexpected number of calls to InputChecker._is_subset with _consolidate_inputs"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
call_1_args = spy.call_args_list[1]
call_1_pos_args = call_1_args[0]
call_2_args = spy.call_args_list[2]
call_2_pos_args = call_2_args[0]
call_3_args = spy.call_args_list[3]
call_3_pos_args = call_3_args[0]
call_4_args = spy.call_args_list[4]
call_4_pos_args = call_4_args[0]
expected_pos_args_0 = (x, "skip infer columns", ["b"], df)
expected_pos_args_1 = (x, "input columns", ["a", "b", "c", "d"], df)
expected_pos_args_2 = (x, "categorical columns", ["c"], df)
expected_pos_args_3 = (x, "numerical columns", ["a"], df)
expected_pos_args_4 = (x, "datetime columns", ["d"], df)
assert (
expected_pos_args_0 == call_0_pos_args
), "positional args unexpected in _is_subset call for skip_infer_columns columns argument"
assert (
expected_pos_args_1 == call_1_pos_args
), "positional args unexpected in _is_subset call for input columns argument"
assert (
expected_pos_args_2 == call_2_pos_args
), "positional args unexpected in _is_subset call for categorical columns argument"
assert (
expected_pos_args_3 == call_3_pos_args
), "positional args unexpected in _is_subset call for numerical columns argument"
assert (
expected_pos_args_4 == call_4_pos_args
), "positional args unexpected in _is_subset call for datetime columns argument"
class TestFitTypeChecker(object):
"""Tests for InputChecker._fit_type_checker()."""
def test_arguments(self):
"""Test that InputChecker _fit_type_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._fit_type_checker, expected_arguments=["self", "X"]
)
def test_no_column_classes_before_fit(self):
"""Test column_classes is not present before fit called"""
x = InputChecker()
assert (
hasattr(x, "column_classes") is False
), "column_classes attribute present before fit"
def test_column_classes_after_fit(self):
"""Test column_classes is present after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
assert hasattr(
x, "column_classes"
), "column_classes attribute not present after fit"
def test_correct_columns_classes(self):
"""Test fit type checker saves types for correct columns after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(columns=["a"])
x.fit(df)
assert list(x.column_classes.keys()) == [
"a"
], f"incorrect values returned from _fit_value_checker - expected: ['a'] but got: {list(x.column_classes.keys())}"
def test_correct_classes_identified(self):
"""Test fit type checker identifies correct classes is present after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
assert (
x.column_classes["a"] == "float64"
), f"incorrect type returned from _fit_type_checker for column 'a' - expected: float64 but got: {x.column_classes['a']}"
assert (
x.column_classes["b"] == "object"
), f"incorrect type returned from _fit_type_checker for column 'b' - expected: object but got: {x.column_classes['b']}"
assert (
x.column_classes["c"] == "category"
), f"incorrect type returned from _fit_type_checker for column 'c' - expected: category but got: {x.column_classes['c']}"
assert (
x.column_classes["d"] == "datetime64[ns]"
), f"incorrect type returned from _fit_type_checker for column 'd' - expected: datetime64[ns] but got: {x.column_classes['d']}"
class TestFitNullChecker(object):
"""Tests for InputChecker._fit_null_checker()."""
def test_arguments(self):
"""Test that InputChecker _fit_null_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._fit_null_checker, expected_arguments=["self", "X"]
)
def test_no_expected_values_before_fit(self):
"""Test null_map is not present before fit called"""
x = InputChecker()
assert hasattr(x, "null_map") is False, "null_map attribute present before fit"
def test_expected_values_after_fit(self):
"""Test null_map is present after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
assert hasattr(x, "null_map"), "null_map attribute not present after fit"
def test_correct_columns_nulls(self):
"""Test fit nulls checker saves map for correct columns after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(columns=["a"])
x.fit(df)
assert list(x.null_map.keys()) == [
"a"
], f"incorrect values returned from _fit_null_checker - expected: ['a'] but got: {list(x.null_map.keys())}"
def test_correct_classes_identified(self):
"""Test fit null checker identifies correct columns with nulls after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker()
df["b"] = df["b"].fillna("a")
x.fit(df)
assert (
x.null_map["a"] == 1
), f"incorrect values returned from _fit_null_checker - expected: 1 but got: {x.null_map['a']}"
assert (
x.null_map["b"] == 0
), f"incorrect values returned from _fit_null_checker - expected: 0 but got: {x.null_map['b']}"
assert (
x.null_map["c"] == 1
), f"incorrect values returned from _fit_null_checker - expected: 1 but got: {x.null_map['c']}"
class TestFitValueChecker(object):
"""Tests for InputChecker._fit_value_checker()."""
def test_arguments(self):
"""Test that InputChecker _fit_value_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._fit_value_checker, expected_arguments=["self", "X"]
)
def test_no_expected_values_before_fit(self):
"""Test expected_values is not present before fit called"""
x = InputChecker(categorical_columns=["b", "c"])
assert (
hasattr(x, "expected_values") is False
), "expected_values attribute present before fit"
def test_expected_values_after_fit(self):
"""Test expected_values is present after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(categorical_columns=["b", "c"])
x.fit(df)
assert hasattr(
x, "expected_values"
), "expected_values attribute not present after fit"
def test_correct_columns_map(self):
"""Test fit value checker saves levels for correct columns after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(categorical_columns=["b", "c"])
x.fit(df)
assert list(x.expected_values.keys()) == [
"b",
"c",
], f"incorrect values returned from _fit_value_checker - expected: ['b', 'c'] but got: {list(x.expected_values.keys())}"
def test_correct_values_identified(self):
"""Test fit value checker identifies corrcet levels after fit called"""
df = data_generators_p.create_df_2()
df["d"] = [True, True, False, True, True, False, np.nan]
df["d"] = df["d"].astype("bool")
x = InputChecker(categorical_columns=["b", "c", "d"])
x.fit(df)
assert x.expected_values["b"] == [
"a",
"b",
"c",
"d",
"e",
"f",
np.nan,
], f"incorrect values returned from _fit_value_checker - expected: ['a', 'b', 'c', 'd', 'e', 'f', np.nan] but got: {x.expected_values['b']}"
assert x.expected_values["c"] == [
"a",
"b",
"c",
"d",
"e",
"f",
np.nan,
], f"incorrect values returned from _fit_value_checker - expected: ['a', 'b', 'c', 'd', 'e', 'f', np.nan] but got: {x.expected_values['c']}"
assert x.expected_values["d"] == [
True,
False,
        ], f"incorrect values returned from _fit_value_checker - expected: [True, False] but got: {x.expected_values['d']}"
class TestFitNumericalChecker(object):
"""Tests for InputChecker._fit_numerical_checker()."""
def test_arguments(self):
"""Test that InputChecker _fit_numerical_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._fit_numerical_checker, expected_arguments=["self", "X"]
)
def test_no_expected_values_before_fit(self):
"""Test numerical_values is not present before fit called"""
x = InputChecker()
assert (
hasattr(x, "numerical_values") is False
), "numerical_values attribute present before fit"
def test_expected_values_after_fit(self):
"""Test numerical_values is present after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
assert hasattr(
x, "numerical_values"
), "numerical_values attribute not present after fit"
def test_correct_columns_num_values(self):
"""Test fit numerical checker saves values for correct columns after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
assert list(x.numerical_values.keys()) == [
"a"
], f"incorrect values returned from numerical_values - expected: ['a'] but got: {list(x.numerical_values.keys())}"
def test_correct_numerical_values_identified(self):
"""Test fit numerical checker identifies correct range values after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
assert (
x.numerical_values["a"]["maximum"] == 6
        ), f"incorrect values returned from _fit_numerical_checker - expected: 6 but got: {x.numerical_values['a']['maximum']}"
assert (
x.numerical_values["a"]["minimum"] == 1
        ), f"incorrect values returned from _fit_numerical_checker - expected: 1 but got: {x.numerical_values['a']['minimum']}"
def test_correct_numerical_values_identified_dict(self):
"""Test fit numerical checker identifies correct range values after fit called when inputting a dictionary"""
df = data_generators_p.create_df_2()
numerical_dict = {}
numerical_dict["a"] = {}
numerical_dict["a"]["maximum"] = True
numerical_dict["a"]["minimum"] = False
x = InputChecker(numerical_columns=numerical_dict)
x.fit(df)
assert (
x.numerical_values["a"]["maximum"] == 6
        ), f"incorrect values returned from _fit_numerical_checker - expected: 6 but got: {x.numerical_values['a']['maximum']}"
assert (
x.numerical_values["a"]["minimum"] is None
), f"incorrect values returned from _fit_numerical_checker - expected: None but got: {x.numerical_values['a']['minimum']}"
class TestFitDatetimeChecker(object):
"""Tests for InputChecker._fit_datetime_checker()."""
def test_arguments(self):
"""Test that InputChecker _fit_value_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._fit_datetime_checker, expected_arguments=["self", "X"]
)
def test_no_datetime_values_before_fit(self):
"""Test expected_values is not present before fit called"""
x = InputChecker(datetime_columns=["b", "c"])
assert (
hasattr(x, "datetime_values") is False
), "datetime_values attribute present before fit"
def test_datetime_values_after_fit(self):
"""Test datetime_values is present after fit called"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
df["e"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08-04-2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x = InputChecker(datetime_columns=["d", "e"])
x.fit(df)
assert hasattr(
x, "datetime_values"
), "datetime_values attribute not present after fit"
def test_correct_columns_map(self):
"""Test fit datetime checker saves minimum dates for correct columns after fit called"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
df["e"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08-04-2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x = InputChecker(datetime_columns=["d", "e"])
x.fit(df)
assert list(x.datetime_values.keys()) == [
"d",
"e",
], f"incorrect values returned from _fit_datetime_checker - expected: ['d', 'e'] but got: {list(x.datetime_values.keys())} "
def test_correct_datetime_values_identified(self):
"""Test fit datetime checker identifies correct minimum bound after fit called"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x = InputChecker(datetime_columns=["d"])
x.fit(df)
expected_min_d = pd.to_datetime("15/10/2018").date()
actual_min_d = x.datetime_values["d"]["minimum"]
actual_max_d = x.datetime_values["d"]["maximum"]
assert (
actual_min_d == expected_min_d
), f"incorrect values returned from _fit_datetime_checker - expected: {expected_min_d}, but got: {actual_min_d}"
assert (
actual_max_d is None
), f"incorrect values returned from _fit_datetime_checker - expected: None, but got: {actual_max_d}"
def test_correct_datetime_values_identified_dict(self):
"""Test fit datetime checker identifies correct range values after fit called when inputting a dictionary"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
datetime_dict = {"d": {"maximum": True, "minimum": True}}
x = InputChecker(datetime_columns=datetime_dict)
x.fit(df)
expected_min_d = pd.to_datetime("15/10/2018").date()
expected_max_d = | pd.to_datetime("01/02/2021") | pandas.to_datetime |
import os
import pandas as pd
from ccf.config import LoadSettings
from ccf.redcap import CachedRedcap
from utils.utils import strip_special_chars, to_numeric, convert_nan, synchronize_dtypes
def load():
config = LoadSettings()
downloads_dir = config["KSADS"]["download_dir"]
snapshots = [os.path.join(downloads_dir, x) for x in sorted(os.listdir(downloads_dir)) if x.startswith("snapshot")]
latest_snapshot = snapshots[-1]
print("Latest snapshot is ", latest_snapshot)
# %%
ksads = pd.read_csv(latest_snapshot, low_memory=False)
ksads = strip_special_chars(ksads)
ksads.dateofinterview = pd.to_datetime(ksads.dateofinterview)
complete_columns = ksads.columns[ksads.columns.str.endswith("_complete")]
ksads["common_complete"] = 1
ksads[complete_columns] = ksads[complete_columns].mask(ksads[complete_columns].isna(), 0).astype(int)
# %%
redcap = CachedRedcap()
current_redcap = redcap("ksads")
current_redcap.dateofinterview = | pd.to_datetime(current_redcap.dateofinterview) | pandas.to_datetime |
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
'''
Input:
messages, the messages received from different channels after the disaster
categories, the messages corresponding categories
Output:
df the data file after merging the messages and categories tables
'''
messages = pd.read_csv(messages_filepath)
categories = | pd.read_csv(categories_filepath) | pandas.read_csv |
"""Calculate a Confusion Matrix for multi-class classification results
2019 <NAME>
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
class ConfusionMatrix:
"""Calculate a Confusion Matrix
Parameters
----------
y : array-like, true labels
p : array-like, predicted labels of same type and length as y
Attributes
----------
y : see above
p : see above
df : Pandas DataFrame, aligned y and p data
dfg : Pandas DataFrame, grouped by (y, p) combinations and counts
a : Numpy array, confusion matrix values
df_cm : Pandas DataFrame, confusion matrix values with row/column labels
"""
def __init__(self, y, p):
assert type(y) == type(p)
assert len(y) == len(p)
self.categorical = False
if isinstance(y[0], str):
self.categorical = True
if self.categorical:
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
le.fit(y)
y = le.transform(y)
p = le.transform(p)
self.y = y
self.p = p
self.df = pd.DataFrame({'y': self.y, 'p': self.p})
self.n_classes = len(self.df.y.unique())
self.labels = range(self.n_classes)
if self.categorical:
self.labels = le.inverse_transform(self.labels)
self.a = np.zeros((self.n_classes, self.n_classes))
self.dfg = self.df.groupby(['p', 'y']).size().reset_index().rename(columns={0: 'n'})
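        # dfg holds one row per observed (predicted, true) pair with its count
        # in column 'n'; assemble_cm (assumed to be defined further down in this
        # class) is then applied row-wise to scatter those counts into self.a.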
_ = self.dfg.apply(lambda x: self.assemble_cm(x), axis=1)
self.df_cm = | pd.DataFrame(self.a, self.labels, self.labels) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import sys
import jpstocks
import warnings
import pandas as pd
import datetime as dt
import time as time
from sqlalchemy import create_engine
warnings.simplefilter("ignore", category=UserWarning)
q = jpstocks.Quotes()
categories = [
    '0050', # Agriculture, Forestry & Fisheries
    '1050', # Mining
    '2050', # Construction
    '3050', # Foods
    '3100', # Textiles & Apparel
    '3150', # Pulp & Paper
    '3200', # Chemicals
    '3250', # Pharmaceuticals
    '3300', # Oil & Coal Products
    '3350', # Rubber Products
    '3400', # Glass & Ceramics Products
    '3450', # Iron & Steel
    '3500', # Nonferrous Metals
    '3550', # Metal Products
    '3600', # Machinery
    '3650', # Electric Appliances
    '3700', # Transportation Equipment
    '3750', # Precision Instruments
    '3800', # Other Products
    '4050', # Electric Power & Gas
    '5050', # Land Transportation
    '5100', # Marine Transportation
    '5150', # Air Transportation
    '5200', # Warehousing & Transportation Services
    '5250', # Information & Communication
    '6050', # Wholesale Trade
    '6100', # Retail Trade
    '7050', # Banks
    '7100', # Securities
    '7150', # Insurance
    '7200', # Other Financing Business
    '8050', # Real Estate
    '9050' # Services
]
for i in range(len(categories)):
try:
brands = q.get_brand(categories[i])
except:
pass
lis = []
for b in brands:
f = q.get_finance(b.ccode)
# start = dt.date.today()
start = dt.datetime(2020, 2, 7)
# end = dt.date.today()
end = dt.datetime(2020, 2, 7)
try:
h = q.get_historical_prices(b.ccode, jpstocks.DAILY, start_date=start, end_date=end)
except jpstocks.exceptions.CCODENotFoundException:
pass
try:
yesterdayPrice = h.__getitem__(0).close
            if ((f.years_low + f.years_high) / 2 - yesterdayPrice) / ((f.years_low + f.years_high) / 2) > 0: # below the average price: (average - current) / average
dic = {
'category_code': categories[i],
'market_name': b.market,
'stock_code': b.ccode,
'company_name': b.name,
                    'market_cap': f.market_cap / 100, # in units of 100 million yen
'years_low_price': f.years_low,
'years_high_price': f.years_high,
'years_average_price': (f.years_low + f.years_high) / 2,
'yesterday_price': yesterdayPrice,
'diff_average_price': yesterdayPrice - (f.years_low + f.years_high) / 2, # 平均差价
'decline_ratio': '%.2f' % ((((f.years_low + f.years_high) / 2 - yesterdayPrice) / ((f.years_low + f.years_high) / 2)) * 100), # 平均价跌幅比率
'dividend_one': f.dividend_one,
'price_min': '%.2f' %(f.price_min / 10000), # 万日元
'create_time': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
}
lis.append(dic)
except:
pass
engine = create_engine(
'mysql+pymysql://aliyun:[email protected]:3306/blog')
sql = "delete from t_stock_info where create_time < " + "'" + dt.date.today().__str__() + "'"
try:
pd.read_sql_query(sql, engine)
except:
print('deleted')
df = | pd.DataFrame(lis) | pandas.DataFrame |
import json, os
import pandas as pd
import numpy as np
from sklearn import preprocessing
from joblib import dump, load
def doFeatureEngineering(rawData, rawDataFormat, trainingMode,
dropCols, binaryCols, catCols,
numericalCols, toPredict,
saveCSV, saveX, savey,
oheModelFile):
print('Loading the data ...')
if rawDataFormat == 'json':
data = json.load(open(rawData))
df = pd.DataFrame(data['result']['records'])
elif rawDataFormat == 'csv':
df = | pd.read_csv(raw_data) | pandas.read_csv |
import pandas as pd
import itertools
import matplotlib.pyplot as plt
import numpy as np
import pickle
from src.utils.utils import get_file_names
from src.utils.utils import get_file_path
def display_features(features, feature_names):
df = | pd.DataFrame(data=features, columns=feature_names) | pandas.DataFrame |
###########################################################################################################
## IMPORTS
###########################################################################################################
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
import pickle
from keras.layers.advanced_activations import LeakyReLU, ELU, ReLU
from keras.models import Sequential, Model, model_from_json
from keras.layers import Activation, Convolution2D, Conv2D, LocallyConnected2D, MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dense, Dropout, Input, concatenate, add, Add, ZeroPadding2D, GlobalMaxPooling2D, DepthwiseConv2D
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping
from keras.optimizers import Adam
from keras.regularizers import l2
#from keras.activations import linear, elu, tanh, relu
from keras import metrics, losses, initializers, backend
from keras.utils import multi_gpu_model
from keras.initializers import glorot_uniform, Constant, lecun_uniform
from keras import backend as K
os.environ["PATH"] += os.pathsep + "C:/ProgramData/Anaconda3/GraphViz/bin/"
os.environ["PATH"] += os.pathsep + "C:/Anaconda/Graphviz2.38/bin/"
from keras.utils.vis_utils import plot_model
from sklearn.model_selection import train_test_split
import tensorflow as tf
np.random.seed(42)
tf.random.set_seed(42)
tf.get_logger().setLevel('ERROR')
physical_devices = tf.config.list_physical_devices('GPU')
for pd_dev in range(len(physical_devices)):
tf.config.experimental.set_memory_growth(physical_devices[pd_dev], True)
##from tensorflow.compat.v1.keras.backend import set_session
##config = tf.compat.v1.ConfigProto()
##config.gpu_options.per_process_gpu_memory_fraction = 0.9
##config.gpu_options.allow_growth = True
##config.log_device_placement = True
##set_session(config)
#config = tf.compat.v1.ConfigProto()
#config.gpu_options.allow_growth = True
#config.log_device_placement = True
#sess = tf.compat.v1.InteractiveSession(config = config)
#set_session(sess)
#backend.set_session(sess)
###########################################################################################################
## PLOTTING PALETTE
###########################################################################################################
# Create a dict object containing U.C. Berkeley official school colors for plot palette
# reference : https://alumni.berkeley.edu/brand/color-palette
berkeley_palette = {'berkeley_blue' : '#003262',
'california_gold' : '#FDB515',
'metallic_gold' : '#BC9B6A',
'founders_rock' : '#2D637F',
'medalist' : '#E09E19',
'bay_fog' : '#C2B9A7',
'lawrence' : '#00B0DA',
'sather_gate' : '#B9D3B6',
'pacific' : '#53626F',
'soybean' : '#9DAD33',
'california_purple' : '#5C3160',
'south_hall' : '#6C3302'}
###########################################################################################################
## CLASS CONTAINING MODEL ZOO
###########################################################################################################
class Models(object):
def __init__(self, model_path, **kwargs):
super(Models, self).__init__(** kwargs)
# validate that the constructor parameters were provided by caller
if (not model_path):
raise RuntimeError('path to model files must be provided on initialization.')
        # ensure all are strings and leading/trailing whitespace is removed
model_path = str(model_path).replace('\\', '/').strip()
if (not model_path.endswith('/')): model_path = ''.join((model_path, '/'))
# validate the existence of the data path
if (not os.path.isdir(model_path)):
            raise RuntimeError("Models path specified '%s' is invalid." % model_path)
self.__models_path = model_path
self.__GPU_count = len(tf.config.list_physical_devices('GPU'))
self.__MIN_early_stopping = 10
#------------------------------------------------
# Private Methods
#------------------------------------------------
# plotting method for keras history arrays
def __plot_keras_history(self, history, metric, model_name, feature_name, file_name, verbose = False):
# Plot the performance of the model training
fig = plt.figure(figsize=(15,8),dpi=80)
ax = fig.add_subplot(121)
ax.plot(history.history[metric][1:], color = berkeley_palette['founders_rock'], label = 'Train',
marker = 'o', markersize = 4, alpha = 0.9)
ax.plot(history.history["".join(["val_",metric])][1:], color = berkeley_palette['medalist'], label = 'Validation',
marker = 'o', markersize = 4, alpha = 0.9)
ax.set_title(" ".join(['Model Performance',"(" + model_name + ")"]) + "\n" + feature_name,
color = berkeley_palette['berkeley_blue'], fontsize = 15, fontweight = 'bold')
ax.spines["top"].set_alpha(.0)
ax.spines["bottom"].set_alpha(.3)
ax.spines["right"].set_alpha(.0)
ax.spines["left"].set_alpha(.3)
ax.set_xlabel("Epoch", fontsize = 12, horizontalalignment='right', x = 1.0, color = berkeley_palette['berkeley_blue'])
ax.set_ylabel(metric, fontsize = 12, horizontalalignment='right', y = 1.0, color = berkeley_palette['berkeley_blue'])
plt.legend(loc = 'upper right')
ax = fig.add_subplot(122)
ax.plot(history.history['loss'][1:], color = berkeley_palette['founders_rock'], label = 'Train',
marker = 'o', markersize = 4, alpha = 0.9)
ax.plot(history.history["".join(["val_loss"])][1:], color = berkeley_palette['medalist'], label = 'Validation',
marker = 'o', markersize = 4, alpha = 0.9)
ax.set_title(" ".join(['Model Performance',"(" + model_name + ")"]) + "\n" + feature_name,
color = berkeley_palette['berkeley_blue'], fontsize = 15, fontweight = 'bold')
ax.spines["top"].set_alpha(.0)
ax.spines["bottom"].set_alpha(.3)
ax.spines["right"].set_alpha(.0)
ax.spines["left"].set_alpha(.3)
ax.set_xlabel("Epoch", fontsize = 12, horizontalalignment='right', x = 1.0, color = berkeley_palette['berkeley_blue'])
ax.set_ylabel("Loss", fontsize = 12, horizontalalignment='right', y = 1.0, color = berkeley_palette['berkeley_blue'])
plt.legend(loc = 'upper right')
plt.tight_layout()
plt.savefig(file_name, dpi=300)
if verbose: print("Training plot file saved to '%s'." % file_name)
plt.close()
# load Keras model files from json / h5
def __load_keras_model(self, model_name, model_file, model_json, verbose = False):
"""Loads a Keras model from disk"""
if not os.path.isfile(model_file):
raise RuntimeError("Model file '%s' does not exist; exiting inferencing." % model_file)
if not os.path.isfile(model_json):
raise RuntimeError("Model file '%s' does not exist; exiting inferencing." % model_json)
# load model file
if verbose: print("Retrieving model: %s..." % model_name)
json_file = open(model_json, "r")
model_json_data = json_file.read()
json_file.close()
model = model_from_json(model_json_data)
model.load_weights(model_file)
return model
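    # Illustrative call (the file names are assumptions):
    #
    #   model = self.__load_keras_model("cnn_baseline",
    #                                   model_file="cnn_baseline.h5",
    #                                   model_json="cnn_baseline.json")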
# Performs standard scaling on a 4D image
def __4d_Scaler(self, arr, ss, fit = False, verbose = False):
"""Performs standard scaling of the 4D array with the 'ss' model provided by caller"""
#Unwinds a (instances, rows, columns, layers) array to 2D for standard scaling
num_instances, num_rows, num_columns, num_layers = arr.shape
arr_copy = np.reshape(arr, (-1, num_columns))
# fit the standard scaler
if fit:
if verbose: print("Fitting SCALER and transforming...")
arr_copy = ss.fit_transform(arr_copy)
else:
if verbose: print("Transforming SCALER only...")
arr_copy = ss.transform(arr_copy)
arr = np.reshape(arr_copy, (num_instances, num_rows, num_columns, num_layers))
return arr
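    # A minimal usage sketch for __4d_Scaler (hypothetical caller-side names; the 'ss'
    # argument is assumed to be a sklearn.preprocessing.StandardScaler built by the caller):
    #   ss = StandardScaler()
    #   X_train = self.__4d_Scaler(X_train, ss, fit = True)    # fit statistics on training images only
    #   X_val   = self.__4d_Scaler(X_val, ss, fit = False)     # reuse the fitted statistics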
# resnet identity block builder
def __identity_block(self, model, kernel_size, filters, stage, block):
"""modularized identity block for resnet"""
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(filters1, (1, 1),
kernel_initializer='he_normal',
name=conv_name_base + '2a')(model)
x = BatchNormalization(axis=3, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size,
padding='same',
kernel_initializer='he_normal',
name=conv_name_base + '2b')(x)
x = BatchNormalization(axis=3, name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1),
kernel_initializer='he_normal',
name=conv_name_base + '2c')(x)
x = BatchNormalization(axis=3, name=bn_name_base + '2c')(x)
x = add([x, model])
x = Activation('relu')(x)
return x
# resnet conv block builder
def __conv_block(self, model, kernel_size, filters, stage, block, strides=(2, 2)):
"""conv block builder for resnet"""
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(filters1, (1, 1), strides=strides,
kernel_initializer='he_normal',
name=conv_name_base + '2a')(model)
x = BatchNormalization(axis=3, name=bn_name_base + '2a')(x)
        x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size, padding='same',
kernel_initializer='he_normal',
name=conv_name_base + '2b')(x)
x = BatchNormalization(axis=3, name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1),
kernel_initializer='he_normal',
name=conv_name_base + '2c')(x)
x = BatchNormalization(axis=3, name=bn_name_base + '2c')(x)
shortcut = Conv2D(filters3, (1, 1), strides=strides,
kernel_initializer='he_normal',
name=conv_name_base + '1')(model)
shortcut = BatchNormalization(
axis=3, name=bn_name_base + '1')(shortcut)
x = add([x, shortcut])
x = Activation('relu')(x)
return x
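    # Illustrative sketch of how the two block builders pair up in a standard ResNet stage
    # (filter counts follow ResNet-50 stage 2 and are not taken from this class):
    #   x = self.__conv_block(x, 3, [64, 64, 256], stage = 2, block = 'a', strides = (1, 1))
    #   x = self.__identity_block(x, 3, [64, 64, 256], stage = 2, block = 'b')
    #   x = self.__identity_block(x, 3, [64, 64, 256], stage = 2, block = 'c')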
# create a layerable inception module
def __inception_module(self, model, filters_1x1, filters_3x3_reduce, filters_3x3,
filters_5x5_reduce, filters_5x5, filters_pool_proj, kernel_init, bias_init, name = None):
"""modularized inception block for layering"""
# Connection Layer 1 (1x1)
conv_1x1 = Convolution2D(filters_1x1, (1, 1), padding = 'same', activation = 'relu',
kernel_initializer = kernel_init, bias_initializer = bias_init) (model)
# Connection Layer 2 (3x3)
conv_3x3 = Convolution2D(filters_3x3_reduce, (1, 1), padding = 'same', activation = 'relu',
kernel_initializer = kernel_init, bias_initializer = bias_init) (model)
conv_3x3 = Convolution2D(filters_3x3, (3, 3), padding = 'same', activation = 'relu',
kernel_initializer = kernel_init, bias_initializer = bias_init) (conv_3x3)
# Connection Layer 3 (5x5)
conv_5x5 = Convolution2D(filters_5x5_reduce, (1, 1), padding = 'same', activation = 'relu',
kernel_initializer = kernel_init, bias_initializer = bias_init) (model)
conv_5x5 = Convolution2D(filters_5x5, (5, 5), padding = 'same', activation = 'relu',
kernel_initializer = kernel_init, bias_initializer = bias_init) (conv_5x5)
# Connection Layer 4 (pool)
pool_proj = MaxPooling2D((3, 3), strides = (1, 1), padding = 'same') (model)
pool_proj = Convolution2D(filters_pool_proj, (1, 1), padding = 'same', activation = 'relu',
kernel_initializer = kernel_init, bias_initializer = bias_init) (pool_proj)
# Concatenation layer
output = concatenate(inputs = [conv_1x1, conv_3x3, conv_5x5, pool_proj], axis = 3, name = name)
return output
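    # Sketch of a single call to the module above; the filter counts mirror the 'inception_3a'
    # configuration used later in get_keras_inception:
    #   x = self.__inception_module(x, filters_1x1 = 64, filters_3x3_reduce = 96, filters_3x3 = 128,
    #                               filters_5x5_reduce = 16, filters_5x5 = 32, filters_pool_proj = 32,
    #                               kernel_init = kernel_init, bias_init = bias_init, name = 'inception_3a')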
# return an InceptionV3 output tensor after applying Conv2D and BatchNormalization
def __conv2d_bn(self, x, filters, num_row, num_col, padding = 'same', strides = (1, 1), name = None):
if name is not None:
bn_name = name + '_bn'
conv_name = name + '_conv'
else:
bn_name = None
conv_name = None
bn_axis = 3
x = Convolution2D(filters, (num_row, num_col), strides = strides,
padding = padding, use_bias = False, name = conv_name) (x)
x = BatchNormalization(axis = bn_axis, scale = False, name = bn_name) (x)
x = ReLU(name = name) (x)
return x
# a residual block for resnext
def __resnext_block(self, x, filters, kernel_size = 3, stride = 1, groups = 32, conv_shortcut = True, name = None):
if conv_shortcut is True:
shortcut = Conv2D((64 // groups) * filters, 1, strides = stride, use_bias = False, name = name + '_0_conv') (x)
shortcut = BatchNormalization(axis = 3, epsilon=1.001e-5, name = name + '_0_bn') (shortcut)
else:
shortcut = x
x = Conv2D(filters, 1, use_bias = False, name = name + '_1_conv') (x)
x = BatchNormalization(axis = 3, epsilon = 1.001e-5, name = name + '_1_bn') (x)
x = Activation('relu', name = name + '_1_relu') (x)
c = filters // groups
x = ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)
x = DepthwiseConv2D(kernel_size, strides = stride, depth_multiplier = c, use_bias = False, name = name + '_2_conv') (x)
        # build a fixed 1x1 kernel that sums each group's depthwise outputs back to 'filters'
        # channels; the frozen (trainable = False) Conv2D below then emulates a grouped convolution
        kernel = np.zeros((1, 1, filters * c, filters), dtype = np.float32)
for i in range(filters):
start = (i // c) * c * c + i % c
end = start + c * c
kernel[:, :, start:end:c, i] = 1.
x = Conv2D(filters, 1, use_bias = False, trainable = False, kernel_initializer = {'class_name': 'Constant','config': {'value': kernel}}, name = name + '_2_gconv') (x)
x = BatchNormalization(axis=3, epsilon = 1.001e-5, name = name + '_2_bn') (x)
x = Activation('relu', name=name + '_2_relu') (x)
x = Conv2D((64 // groups) * filters, 1, use_bias = False, name = name + '_3_conv') (x)
x = BatchNormalization(axis = 3, epsilon=1.001e-5, name = name + '_3_bn') (x)
x = Add(name = name + '_add') ([shortcut, x])
x = Activation('relu', name = name + '_out') (x)
return x
# a set of stacked residual blocks for ResNeXt
def __resnext_stack(self, x, filters, blocks, stride1 = 2, groups = 32, name = None, dropout = None):
x = self.__resnext_block(x, filters, stride = stride1, groups = groups, name = name + '_block1')
for i in range(2, blocks + 1):
x = self.__resnext_block(x, filters, groups = groups, conv_shortcut = False,
name = name + '_block' + str(i))
if not dropout is None:
x = Dropout(dropout) (x)
return x
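    # Illustrative sketch of chaining stacks into a ResNeXt-50-style body (block counts follow
    # the ResNeXt paper, not necessarily any model built by this class):
    #   x = self.__resnext_stack(x, 128, 3, stride1 = 1, name = 'conv2')
    #   x = self.__resnext_stack(x, 256, 4, name = 'conv3')
    #   x = self.__resnext_stack(x, 512, 6, name = 'conv4')
    #   x = self.__resnext_stack(x, 1024, 3, name = 'conv5')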
def __bn_relu(self, x, bn_name = None, relu_name = None):
norm = BatchNormalization(axis = 3, name = bn_name) (x)
return Activation("relu", name = relu_name) (norm)
def __bn_relu_conv(self, **conv_params):
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
dilation_rate = conv_params.setdefault("dilation_rate", (1, 1))
conv_name = conv_params.setdefault("conv_name", None)
bn_name = conv_params.setdefault("bn_name", None)
relu_name = conv_params.setdefault("relu_name", None)
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(x):
activation = self.__bn_relu(x, bn_name = bn_name, relu_name = relu_name)
return Conv2D(filters = filters, kernel_size = kernel_size,
strides = strides, padding = padding,
dilation_rate = dilation_rate,
kernel_initializer = kernel_initializer,
kernel_regularizer = kernel_regularizer,
name = conv_name) (activation)
return f
def __conv_bn_relu(self, **conv_params):
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
dilation_rate = conv_params.setdefault("dilation_rate", (1, 1))
conv_name = conv_params.setdefault("conv_name", None)
bn_name = conv_params.setdefault("bn_name", None)
relu_name = conv_params.setdefault("relu_name", None)
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(x):
x = Conv2D(filters = filters, kernel_size = kernel_size,
strides = strides, padding = padding,
dilation_rate = dilation_rate,
kernel_initializer = kernel_initializer,
kernel_regularizer = kernel_regularizer,
name = conv_name) (x)
return self.__bn_relu(x, bn_name = bn_name, relu_name = relu_name)
return f
def __block_name_base(self, stage, block):
if block < 27:
block = '%c' % (block + 97) # 97 is the ascii number for lowercase 'a'
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
return conv_name_base, bn_name_base
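    # For reference, stage 2 / block 0 yields ('res2a_branch', 'bn2a_branch'), matching the
    # layer-naming convention of the original ResNet implementation.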
def __shortcut(self, input_feature, residual, conv_name_base = None, bn_name_base = None):
input_shape = K.int_shape(input_feature)
residual_shape = K.int_shape(residual)
stride_width = int(round(input_shape[1] / residual_shape[1]))
stride_height = int(round(input_shape[2] / residual_shape[2]))
equal_channels = input_shape[3] == residual_shape[3]
shortcut = input_feature
# 1 X 1 conv if shape is different. Else identity.
if stride_width > 1 or stride_height > 1 or not equal_channels:
print('reshaping via a convolution...')
if conv_name_base is not None:
conv_name_base = conv_name_base + '1'
shortcut = Conv2D(filters=residual_shape[3],
kernel_size=(1, 1),
strides=(stride_width, stride_height),
padding="valid",
kernel_initializer="he_normal",
kernel_regularizer=l2(0.0001),
name=conv_name_base)(input_feature)
if bn_name_base is not None:
bn_name_base = bn_name_base + '1'
shortcut = BatchNormalization(axis=3,
name=bn_name_base)(shortcut)
return add([shortcut, residual])
def __basic_block(self, filters, stage, block, transition_strides = (1, 1),
dilation_rate = (1, 1), is_first_block_of_first_layer = False, dropout = None,
residual_unit = None):
def f(input_features):
conv_name_base, bn_name_base = self.__block_name_base(stage, block)
if is_first_block_of_first_layer:
# don't repeat bn->relu since we just did bn->relu->maxpool
x = Conv2D(filters = filters, kernel_size = (3, 3),
strides = transition_strides, dilation_rate = dilation_rate,
padding = "same", kernel_initializer = "he_normal", kernel_regularizer = l2(1e-4),
name = conv_name_base + '2a') (input_features)
else:
x = residual_unit(filters = filters, kernel_size = (3, 3),
strides = transition_strides,
dilation_rate = dilation_rate,
conv_name_base = conv_name_base + '2a',
bn_name_base = bn_name_base + '2a') (input_features)
if dropout is not None:
x = Dropout(dropout) (x)
x = residual_unit(filters = filters, kernel_size = (3, 3),
conv_name_base = conv_name_base + '2b',
bn_name_base = bn_name_base + '2b') (x)
return self.__shortcut(input_features, x)
return f
def __bottleneck(self, filters, stage, block, transition_strides = (1, 1),
dilation_rate = (1, 1), is_first_block_of_first_layer = False, dropout = None,
residual_unit = None):
"""Bottleneck architecture for > 34 layer resnet.
Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
Returns:
A final conv layer of filters * 4
"""
def f(input_feature):
conv_name_base, bn_name_base = self.__block_name_base(stage, block)
if is_first_block_of_first_layer:
# don't repeat bn->relu since we just did bn->relu->maxpool
x = Conv2D(filters=filters, kernel_size=(1, 1),
strides=transition_strides,
dilation_rate=dilation_rate,
padding="same",
kernel_initializer="he_normal",
kernel_regularizer=l2(1e-4),
name=conv_name_base + '2a')(input_feature)
else:
x = residual_unit(filters=filters, kernel_size=(1, 1),
strides=transition_strides,
dilation_rate=dilation_rate,
conv_name_base=conv_name_base + '2a',
bn_name_base=bn_name_base + '2a')(input_feature)
if dropout is not None:
x = Dropout(dropout)(x)
x = residual_unit(filters=filters, kernel_size=(3, 3),
conv_name_base=conv_name_base + '2b',
bn_name_base=bn_name_base + '2b')(x)
if dropout is not None:
x = Dropout(dropout)(x)
x = residual_unit(filters=filters * 4, kernel_size=(1, 1),
conv_name_base=conv_name_base + '2c',
bn_name_base=bn_name_base + '2c')(x)
return self.__shortcut(input_feature, x)
return f
# builds a residual block for resnet with repeating bottleneck blocks
def __residual_block(self, block_function, filters, blocks, stage, transition_strides = None, transition_dilation_rates = None,
dilation_rates = None, is_first_layer = False, dropout = None, residual_unit = None):
if transition_dilation_rates is None:
transition_dilation_rates = [(1, 1)] * blocks
if transition_strides is None:
transition_strides = [(1, 1)] * blocks
if dilation_rates is None:
dilation_rates = [1] * blocks
def f(x):
for i in range(blocks):
is_first_block = is_first_layer and i == 0
x = block_function(filters=filters, stage=stage, block=i,
transition_strides=transition_strides[i],
dilation_rate=dilation_rates[i],
is_first_block_of_first_layer=is_first_block,
dropout=dropout,
residual_unit=residual_unit)(x)
return x
return f
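    # Illustrative sketch of assembling a ResNet body from this builder (hypothetical values):
    #   block_fn = self.__basic_block                      # or self.__bottleneck for deeper variants
    #   x = self.__residual_block(block_fn, filters = 64, blocks = 2, stage = 2,
    #                             is_first_layer = True, residual_unit = self.__bn_relu_conv)(x)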
######################################################
######################################################
######################################################
### KERAS MODEL ZOO
######################################################
######################################################
######################################################
#------------------------------------------------
# NaimishNet Model
# ref: https://arxiv.org/abs/1710.00977
#------------------------------------------------
def get_keras_naimishnet(self, X, Y, batch_size, epoch_count, X_val = None, Y_val = None, val_split = 0.1, shuffle = True,
feature_name = "unknown", recalculate_pickle = True, full = True, verbose = False):
__MODEL_NAME = "Keras - NaimishNet"
__MODEL_FNAME_PREFIX = "KERAS_NAIMISHNET/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
os.makedirs(nested_dir)
__model_file_name = "".join([nested_dir, "NaimishNet_", feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, "NaimishNet_", feature_name, __MODEL_SUFFIX, ".json"])
__history_params_file = "".join([nested_dir, "NaimishNet_", feature_name, __MODEL_SUFFIX, "_params.csv"])
__history_performance_file = "".join([nested_dir, "NaimishNet_", feature_name, __MODEL_SUFFIX, "_history.csv"])
__history_plot_file = "".join([nested_dir, "NaimishNet_", feature_name, __MODEL_SUFFIX, "_plot.png"])
__model_architecture_plot_file = "".join([nested_dir, "NaimishNet_", feature_name, __MODEL_SUFFIX, "_model_plot.png"])
if verbose: print("Retrieving model: %s..." % "".join([__MODEL_NAME, "_", feature_name, __MODEL_SUFFIX]))
# Create or load the model
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)) or recalculate_pickle:
if verbose: print("Pickle file for '%s' MODEL not found or skipped by caller." % feature_name)
act = Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-08)
lss = 'mean_squared_error'
mtrc = ['mae','mse']
#ke = initializers.lecun_uniform(seed = 42)
ke = 'glorot_uniform'
stop_at = np.max([int(0.1 * epoch_count), self.__MIN_early_stopping])
es = EarlyStopping(patience = stop_at, verbose = verbose)
cp = ModelCheckpoint(filepath = __model_file_name, verbose = verbose, save_best_only = True,
mode = 'min', monitor = 'val_mae')
if self.__GPU_count > 1: dev = "/cpu:0"
else: dev = "/gpu:0"
with tf.device(dev):
l1 = Input((96, 96, 1))
l2 = Convolution2D(32, (4, 4), kernel_initializer = ke, padding = 'valid', activation = 'elu') (l1)
#l3 = ELU() (l2)
l3 = MaxPooling2D(pool_size=(2,2), strides = (2,2)) (l2)
l4 = Dropout(rate = 0.1) (l3)
l5 = Convolution2D(64, (3, 3), kernel_initializer = ke, padding = 'valid', activation = 'elu') (l4)
#l7 = ELU() (l6)
l6 = MaxPooling2D(pool_size=(2,2), strides = (2,2)) (l5)
l7 = Dropout(rate = 0.2) (l6)
l8 = Convolution2D(128, (2, 2), kernel_initializer = ke, padding = 'valid', activation = 'elu') (l7)
#l11 = ELU() (l10)
l9 = MaxPooling2D(pool_size=(2,2), strides = (2,2)) (l8)
l10 = Dropout(rate = 0.3) (l9)
l11 = Convolution2D(256, (1, 1), kernel_initializer = ke, padding = 'valid', activation = 'elu') (l10)
#l15 = ELU() (l14)
l12 = MaxPooling2D(pool_size=(2,2), strides = (2,2)) (l11)
l13 = Dropout(rate = 0.4) (l12)
l14 = Flatten() (l13)
l15 = Dense(1000, activation = 'elu') (l14)
#l20 = ELU() (l19)
l16 = Dropout(rate = 0.5) (l15)
#l22 = Dense(1000) (l21)
#l23 = linear(l22)
l17 = Dense(1000, activation = 'linear') (l16)
l18 = Dropout(rate = 0.6) (l17)
l19 = Dense(2) (l18)
model = Model(inputs = [l1], outputs = [l19])
model.compile(optimizer = act, loss = lss, metrics = mtrc)
if verbose: print(model.summary())
# Compile the model
if self.__GPU_count > 1:
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
parallel_model = multi_gpu_model(model, gpus = self.__GPU_count)
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
else:
parallel_model = model
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
if (X_val is None) or (Y_val is None):
history = parallel_model.fit(X, Y, validation_split = val_split, batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
else:
history = parallel_model.fit(X, Y, validation_data = (X_val, Y_val), batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
# print and/or save a performance plot
self.__plot_keras_history(history = history, metric = 'mse', model_name = __MODEL_NAME,
feature_name = feature_name, file_name = __history_plot_file, verbose = verbose)
# save the model, parameters, and performance history
model_json = parallel_model.to_json()
with open(__model_json_file, "w") as json_file:
json_file.write(model_json)
hist_params = pd.DataFrame(history.params)
hist_params.to_csv(__history_params_file)
hist = pd.DataFrame(history.history)
hist.to_csv(__history_performance_file)
if verbose: print("Model JSON, history, and parameters file saved.")
# save a plot of the model architecture
plot_model(parallel_model, to_file = __model_architecture_plot_file, rankdir = 'TB',
show_shapes = True, show_layer_names = True, expand_nested = True, dpi=300)
else:
if verbose: print("Loading history and params files for '%s' MODEL..." % feature_name)
hist_params = pd.read_csv(__history_params_file)
hist = pd.read_csv(__history_performance_file)
if verbose: print("Loading pickle file for '%s' MODEL from file '%s'" % (feature_name, __model_file_name))
            parallel_model = self.__load_keras_model(__MODEL_NAME, __model_file_name, __model_json_file, verbose = verbose)
return parallel_model, hist_params, hist
# inferencing
def predict_keras_naimishnet(self, X, feature_name = "unknown", full = True, verbose = False):
__MODEL_NAME = "Keras - NaimishNet"
__MODEL_FNAME_PREFIX = "KERAS_NAIMISHNET/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
raise RuntimeError("Model path '%s' does not exist; exiting inferencing." % nested_dir)
__model_file_name = "".join([nested_dir, "NaimishNet_", feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, "NaimishNet_", feature_name, __MODEL_SUFFIX, ".json"])
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)):
raise RuntimeError("One or some of the following files are missing; prediction cancelled:\n\n'%s'\n'%s'\n" %
(__model_file_name, __model_json_file))
# load the Keras model for the specified feature
model = self.__load_keras_model(__MODEL_NAME, __model_file_name, __model_json_file, verbose = verbose)
# predict
if verbose: print("Predicting %d (x,y) coordinates for '%s'..." % (len(X), feature_name))
Y = model.predict(X, verbose = verbose)
if verbose: print("Predictions completed!")
return Y
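    # Example round trip for this model family (a sketch; 'mz', 'X_train', 'Y_train' and 'X_test'
    # are hypothetical caller-side objects and the class instance name is assumed):
    #   model, params, hist = mz.get_keras_naimishnet(X_train, Y_train, batch_size = 64,
    #                                                 epoch_count = 100, feature_name = 'left_eye_center')
    #   Y_pred = mz.predict_keras_naimishnet(X_test, feature_name = 'left_eye_center')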
#------------------------------------------------
# Kaggle1 Model
# Inspired by: https://www.kaggle.com/balraj98/data-augmentation-for-facial-keypoint-detection
#------------------------------------------------
def get_keras_kaggle1(self, X, Y, batch_size, epoch_count, val_split = 0.05, X_val = None, Y_val = None, shuffle = True,
feature_name = "ALL_FEATURES", recalculate_pickle = True, full = True, verbose = False):
__MODEL_NAME = "Keras - Kaggle1"
__MODEL_FNAME_PREFIX = "KERAS_KAGGLE1/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
os.makedirs(nested_dir)
__model_file_name = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".json"])
__history_params_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_params.csv"])
__history_performance_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_history.csv"])
__history_plot_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_plot.png"])
__model_architecture_plot_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_model_plot.png"])
##__scaler_file = "".join([nested_dir, feature_name, "_scaler.pkl"])
if verbose: print("Retrieving model: %s..." % "".join([__MODEL_NAME, __MODEL_SUFFIX]))
# Create or load the model
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)) or recalculate_pickle:
if verbose: print("Pickle file for '%s' MODEL not found or skipped by caller." % feature_name)
#act = Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-08)
act = 'adam'
#lss = losses.mean_squared_error
lss = 'mean_squared_error'
#mtrc = [metrics.RootMeanSquaredError()]
mtrc = ['mae','mse']
stop_at = np.max([int(0.1 * epoch_count), self.__MIN_early_stopping])
es = EarlyStopping(patience = stop_at, verbose = verbose)
cp = ModelCheckpoint(filepath = __model_file_name, verbose = verbose, save_best_only = True,
mode = 'min', monitor = 'val_mae')
if self.__GPU_count > 1: dev = "/cpu:0"
else: dev = "/gpu:0"
with tf.device(dev):
model = Sequential()
# Input dimensions: (None, 96, 96, 1)
model.add(Convolution2D(32, (3,3), padding='same', use_bias=False, input_shape=(96,96,1)))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# Input dimensions: (None, 96, 96, 32)
model.add(Convolution2D(32, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
# CDB: 3/5 DROPOUT ADDED
model.add(Dropout(0.2))
# Input dimensions: (None, 48, 48, 32)
model.add(Convolution2D(64, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# Input dimensions: (None, 48, 48, 64)
model.add(Convolution2D(64, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
# CDB: 3/5 DROPOUT ADDED
model.add(Dropout(0.25))
# Input dimensions: (None, 24, 24, 64)
model.add(Convolution2D(96, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# Input dimensions: (None, 24, 24, 96)
model.add(Convolution2D(96, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
# CDB: 3/5 DROPOUT ADDED
model.add(Dropout(0.15))
# Input dimensions: (None, 12, 12, 96)
model.add(Convolution2D(128, (3,3),padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# Input dimensions: (None, 12, 12, 128)
model.add(Convolution2D(128, (3,3),padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
# CDB: 3/5 DROPOUT ADDED
model.add(Dropout(0.3))
# Input dimensions: (None, 6, 6, 128)
model.add(Convolution2D(256, (3,3),padding='same',use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# Input dimensions: (None, 6, 6, 256)
model.add(Convolution2D(256, (3,3),padding='same',use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
# CDB: 3/5 DROPOUT ADDED
model.add(Dropout(0.2))
# Input dimensions: (None, 3, 3, 256)
model.add(Convolution2D(512, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# Input dimensions: (None, 3, 3, 512)
model.add(Convolution2D(512, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# TEST added 4/8
model.add(Dropout(0.3))
model.add(Convolution2D(1024, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
                # Input dimensions: (None, 3, 3, 1024)
model.add(Convolution2D(1024, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
                # Input dimensions: (None, 3, 3, 1024)
model.add(Flatten())
model.add(Dense(1024,activation='relu'))
# CDB DROPOUT INCREASED FROM 0.1 to 0.2
model.add(Dropout(0.15))
if full:
model.add(Dense(30))
else:
model.add(Dense(8))
if verbose: print(model.summary())
# Compile the model
if self.__GPU_count > 1:
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
parallel_model = multi_gpu_model(model, gpus = self.__GPU_count)
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
else:
parallel_model = model
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
if (X_val is None) or (Y_val is None):
history = parallel_model.fit(X, Y, validation_split = val_split, batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
else:
history = parallel_model.fit(X, Y, validation_data = (X_val, Y_val), batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
# print and/or save a performance plot
self.__plot_keras_history(history = history, metric = 'mse', #metric = 'root_mean_squared_error',
model_name = __MODEL_NAME, feature_name = feature_name, file_name = __history_plot_file,
verbose = verbose)
# save the model, parameters, and performance history
model_json = parallel_model.to_json()
with open(__model_json_file, "w") as json_file:
json_file.write(model_json)
hist_params = pd.DataFrame(history.params)
hist_params.to_csv(__history_params_file)
hist = pd.DataFrame(history.history)
hist.to_csv(__history_performance_file)
if verbose: print("Model JSON, history, and parameters file saved.")
# save a plot of the model architecture
plot_model(parallel_model, to_file = __model_architecture_plot_file, rankdir = 'TB',
show_shapes = True, show_layer_names = True, expand_nested = True, dpi=300)
else:
if verbose: print("Loading history and params files for '%s' MODEL..." % feature_name)
hist_params = pd.read_csv(__history_params_file)
hist = pd.read_csv(__history_performance_file)
if verbose: print("Loading pickle file for '%s' MODEL from file '%s'" % (feature_name, __model_file_name))
parallel_model = self.__load_keras_model(__MODEL_NAME, __model_file_name, __model_json_file, verbose = verbose)
return parallel_model, hist_params, hist
# inferencing
def predict_keras_kaggle1(self, X, feature_name = "unknown", full = True, verbose = False):
__MODEL_NAME = "Keras - Kaggle1"
__MODEL_FNAME_PREFIX = "KERAS_KAGGLE1/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
raise RuntimeError("Model path '%s' does not exist; exiting inferencing." % nested_dir)
__model_file_name = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".json"])
##__scaler_file = "".join([nested_dir, feature_name, "_scaler.pkl"])
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)):## or (not os.path.isfile(__scaler_file)):
raise RuntimeError("One or some of the following files are missing; prediction cancelled:\n\n'%s'\n'%s'\n" % ##'%s'\n" %
(__model_file_name, __model_json_file))##, __scaler_file))
# Load the training scaler for this model
##if verbose: print("Loading SCALER for '%s' and zero-centering X." % feature_name)
##scaler = pickle.load(open(__scaler_file, "rb"))
##X = self.__4d_Scaler(arr = X, ss = scaler, fit = False, verbose = verbose)
# load the Keras model for the specified feature
model = self.__load_keras_model(__MODEL_NAME, __model_file_name, __model_json_file, verbose = verbose)
# predict
if verbose: print("Predicting %d (x,y) coordinates for '%s'..." % (len(X), feature_name))
Y = model.predict(X, verbose = verbose)
if verbose: print("Predictions completed!")
return Y
#-------------------------------------------------------------
# LeNet5 Model
    # Inspired by: LeCun's LeNet-5 for MNIST - Modified
#-------------------------------------------------------------
def get_keras_lenet5(self, X, Y, batch_size, epoch_count, X_val = None, Y_val = None, val_split = 0.1, shuffle = True,
feature_name = "ALL_FEATURES", recalculate_pickle = True, full = True, verbose = False):
__MODEL_NAME = "Keras - LeNet5"
__MODEL_FNAME_PREFIX = "KERAS_LENET5/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
os.makedirs(nested_dir)
__model_file_name = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".json"])
__model_architecture_plot_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_model_plot.png"])
__history_params_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_params.csv"])
__history_performance_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_history.csv"])
__history_plot_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_plot.png"])
if verbose: print("Retrieving model: %s..." % "".join([__MODEL_NAME, __MODEL_SUFFIX]))
# Create or load the model
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)) or recalculate_pickle:
if verbose: print("Pickle file for '%s' MODEL not found or skipped by caller." % feature_name)
#if (X_val is None) or (Y_val is None):
# if verbose: print("No validation set specified; creating a split based on %.2f val_split parameter." % val_split)
# X, Y, X_val, Y_val = train_test_split(X, Y, test_size = val_split, random_state = 42)
act = Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-8)
lss = 'mean_squared_error'
mtrc = ['mae','mse']
stop_at = np.max([int(0.1 * epoch_count), self.__MIN_early_stopping])
es = EarlyStopping(patience = stop_at, verbose = verbose)
cp = ModelCheckpoint(filepath = __model_file_name, verbose = verbose, save_best_only = True,
mode = 'min', monitor = 'val_mae')
if self.__GPU_count > 1: dev = "/cpu:0"
else: dev = "/gpu:0"
with tf.device(dev):
model = Sequential()
model.add(Convolution2D(filters = 6, kernel_size = (3, 3), input_shape = (96, 96, 1)))
model.add(ReLU())
# CDB: 3/5 added Batch Normalization
#model.add(BatchNormalization())
model.add(AveragePooling2D())
#model.add(Dropout(0.2))
model.add(Convolution2D(filters = 16, kernel_size = (3, 3)))
model.add(ReLU())
# CDB: 3/5 added Batch Normalization
#model.add(BatchNormalization())
model.add(AveragePooling2D())
#model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(512))
model.add(ReLU())
#model.add(Dropout(0.1))
model.add(Dense(256))
model.add(ReLU())
#model.add(Dropout(0.2))
if full:
model.add(Dense(30))
else:
model.add(Dense(8))
if verbose: print(model.summary())
# Compile the model
if self.__GPU_count > 1:
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
parallel_model = multi_gpu_model(model, gpus = self.__GPU_count)
else:
parallel_model = model
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
if (X_val is None) or (Y_val is None):
history = parallel_model.fit(X, Y, validation_split = val_split, batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
else:
history = parallel_model.fit(X, Y, validation_data = (X_val, Y_val), batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
# print and/or save a performance plot
self.__plot_keras_history(history = history, metric = 'mse', model_name = __MODEL_NAME,
feature_name = feature_name, file_name = __history_plot_file, verbose = verbose)
# save the model, parameters, and performance history
model_json = parallel_model.to_json()
with open(__model_json_file, "w") as json_file:
json_file.write(model_json)
hist_params = pd.DataFrame(history.params)
hist_params.to_csv(__history_params_file)
hist = pd.DataFrame(history.history)
hist.to_csv(__history_performance_file)
# save a plot of the model architecture
plot_model(parallel_model, to_file = __model_architecture_plot_file, rankdir = 'TB',
show_shapes = True, show_layer_names = True, expand_nested = True, dpi=300)
if verbose: print("Model JSON, history, and parameters file saved.")
else:
if verbose: print("Loading history and params files for '%s' MODEL..." % feature_name)
hist_params = pd.read_csv(__history_params_file)
hist = pd.read_csv(__history_performance_file)
if verbose: print("Loading pickle file for '%s' MODEL from file '%s'" % (feature_name, __model_file_name))
parallel_model = self.__load_keras_model(__MODEL_NAME, __model_file_name, __model_json_file, verbose = verbose)
return parallel_model, hist_params, hist
# inferencing
def predict_keras_lenet5(self, X, feature_name = "ALL_FEATURES", full = True, verbose = False):
__MODEL_NAME = "Keras - LeNet5"
__MODEL_FNAME_PREFIX = "KERAS_LENET5/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
raise RuntimeError("Model path '%s' does not exist; exiting inferencing." % nested_dir)
__model_file_name = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".json"])
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)):
raise RuntimeError("One or some of the following files are missing; prediction cancelled:\n\n'%s'\n'%s'\n" %
(__model_file_name, __model_json_file))
# load the Keras model for the specified feature
model = self.__load_keras_model(__MODEL_NAME, __model_file_name, __model_json_file, verbose = verbose)
# predict
if verbose: print("Predicting %d (x,y) coordinates for '%s'..." % (len(X), feature_name))
Y = model.predict(X, verbose = verbose)
if verbose: print("Predictions completed!")
return Y
#-------------------------------------------------------------
# Inception V1
    # Inspired by: https://arxiv.org/abs/1409.4842
#-------------------------------------------------------------
def get_keras_inception(self, X, Y, batch_size, epoch_count, val_split = 0.1, shuffle = True,
feature_name = "ALL_FEATURES", recalculate_pickle = True, X_val = None, Y_val = None, full = True, verbose = False):
__MODEL_NAME = "Keras - Inception"
__MODEL_FNAME_PREFIX = "KERAS_INCEPTION/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
os.makedirs(nested_dir)
__model_file_MAIN_name = "".join([nested_dir, "inception_MAIN_", feature_name, __MODEL_SUFFIX, ".h5"])
__model_file_AUX1_name = "".join([nested_dir, "inception_AUX1_", feature_name, __MODEL_SUFFIX, ".h5"])
__model_file_AUX2_name = "".join([nested_dir, "inception_AUX2_", feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, ".json"])
__model_architecture_plot_file = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_plot.png"])
__history_params_file = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_params.csv"])
__history_performance_file = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_history.csv"])
__history_plot_file_main = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_main_output_mse_plot.png"])
__history_plot_file_auxilliary1 = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_auxilliary_output_1_mse_plot.png"])
__history_plot_file_auxilliary2 = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_auxilliary_output_2_mse_plot.png"])
if verbose: print("Retrieving model: %s..." % "".join([__MODEL_NAME, __MODEL_SUFFIX]))
# Create or load the model
if (not os.path.isfile(__model_file_MAIN_name)) or (not os.path.isfile(__model_file_AUX1_name)) or (not os.path.isfile(__model_file_AUX2_name)) or (not os.path.isfile(__model_json_file)) or recalculate_pickle:
if verbose: print("Pickle file for '%s' MODEL not found or skipped by caller." % __MODEL_NAME)
act = Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-8)
lss = 'mean_squared_error'
mtrc = ['mae','mse']
stop_at = np.max([int(0.1 * epoch_count), self.__MIN_early_stopping])
es = EarlyStopping(patience = stop_at, verbose = verbose)
cp_main = ModelCheckpoint(filepath = __model_file_MAIN_name, verbose = verbose, save_best_only = True,
mode = 'min', monitor = 'val_main_output_mae')
cp_aux1 = ModelCheckpoint(filepath = __model_file_AUX1_name, verbose = verbose, save_best_only = True,
mode = 'min', monitor = 'val_auxilliary_output_1_mae')
cp_aux2 = ModelCheckpoint(filepath = __model_file_AUX2_name, verbose = verbose, save_best_only = True,
mode = 'min', monitor = 'val_auxilliary_output_2_mae')
kernel_init = glorot_uniform()
bias_init = Constant(value = 0.2)
if self.__GPU_count > 1: dev = "/cpu:0"
else: dev = "/gpu:0"
with tf.device(dev):
# Input image shape (H, W, C)
input_img = Input(shape=(96, 96, 1))
# Top Layer (Begin MODEL)
model = Convolution2D(filters = 64, kernel_size = (7, 7), padding = 'same', strides = (2, 2),
activation = 'relu', name = 'conv_1_7x7/2', kernel_initializer = kernel_init,
bias_initializer = bias_init) (input_img)
model = MaxPooling2D((3, 3), padding = 'same', strides = (2, 2), name = 'max_pool_1_3x3/2') (model)
model = Convolution2D(64, (1, 1), padding = 'same', strides = (1, 1), activation = 'relu', name = 'conv_2a_3x3/1') (model)
model = Convolution2D(192, (3, 3), padding = 'same', strides = (1, 1), activation = 'relu', name = 'conv_2b_3x3/1') (model)
model = MaxPooling2D((3, 3), padding = 'same', strides = (2, 2), name = 'max_pool_2_3x3/2') (model)
# Inception Module
model = self.__inception_module(model,
filters_1x1 = 64,
filters_3x3_reduce = 96,
filters_3x3 = 128,
filters_5x5_reduce = 16,
filters_5x5 = 32,
filters_pool_proj = 32,
kernel_init = kernel_init,
bias_init = bias_init,
name = 'inception_3a')
# Inception Module
model = self.__inception_module(model,
filters_1x1 = 128,
filters_3x3_reduce = 128,
filters_3x3 = 192,
filters_5x5_reduce = 32,
filters_5x5 = 96,
filters_pool_proj = 64,
kernel_init = kernel_init,
bias_init = bias_init,
name = 'inception_3b')
model = MaxPooling2D((3, 3), padding = 'same', strides = (2, 2), name= 'max_pool_3_3x3/2') (model)
# Inception Module
model = self.__inception_module(model,
filters_1x1 = 192,
filters_3x3_reduce = 96,
filters_3x3 = 208,
filters_5x5_reduce = 16,
filters_5x5 = 48,
filters_pool_proj = 64,
kernel_init = kernel_init,
bias_init = bias_init,
name = 'inception_4a')
# CDB 3/5 DROPOUT ADDED
model = Dropout(0.2) (model)
# Begin MODEL1 (auxillary output)
model1 = AveragePooling2D((5, 5), padding = 'same', strides = 3, name= 'avg_pool_4_5x5/2') (model)
model1 = Convolution2D(128, (1, 1), padding = 'same', activation = 'relu') (model1)
model1 = Flatten() (model1)
model1 = Dense(1024, activation = 'relu') (model1)
model1 = Dropout(0.3) (model1)
if full:
model1 = Dense(30, name = 'auxilliary_output_1') (model1)
else:
model1 = Dense(8, name = 'auxilliary_output_1') (model1)
# Resume MODEL w/ Inception
model = self.__inception_module(model,
filters_1x1 = 160,
filters_3x3_reduce = 112,
filters_3x3 = 224,
filters_5x5_reduce = 24,
filters_5x5 = 64,
filters_pool_proj = 64,
kernel_init = kernel_init,
bias_init = bias_init,
name='inception_4b')
model = self.__inception_module(model,
filters_1x1 = 128,
filters_3x3_reduce = 128,
filters_3x3 = 256,
filters_5x5_reduce = 24,
filters_5x5 = 64,
filters_pool_proj = 64,
kernel_init = kernel_init,
bias_init = bias_init,
name='inception_4c')
model = self.__inception_module(model,
filters_1x1 = 112,
filters_3x3_reduce = 144,
filters_3x3 = 288,
filters_5x5_reduce = 32,
filters_5x5 = 64,
filters_pool_proj = 64,
kernel_init = kernel_init,
bias_init = bias_init,
name='inception_4d')
# CDB : 3/5 DROPOUT ADDED
model = Dropout(0.2) (model)
# Begin MODEL2 (auxilliary output)
model2 = AveragePooling2D((5, 5), strides = 3) (model)
model2 = Convolution2D(128, (1, 1), padding = 'same', activation = 'relu') (model2)
model2 = Flatten() (model2)
model2 = Dense(1024, activation = 'relu') (model2)
model2 = Dropout(0.3) (model2)
if full:
model2 = Dense(30, name = 'auxilliary_output_2') (model2)
else:
model2 = Dense(8, name = 'auxilliary_output_2') (model2)
# Resume MODEL w/ Inception
model = self.__inception_module(model,
filters_1x1 = 256,
filters_3x3_reduce = 160,
filters_3x3 = 320,
filters_5x5_reduce = 32,
filters_5x5 = 128,
filters_pool_proj = 128,
kernel_init = kernel_init,
bias_init = bias_init,
name='inception_4e')
model = MaxPooling2D((3, 3), padding = 'same', strides = (2, 2), name = 'max_pool_4_3x3/2') (model)
model = self.__inception_module(model,
filters_1x1 = 256,
filters_3x3_reduce = 160,
filters_3x3 = 320,
filters_5x5_reduce = 32,
filters_5x5 = 128,
filters_pool_proj = 128,
kernel_init = kernel_init,
bias_init = bias_init,
name='inception_5a')
model = self.__inception_module(model,
filters_1x1 = 384,
filters_3x3_reduce = 192,
filters_3x3 = 384,
filters_5x5_reduce = 48,
filters_5x5 = 128,
filters_pool_proj = 128,
kernel_init = kernel_init,
bias_init = bias_init,
name='inception_5b')
model = GlobalAveragePooling2D(name = 'avg_pool_5_3x3/1') (model)
model = Dropout(0.3) (model)
# Output Layer (Main)
if full:
model = Dense(30, name = 'main_output') (model)
else:
model = Dense(8, name = 'main_output') (model)
model = Model(input_img, [model, model1, model2], name = 'Inception')
if verbose: print(model.summary())
# Compile the model
if self.__GPU_count > 1:
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
parallel_model = multi_gpu_model(model, gpus = self.__GPU_count)
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
else:
parallel_model = model
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
if (X_val is None) or (Y_val is None):
history = parallel_model.fit(X, [Y, Y, Y], validation_split = val_split, batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp_main, cp_aux1, cp_aux2], verbose = verbose)
else:
history = parallel_model.fit(X, [Y, Y, Y], validation_data = (X_val, [Y_val, Y_val, Y_val]), batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp_main, cp_aux1, cp_aux2], verbose = verbose)
# print and/or save a performance plot
for m, f in zip(['main_output_mse', 'auxilliary_output_1_mse', 'auxilliary_output_2_mse'],
[__history_plot_file_main, __history_plot_file_auxilliary1, __history_plot_file_auxilliary2]):
try:
self.__plot_keras_history(history = history, metric = m, model_name = __MODEL_NAME,
feature_name = feature_name, file_name = f, verbose = False)
except:
print("error during history plot generation; skipped.")
pass
# save the model, parameters, and performance history
model_json = parallel_model.to_json()
with open(__model_json_file, "w") as json_file:
json_file.write(model_json)
hist_params = pd.DataFrame(history.params)
hist_params.to_csv(__history_params_file)
hist = pd.DataFrame(history.history)
hist.to_csv(__history_performance_file)
# save a plot of the model architecture
try:
plot_model(parallel_model, to_file = __model_architecture_plot_file, rankdir = 'TB',
show_shapes = True, show_layer_names = True, expand_nested = True, dpi=300)
except:
print("error during model plot generation; skiopped.")
pass
if verbose: print("Model JSON, history, and parameters file saved.")
else:
if verbose: print("Loading history and params files for '%s' MODEL..." % feature_name)
hist_params = pd.read_csv(__history_params_file)
hist = pd.read_csv(__history_performance_file)
if verbose: print("Loading pickle file for '%s' MODEL (MAIN) from file '%s'" % (__MODEL_NAME, __model_file_MAIN_name))
main_model = self.__load_keras_model(__MODEL_NAME, __model_file_MAIN_name, __model_json_file, verbose = verbose)
if verbose: print("Loading pickle file for '%s' MODEL (AUX1) from file '%s'" % (__MODEL_NAME, __model_file_AUX1_name))
aux1_model = self.__load_keras_model(__MODEL_NAME, __model_file_AUX1_name, __model_json_file, verbose = verbose)
if verbose: print("Loading pickle file for '%s' MODEL (AUX2) from file '%s'" % (__MODEL_NAME, __model_file_AUX2_name))
aux2_model = self.__load_keras_model(__MODEL_NAME, __model_file_AUX2_name, __model_json_file, verbose = verbose)
return main_model, aux1_model, aux2_model, hist_params, hist
# inferencing
def predict_keras_inception(self, X, feature_name = "ALL_FEATURES", full = True, verbose = False):
__MODEL_NAME = "Keras - Inception"
__MODEL_FNAME_PREFIX = "KERAS_INCEPTION/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
raise RuntimeError("Model path '%s' does not exist; exiting inferencing." % nested_dir)
__model_file_MAIN_name = "".join([nested_dir, "inception_MAIN_", feature_name, __MODEL_SUFFIX, ".h5"])
__model_file_AUX1_name = "".join([nested_dir, "inception_AUX1_", feature_name, __MODEL_SUFFIX, ".h5"])
__model_file_AUX2_name = "".join([nested_dir, "inception_AUX2_", feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, ".json"])
if (not os.path.isfile(__model_file_MAIN_name)) or (not os.path.isfile(__model_file_AUX1_name)) or (not os.path.isfile(__model_file_AUX2_name)) or (not os.path.isfile(__model_json_file)):
raise RuntimeError("One or some of the following files are missing; prediction cancelled:\n\n'%s'\n'%s'\n'%s'\n'%s'\n\n" %
(__model_file_MAIN_name, __model_file_AUX1_name, __model_file_AUX2_name, __model_json_file))
# load the Keras model for the specified feature
main_model = self.__load_keras_model(__MODEL_NAME, __model_file_MAIN_name, __model_json_file, verbose = verbose)
aux1_model = self.__load_keras_model(__MODEL_NAME, __model_file_AUX1_name, __model_json_file, verbose = verbose)
aux2_model = self.__load_keras_model(__MODEL_NAME, __model_file_AUX2_name, __model_json_file, verbose = verbose)
# predict
if verbose: print("Predicting %d (x,y) coordinates for 'MAIN' model file..." % len(X))
Y_main = main_model.predict(X, verbose = verbose)
Y_main_columns = [node.op.name for node in main_model.outputs]
if verbose: print("Predicting %d (x,y) coordinates for 'AUX1' model file..." % len(X))
Y_aux1 = aux1_model.predict(X, verbose = verbose)
Y_aux1_columns = [node.op.name for node in aux1_model.outputs]
if verbose: print("Predicting %d (x,y) coordinates for 'AUX2' model file..." % len(X))
Y_aux2 = aux2_model.predict(X, verbose = verbose)
Y_aux2_columns = [node.op.name for node in aux2_model.outputs]
if verbose: print("Predictions completed!")
return Y_main, Y_aux1, Y_aux2, Y_main_columns, Y_aux1_columns, Y_aux2_columns
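    # Sketch of consuming the three Inception checkpoints returned above (hypothetical caller code).
    # Each loaded checkpoint has three output heads, so predict() returns a list ordered
    # [main, aux1, aux2]; one simple option is to average the main head across checkpoints:
    #   Y_main, Y_aux1, Y_aux2, *_ = mz.predict_keras_inception(X_test)
    #   Y_ensemble = (Y_main[0] + Y_aux1[0] + Y_aux2[0]) / 3.0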
#------------------------------------------------
# ConvNet5 Simple Model
#------------------------------------------------
def get_keras_convnet5(self, X, Y, batch_size, epoch_count, val_split = 0.1, X_val = None, Y_val = None, shuffle = True,
feature_name = "ALL_FEATURES", recalculate_pickle = True, full = True, verbose = False):
__MODEL_NAME = "Keras - ConvNet5"
__MODEL_FNAME_PREFIX = "KERAS_CONVNET5/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
os.makedirs(nested_dir)
__model_file_name = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".json"])
__history_params_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_params.csv"])
__history_performance_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_history.csv"])
__history_plot_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_plot.png"])
__model_architecture_plot_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_model_plot.png"])
if verbose: print("Retrieving model: %s..." % "".join([__MODEL_NAME, __MODEL_SUFFIX]))
# Create or load the model
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)) or recalculate_pickle:
if verbose: print("Pickle file for '%s' MODEL not found or skipped by caller." % feature_name)
act = Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-08)
lss = 'mean_squared_error'
mtrc = ['mae','mse']
stop_at = np.max([int(0.1 * epoch_count), self.__MIN_early_stopping])
es = EarlyStopping(patience = stop_at, verbose = verbose)
cp = ModelCheckpoint(filepath = __model_file_name, verbose = verbose, save_best_only = True,
mode = 'min', monitor = 'val_mae')
if self.__GPU_count > 1: dev = "/cpu:0"
else: dev = "/gpu:0"
with tf.device(dev):
model = Sequential(name = 'ConvNet5')
# Input dimensions: (None, 96, 96, 1)
model.add(Convolution2D(16, (3,3), padding = 'same', activation = 'relu', input_shape=(96,96,1)))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.1))
model.add(Convolution2D(32, (3,3), padding = 'same', activation = 'relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Convolution2D(64, (3,3), padding = 'same', activation = 'relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.1))
model.add(Convolution2D(128, (3,3), padding = 'same', activation = 'relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(256, (3,3), padding = 'same', activation = 'relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.15))
model.add(Flatten())
model.add(Dense(1024, activation = 'relu'))
model.add(Dropout(0.1))
model.add(Dense(512, activation = 'relu'))
model.add(Dropout(0.1))
if full:
model.add(Dense(30))
else:
model.add(Dense(8))
if verbose: print(model.summary())
# Compile the model
if self.__GPU_count > 1:
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
parallel_model = multi_gpu_model(model, gpus = self.__GPU_count)
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
else:
parallel_model = model
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
if (X_val is None) or (Y_val is None):
history = parallel_model.fit(X, Y, validation_split = val_split, batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
else:
history = parallel_model.fit(X, Y, validation_data = (X_val, Y_val), batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
# print and/or save a performance plot
self.__plot_keras_history(history = history, metric = 'mse',
model_name = __MODEL_NAME, feature_name = feature_name, file_name = __history_plot_file,
verbose = verbose)
# save the model, parameters, and performance history
model_json = parallel_model.to_json()
with open(__model_json_file, "w") as json_file:
json_file.write(model_json)
hist_params = pd.DataFrame(history.params)
hist_params.to_csv(__history_params_file)
hist = pd.DataFrame(history.history)
hist.to_csv(__history_performance_file)
if verbose: print("Model JSON, history, and parameters file saved.")
# save a plot of the model architecture
plot_model(parallel_model, to_file = __model_architecture_plot_file, rankdir = 'TB',
show_shapes = True, show_layer_names = True, expand_nested = True, dpi=300)
else:
if verbose: print("Loading history and params files for '%s' MODEL..." % feature_name)
hist_params = pd.read_csv(__history_params_file)
hist = pd.read_csv(__history_performance_file)
if verbose: print("Loading pickle file for '%s' MODEL from file '%s'" % (feature_name, __model_file_name))
parallel_model = self.__load_keras_model(__MODEL_NAME, __model_file_name, __model_json_file, verbose = verbose)
return parallel_model, hist_params, hist
# inferencing
def predict_keras_convnet5(self, X, feature_name = "unknown", full = True, verbose = False):
__MODEL_NAME = "Keras - ConvNet5"
__MODEL_FNAME_PREFIX = "KERAS_CONVNET5/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
raise RuntimeError("Model path '%s' does not exist; exiting inferencing." % nested_dir)
__model_file_name = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".json"])
##__scaler_file = "".join([nested_dir, feature_name, "_scaler.pkl"])
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)):
raise RuntimeError("One or some of the following files are missing; prediction cancelled:\n\n'%s'\n'%s'\n" %
(__model_file_name, __model_json_file))
# load the Keras model for the specified feature
model = self.__load_keras_model(__MODEL_NAME, __model_file_name, __model_json_file, verbose = verbose)
# predict
if verbose: print("Predicting %d (x,y) coordinates for '%s'..." % (len(X), feature_name))
Y = model.predict(X, verbose = verbose)
if verbose: print("Predictions completed!")
return Y
#-------------------------------------------------------------
# Inception V3
    # Inspired by: http://arxiv.org/abs/1512.00567
#-------------------------------------------------------------
def get_keras_inceptionv3(self, X, Y, batch_size, epoch_count, val_split = 0.1, shuffle = True,
feature_name = "ALL_FEATURES", recalculate_pickle = True, X_val = None, Y_val = None, full = True, verbose = False):
__MODEL_NAME = "Keras - Inceptionv3"
__MODEL_FNAME_PREFIX = "KERAS_INCEPTIONV3/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
os.makedirs(nested_dir)
__model_file_MAIN_name = "".join([nested_dir, "inception_MAIN_", feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, ".json"])
__model_architecture_plot_file = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_plot.png"])
__history_params_file = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_params.csv"])
__history_performance_file = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_history.csv"])
__history_plot_file_main = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_main_output_mse_plot.png"])
if verbose: print("Retrieving model: %s..." % "".join([__MODEL_NAME, __MODEL_SUFFIX]))
# Create or load the model
if (not os.path.isfile(__model_file_MAIN_name)) or (not os.path.isfile(__model_json_file)) or recalculate_pickle:
if verbose: print("Pickle file for '%s' MODEL not found or skipped by caller." % __MODEL_NAME)
act = Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-8)
lss = 'mean_squared_error'
mtrc = ['mae','mse']
stop_at = np.max([int(0.1 * epoch_count), self.__MIN_early_stopping])
es = EarlyStopping(patience = stop_at, verbose = verbose)
cp_main = ModelCheckpoint(filepath = __model_file_MAIN_name, verbose = verbose, save_best_only = True,
mode = 'min', monitor = 'val_mae')
kernel_init = glorot_uniform()
bias_init = Constant(value = 0.2)
if self.__GPU_count > 1: dev = "/cpu:0"
else: dev = "/gpu:0"
with tf.device(dev):
# Input image shape (H, W, C)
input_img = Input(shape = (96, 96, 1))
# Begin Inception V3
x = self.__conv2d_bn(x = input_img, filters = 32, num_row = 3, num_col = 3, strides = (2, 2), padding = 'valid')
x = self.__conv2d_bn(x = x, filters = 32, num_row = 3, num_col = 3, strides = (1, 1), padding = 'valid')
x = self.__conv2d_bn(x = x, filters = 64, num_row = 3, num_col = 3, strides = (1, 1), padding = 'same')
x = MaxPooling2D((3, 3), strides = (2, 2)) (x)
x = self.__conv2d_bn(x = x, filters = 80, num_row = 1, num_col = 1, strides = (1, 1), padding = 'valid')
x = self.__conv2d_bn(x = x, filters = 192, num_row = 3, num_col = 3, strides = (1, 1), padding = 'valid')
x = MaxPooling2D((3, 3), strides = (2, 2)) (x)
branch1x1 = self.__conv2d_bn(x = x, filters = 64, num_row = 1, num_col = 1, strides = (1, 1), padding = 'same')
branch5x5 = self.__conv2d_bn(x = x, filters = 48, num_row = 1, num_col = 1, strides = (1, 1), padding = 'same')
branch5x5 = self.__conv2d_bn(x = branch5x5, filters = 64, num_row = 5, num_col = 5, strides = (1, 1), padding = 'same')
branch3x3dbl = self.__conv2d_bn(x = x, filters = 64, num_row = 1, num_col = 1, strides = (1, 1), padding = 'same')
branch3x3dbl = self.__conv2d_bn(x = branch3x3dbl, filters = 96, num_row = 3, num_col = 3, strides = (1, 1), padding = 'same')
branch3x3dbl = self.__conv2d_bn(x = branch3x3dbl, filters = 96, num_row = 3, num_col = 3, strides = (1, 1), padding = 'same')
branch_pool = AveragePooling2D((3, 3), strides = (1, 1), padding = 'same') (x)
branch_pool = self.__conv2d_bn(x = branch_pool, filters = 32, num_row = 1, num_col = 1, strides = (1, 1), padding = 'same')
x = concatenate( [branch1x1, branch5x5, branch3x3dbl, branch_pool], axis = 3, name = 'mixed0')
branch1x1 = self.__conv2d_bn(x, 64, 1, 1)
branch5x5 = self.__conv2d_bn(x, 48, 1, 1)
branch5x5 = self.__conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = self.__conv2d_bn(x, 64, 1, 1)
branch3x3dbl = self.__conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = self.__conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = AveragePooling2D((3, 3), strides = (1, 1), padding = 'same') (x)
branch_pool = self.__conv2d_bn(branch_pool, 64, 1, 1)
x = concatenate( [branch1x1, branch5x5, branch3x3dbl, branch_pool], axis = 3, name = 'mixed1')
branch1x1 = self.__conv2d_bn(x, 64, 1, 1)
branch5x5 = self.__conv2d_bn(x, 48, 1, 1)
branch5x5 = self.__conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = self.__conv2d_bn(x, 64, 1, 1)
branch3x3dbl = self.__conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = self.__conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = AveragePooling2D((3, 3), strides = (1, 1), padding = 'same') (x)
branch_pool = self.__conv2d_bn(branch_pool, 64, 1, 1)
x = concatenate( [branch1x1, branch5x5, branch3x3dbl, branch_pool], axis = 3, name = 'mixed2')
branch3x3 = self.__conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')
branch3x3dbl = self.__conv2d_bn(x, 64, 1, 1)
branch3x3dbl = self.__conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = self.__conv2d_bn(branch3x3dbl, 96, 3, 3, strides=(2, 2), padding='valid')
branch_pool = MaxPooling2D((3, 3), strides=(2, 2)) (x)
x = concatenate( [branch3x3, branch3x3dbl, branch_pool], axis = 3, name = 'mixed3')
branch1x1 = self.__conv2d_bn(x, 192, 1, 1)
branch7x7 = self.__conv2d_bn(x, 128, 1, 1)
branch7x7 = self.__conv2d_bn(branch7x7, 128, 1, 7)
branch7x7 = self.__conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = self.__conv2d_bn(x, 128, 1, 1)
branch7x7dbl = self.__conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = self.__conv2d_bn(branch7x7dbl, 128, 1, 7)
branch7x7dbl = self.__conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = self.__conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides = (1, 1), padding = 'same') (x)
branch_pool = self.__conv2d_bn(branch_pool, 192, 1, 1)
x = concatenate( [branch1x1, branch7x7, branch7x7dbl, branch_pool], axis = 3, name = 'mixed4')
for i in range(2):
branch1x1 = self.__conv2d_bn(x, 192, 1, 1)
branch7x7 = self.__conv2d_bn(x, 160, 1, 1)
branch7x7 = self.__conv2d_bn(branch7x7, 160, 1, 7)
branch7x7 = self.__conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = self.__conv2d_bn(x, 160, 1, 1)
branch7x7dbl = self.__conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = self.__conv2d_bn(branch7x7dbl, 160, 1, 7)
branch7x7dbl = self.__conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = self.__conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides = (1, 1), padding = 'same') (x)
branch_pool = self.__conv2d_bn(branch_pool, 192, 1, 1)
x = concatenate( [branch1x1, branch7x7, branch7x7dbl, branch_pool], axis = 3, name = 'mixed' + str(5 + i))
branch1x1 = self.__conv2d_bn(x, 192, 1, 1)
branch7x7 = self.__conv2d_bn(x, 192, 1, 1)
branch7x7 = self.__conv2d_bn(branch7x7, 192, 1, 7)
branch7x7 = self.__conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = self.__conv2d_bn(x, 192, 1, 1)
branch7x7dbl = self.__conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = self.__conv2d_bn(branch7x7dbl, 192, 1, 7)
branch7x7dbl = self.__conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = self.__conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides = (1, 1), padding = 'same') (x)
branch_pool = self.__conv2d_bn(branch_pool, 192, 1, 1)
x = concatenate( [branch1x1, branch7x7, branch7x7dbl, branch_pool], axis = 3, name = 'mixed7')
branch3x3 = self.__conv2d_bn(x, 192, 1, 1)
branch3x3 = self.__conv2d_bn(branch3x3, 320, 3, 3,strides=(2, 2), padding='valid')
branch7x7x3 = self.__conv2d_bn(x, 192, 1, 1)
branch7x7x3 = self.__conv2d_bn(branch7x7x3, 192, 1, 7)
branch7x7x3 = self.__conv2d_bn(branch7x7x3, 192, 7, 1)
branch7x7x3 = self.__conv2d_bn(branch7x7x3, 192, 3, 3, strides=(2, 2), padding='valid')
branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = concatenate( [branch3x3, branch7x7x3, branch_pool], axis = 3, name = 'mixed8')
for i in range(2):
branch1x1 = self.__conv2d_bn(x, 320, 1, 1)
branch3x3 = self.__conv2d_bn(x, 384, 1, 1)
branch3x3_1 = self.__conv2d_bn(branch3x3, 384, 1, 3)
branch3x3_2 = self.__conv2d_bn(branch3x3, 384, 3, 1)
branch3x3 = concatenate( [branch3x3_1, branch3x3_2], axis = 3, name = 'mixed9_' + str(i))
branch3x3dbl = self.__conv2d_bn(x, 448, 1, 1)
branch3x3dbl = self.__conv2d_bn(branch3x3dbl, 384, 3, 3)
branch3x3dbl_1 = self.__conv2d_bn(branch3x3dbl, 384, 1, 3)
branch3x3dbl_2 = self.__conv2d_bn(branch3x3dbl, 384, 3, 1)
branch3x3dbl = concatenate( [branch3x3dbl_1, branch3x3dbl_2], axis = 3)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = self.__conv2d_bn(branch_pool, 192, 1, 1)
x = concatenate( [branch1x1, branch3x3, branch3x3dbl, branch_pool], axis = 3, name = 'mixed' + str(9 + i))
x = GlobalAveragePooling2D(name = 'avg_pool') (x)
x = Dropout(0.3) (x)
if full:
x = Dense(30) (x)
else:
x = Dense(8) (x)
model = Model(input_img, x, name = 'InceptionV3')
if verbose: print(model.summary())
# Compile the model
if self.__GPU_count > 1:
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
parallel_model = multi_gpu_model(model, gpus = self.__GPU_count)
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
else:
parallel_model = model
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
if (X_val is None) or (Y_val is None):
history = parallel_model.fit(X, Y, validation_split = val_split, batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp_main], verbose = verbose)
else:
history = parallel_model.fit(X, Y, validation_data = (X_val, Y_val), batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp_main], verbose = verbose)
# print and/or save a performance plot
self.__plot_keras_history(history = history, metric = 'mse',
model_name = __MODEL_NAME, feature_name = feature_name, file_name = __history_plot_file_main,
verbose = verbose)
# save the model, parameters, and performance history
model_json = parallel_model.to_json()
with open(__model_json_file, "w") as json_file:
json_file.write(model_json)
hist_params = pd.DataFrame(history.params)
hist_params.to_csv(__history_params_file)
hist = pd.DataFrame(history.history)
hist.to_csv(__history_performance_file)
# save a plot of the model architecture
try:
plot_model(parallel_model, to_file = __model_architecture_plot_file, rankdir = 'TB',
show_shapes = True, show_layer_names = True, expand_nested = True, dpi=300)
except:
                print("error during model plot generation; skipped.")
pass
if verbose: print("Model JSON, history, and parameters file saved.")
else:
if verbose: print("Loading history and params files for '%s' MODEL..." % feature_name)
hist_params = pd.read_csv(__history_params_file)
            hist = pd.read_csv(__history_performance_file)
from catboost import CatBoostRegressor
import pandas as pd
from earthquake import config
def get_features():
return [
'ffti_av_change_rate_roll_mean_1000',
'percentile_roll_std_30_window_50',
'skew',
'percentile_roll_std_10_window_100',
'percentile_roll_std_30_window_50',
'percentile_roll_std_20_window_1000',
'ffti_exp_Moving_average_30000_mean',
'range_3000_4000',
'max_last_10000',
'mfcc_4_avg',
'fftr_percentile_roll_std_80_window_10000',
'percentile_roll_std_1_window_100',
'ffti_abs_trend',
'av_change_abs_roll_mean_50',
'mfcc_15_avg'
]
def submit():
"""Make prediction and prepare results for submission.
"""
features = get_features()
    train_set = pd.read_csv(config.path_to_train)
import numpy as np
import pandas as pd
from scipy.stats import mode
from tqdm import tqdm
from geopy.geocoders import Nominatim
from datetime import datetime
def handle_bornIn(x):
skip_vals = ['16-Mar', '23-May', 'None']
if x not in skip_vals:
return datetime(2012, 1, 1).year - datetime(int(x), 1, 1).year
else:
return 23
def handle_gender(x):
if x == 'male':
return 1
else:
return 0
def handle_memberSince(x):
skip_vals = ['--None']
if pd.isna(x):
return datetime(2012, 1, 1)
elif x not in skip_vals:
return datetime.strptime(x, '%d-%m-%Y')
else:
return datetime(2012, 1, 1)
def process_tours_df(data_content):
dtype = {}
cols = data_content.tours_df.columns[9:]
for d in cols:
dtype[d] = np.int16
data_content.tours_df = data_content.tours_df.astype(dtype)
data_content.tours_df['area'] = data_content.tours_df['city'] + ' ' + data_content.tours_df['state'] + ' ' + \
data_content.tours_df['pincode'] + ' ' + data_content.tours_df['country']
data_content.tours_df['area'] = data_content.tours_df['area'].apply(lambda x: x.lstrip() if type(x) == str else x)
data_content.tours_df['area'] = data_content.tours_df['area'].apply(lambda x: x.rstrip() if type(x) == str else x)
data_content.tours_df.drop(['city', 'state', 'pincode', 'country'], axis=1, inplace=True)
data_content.tours_df['tour_date'] = data_content.tours_df['tour_date'].apply(
lambda x: datetime(int(x.split('-')[2]), int(x.split('-')[1]), int(x.split('-')[0]), 23, 59))
def process_tour_convoy_df(data_content):
print('Initializing tour_convoy_df...', flush=True)
data_content.tour_convoy_df['total_going'] = 0
data_content.tour_convoy_df['total_not_going'] = 0
data_content.tour_convoy_df['total_maybe'] = 0
data_content.tour_convoy_df['total_invited'] = 0
data_content.tour_convoy_df['fraction_going'] = 0
data_content.tour_convoy_df['fraction_not_going'] = 0
data_content.tour_convoy_df['fraction_maybe'] = 0
known_bikers = set()
lis = ['going', 'not_going', 'maybe', 'invited']
pbar = tqdm(total=data_content.tour_convoy_df.shape[0],
bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description("Step 1 of 2")
for idx, _ in data_content.tour_convoy_df.iterrows():
s = [0, 0, 0]
for j, l in enumerate(lis):
if not pd.isna(data_content.tour_convoy_df.loc[idx, l]):
biker = data_content.tour_convoy_df.loc[idx, l].split()
data_content.tour_convoy_df.loc[idx, 'total_' + l] = len(biker)
if j != 3:
s[j] = len(biker)
for bik in biker:
known_bikers.add(bik)
if sum(s) != 0:
for j in range(3):
data_content.tour_convoy_df.loc[idx, 'fraction_' + lis[j]] = s[j] / sum(s)
pbar.update(1)
pbar.close()
mean = data_content.tour_convoy_df['total_invited'].mean()
std = data_content.tour_convoy_df['total_invited'].std()
data_content.tour_convoy_df['fraction_invited'] = data_content.tour_convoy_df['total_invited'].apply(
lambda x: (x - mean) / std)
biker_tour_convoy_df = dict()
for biker in list(known_bikers):
biker_tour_convoy_df[biker] = [[], [], [], []]
pbar = tqdm(total=data_content.tour_convoy_df.shape[0], bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description("Step 2 of 2")
for idx, _ in data_content.tour_convoy_df.iterrows():
for l in lis:
if not pd.isna(data_content.tour_convoy_df.loc[idx, l]):
biker = data_content.tour_convoy_df.loc[idx, l].split()
for bik in biker:
biker_tour_convoy_df[bik][lis.index(l)] += \
[data_content.tour_convoy_df.loc[idx, 'tour_id']]
pbar.update(1)
pbar.close()
for key, _ in biker_tour_convoy_df.items():
for i in range(4):
biker_tour_convoy_df[key][i] = ' '.join(list(set(biker_tour_convoy_df[key][i])))
biker_tour_convoy_df = pd.DataFrame.from_dict(biker_tour_convoy_df, orient='index')
biker_tour_convoy_df.reset_index(inplace=True)
biker_tour_convoy_df.columns = ['biker_id'] + lis
print('tour_convoy_df ready...', flush=True)
return biker_tour_convoy_df
def get_coordinates(locations, data_content):
geolocation_map = {}
locator = Nominatim(user_agent="Kolibri")
for i in tqdm(range(len(locations)),
disable=False,
bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}'):
# noinspection PyBroadException
try:
location = locator.geocode(locations[i])
geolocation_map[locations[i]] = [location.latitude, location.longitude]
except:
# Called when there is presumably some noise in the Address location
# noinspection PyBroadException
data_content.noise += [locations[i]]
geolocation_map[locations[i]] = [np.nan, np.nan]
location_df = pd.DataFrame({'location': list(locations),
'latitude': np.array(list(geolocation_map.values()))[:, 0],
'longitude': np.array(list(geolocation_map.values()))[:, 1]})
return geolocation_map, location_df
def initialize_locations(data_content):
# noinspection PyBroadException
try:
location_df = pd.read_csv(data_content.base_dir + 'temp/location.csv')
location_from_csv = True
except:
location_df = None
location_from_csv = False
if location_from_csv:
geolocation = {}
print('Initializing Locations from DataFrame...', flush=True)
for i, l in enumerate(location_df['location'].tolist()):
geolocation[l] = [location_df.loc[i, 'latitude'], location_df.loc[i, 'longitude']]
else:
print('Initializing Locations from Nominatim...', flush=True)
biker_location = data_content.bikers_df['area'].dropna().drop_duplicates().tolist()
geolocation, location_df = get_coordinates(biker_location, data_content)
return geolocation, location_df
def impute_location_from_tour_convoy(data_content):
# From tour_convoy
    unk_loc = data_content.bikers_df[pd.isna(data_content.bikers_df['latitude'])]
from scipy.sparse import data
from sklearn.cluster import KMeans
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
from sklearn.manifold import TSNE
import os
import csv
TOP25 = 5
srcPath = 'data/static/time_distribution.csv'
kmeansPath = 'data/static/kmeans.csv'
def getDataset():
    # This sets the sampling rule; to resample, remember to delete data/static/kmeans.csv first
# if not os.path.exists(kmeansPath):
if True:
X = []
df = pd.read_csv(srcPath)
for idx, row in tqdm(df.iterrows(), total=df.shape[0], desc="Dataset"):
if row['TOTAL'] < 10:
                # the sampling rule goes here
continue
data = [row['DAWN'], row['MORNING'], row['AFTERNOON'], row['NIGHT']]
X.append(list(map(lambda x: round(x/row['TOTAL'], 2), data)))
with open(os.path.join('data/static', 'kmeans.csv'), 'w+') as f:
writer = csv.writer(f)
writer.writerow(['DAWN', 'MORNING', 'AFTERNOON', 'NIGHT'])
writer.writerows(X)
df = pd.read_csv(kmeansPath)
print("shape is:", df.shape)
return df
def decideK(df):
    # determine the number of clusters by finding the elbow point of the SSE curve
dataset = []
for idx, row in df.iterrows():
dataset.append([row['DAWN'], row['MORNING'], row['AFTERNOON'], row['NIGHT']])
SSE = []
K = []
for n_cluster in tqdm(range(1, 20), desc='decideK'):
cls = KMeans(n_clusters=n_cluster, max_iter=10000)
cls.fit(dataset)
SSE.append(cls.inertia_)
K.append(n_cluster)
plt.scatter(K, SSE)
plt.plot(K, SSE)
plt.xlabel("K")
plt.ylabel("SSE")
plt.show()
def kmeans(df):
    df = pd.read_csv(kmeansPath)
# -*- coding: utf-8 -*-
from __future__ import print_function, division
# Built-ins
import os, sys, time, datetime, uuid, pickle, gzip, bz2, zipfile, requests, operator, warnings, functools
from collections import OrderedDict, defaultdict
try:
    from collections.abc import Mapping  # Python 3.3+
except ImportError:
    from collections import Mapping  # Python 2 fallback
from io import TextIOWrapper
import xml.etree.ElementTree as ET
import importlib
# Version-specific modules
if sys.version_info.major == 2:
import pathlib2 as pathlib
import string
import bz2file
setattr(bz2, "open", bz2file.open) # Hack to use open with bz2
else:
import pathlib
# External
from tqdm import tqdm, tqdm_notebook, tqdm_gui
import pandas as pd
import numpy as np
# =====
# Formatting
# =====
# Remove pairwise nan
def remove_pairwise_nan(a, b, checks=True):
"""
Remove nan values for a pairwise function
Benchmark:
data:150 dimensionality pd.Series with 1 nan in a
checks=True: 177 µs ± 14.3 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
checks=False: 111 µs ± 1.91 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)
"""
if checks:
assert isinstance(a, (np.ndarray, pd.Series))
assert type(a) is type(b), "`a` and `b` must be the same type"
assert a.size == b.size, "`a` and `b` must be the same size. The sizes are {} and {}, respectively".format(a.size, b.size)
if isinstance(a, pd.Series):
assert np.all(a.index == b.index), "`a` and `b` must be the same index"
index = None
name_a = None
name_b = None
if isinstance(a, pd.Series):
index = a.index
name_a = a.name
name_b = b.name
a = a.values
b = b.values
mask_nan = ~np.logical_or(np.isnan(a), np.isnan(b))
a = a[mask_nan]
b = b[mask_nan]
if index is not None:
a = pd.Series(a, index=index[mask_nan], name=name_a)
b = pd.Series(b, index=index[mask_nan], name=name_b)
return (a,b)
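# Usage sketch for remove_pairwise_nan (illustrative values, not from the original source):
# >>> a = pd.Series([1.0, np.nan, 3.0], index=["x", "y", "z"], name="a")
# >>> b = pd.Series([4.0, 5.0, 6.0], index=["x", "y", "z"], name="b")
# >>> remove_pairwise_nan(a, b)  # index "y" is dropped from both series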
# Format pairwise
def format_pairwise(a, b, nans_ok=True, assert_not_empty=True):
"""
Prepare two pd.Series for a pairwise operation by getting overlapping indices and droping nan
"""
# Assert a and b are series
assert isinstance(a, pd.Series)
assert isinstance(b, pd.Series)
# Get overlap of index
    index = a.index.intersection(b.index)
if assert_not_empty:
assert index.size > 0, "There are no overlapping elements between a.index and b.index"
a = a[index]
b = b[index]
number_of_nans = pd.concat([a.isnull(), b.isnull()]).sum()
if number_of_nans > 0:
if nans_ok:
a, b = remove_pairwise_nan(a,b,checks=False)
else:
raise ValueError("`nans_ok=False` and there are {} total `nan` between `a` and `b`".format(number_of_nans))
return a, b
# Format memory
def format_memory(B, unit="infer", return_units=True):
"""
Return the given bytes as a human readable KB, MB, GB, or TB string
1 KB = 1024 Bytes
Adapted from the following source (@whereisalext):
https://stackoverflow.com/questions/12523586/python-format-size-application-converting-b-to-kb-mb-gb-tb/52379087
"""
B = float(B)
KB = float(1024)
MB = float(KB ** 2) # 1,048,576
GB = float(KB ** 3) # 1,073,741,824
TB = float(KB ** 4) # 1,099,511,627,776
# Human readable
size_in_b = int(B)
size_in_kb = B/KB
size_in_mb = B/MB
size_in_gb = B/GB
size_in_tb = B/TB
if return_units:
size_in_b = '{0} B'.format(size_in_b)
size_in_kb = '{0:.3f} KB'.format(size_in_kb)
size_in_mb = '{0:.3f} MB'.format(size_in_mb)
size_in_gb = '{0:.3f} GB'.format(size_in_gb)
size_in_tb = '{0:.3f} TB'.format(size_in_tb)
unit = unit.lower()
assert_acceptable_arguments(unit.lower(), {"infer", "b", "kb", "mb", "gb", "tb"})
if unit != "infer":
return {"b":size_in_b, "kb":size_in_kb, "mb":size_in_mb, "gb":size_in_gb, "tb":size_in_tb}[unit]
else:
if B < KB:
return size_in_b
elif KB <= B < MB:
return size_in_kb
elif MB <= B < GB:
return size_in_mb
elif GB <= B < TB:
return size_in_gb
elif TB <= B:
return size_in_tb
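# Usage sketch for format_memory (illustrative; outputs follow the 1024-based units above):
# >>> format_memory(512)                 # '512 B'
# >>> format_memory(2048)                # '2.000 KB'
# >>> format_memory(3500000, unit="mb")  # '3.338 MB'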
# Get duration
def format_duration(t0):
"""
Adapted from @john-fouhy:
https://stackoverflow.com/questions/538666/python-format-timedelta-to-string
"""
duration = time.time() - t0
hours, remainder = divmod(duration, 3600)
minutes, seconds = divmod(remainder, 60)
return "{:02}:{:02}:{:02}".format(int(hours), int(minutes), int(seconds))
# Format file path
def format_path(path, into=str, absolute=False):
assert not is_file_like(path), "`path` cannot be file-like"
if hasattr(path, "absolute"):
path = str(path.absolute())
if hasattr(path, "path"):
path = str(path.path)
if absolute:
path = os.path.abspath(path)
return into(path)
# Format header for printing
def format_header(text, line_character="=", n=None):
if n is None:
n = len(text)
line = n*line_character
return "{}\n{}\n{}".format(line, text, line)
# ============
# Dictionaries
# ============
# Dictionary as a tree
def dict_tree():
"""
Source: https://gist.github.com/hrldcpr/2012250
"""
return defaultdict(dict_tree)
# Reverse a dictionary
def dict_reverse(d):
into = type(d)
data = [(v,k) for k,v in d.items()]
return into(data)
# Expand dictionary
def dict_expand(d, into=pd.Series, **kwargs):
"""
Convert {group:[elements]} ==> {element[i]:group[j]}
"""
return into(OrderedDict((r,p) for p,q in d.items() for r in q), **kwargs)
# Fill dictionary
def dict_fill(d, index, filler_value=np.nan, into=dict):
data = [(k,filler_value) for k in index if k not in d] + list(d.items())
return into(data)
# Build a dictionary from repeated elements
def dict_build(input_data, into=dict):
"""
input_data: [(value, iterable)]
d_output: {key_in_iterable:value}
"""
d_output = OrderedDict()
for value, iterable in input_data:
for key in iterable:
d_output[key] = value
return into(d_output)
# Fold dictionary
def dict_collapse(d, into=dict):
"""
Folds dictionary into dict of lists
"""
d_collapsed = defaultdict(list)
for k,v in d.items():
d_collapsed[v].append(k)
return into(d_collapsed)
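# Usage sketch for dict_collapse (illustrative):
# >>> dict_collapse({"a": 1, "b": 1, "c": 2})  # {1: ["a", "b"], 2: ["c"]}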
# Subset a dictionary
def dict_filter(d, keys, into=dict):
"""
keys can be an iterable or function
"""
if hasattr(keys, "__call__"):
f = keys
keys = filter(f, d.keys())
return into(map(lambda k:(k,d[k]), keys))
# Convert python dictionary to bash
def dict_py_to_bash(d, bash_obj_name="DICT"):
"""
Adapted from source:
* https://stackoverflow.com/questions/1494178/how-to-define-hash-tables-in-bash
Converts a Python dictionary or pd.Series to a bash dictionary.
"""
bash_placeholder = "declare -A {}=(".format(bash_obj_name)
for k,v in d.items():
bash_placeholder += ' ["{}"]="{}"'.format(k,v)
bash_placeholder += ")"
return bash_placeholder
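# Usage sketch for dict_py_to_bash (illustrative; "PARAMS" is an arbitrary bash variable name):
# >>> dict_py_to_bash({"sample": "S1", "threads": 4}, bash_obj_name="PARAMS")
# 'declare -A PARAMS=( ["sample"]="S1" ["threads"]="4")'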
# ===========
# Assertions
# ===========
def assert_acceptable_arguments(query, target, operation="le", message="Invalid option provided. Please refer to the following for acceptable arguments:"):
"""
le: operator.le(a, b) : <=
eq: operator.eq(a, b) : ==
ge: operator.ge(a, b) : >=
"""
# If query is not a nonstring iterable or a tuple
if any([
not is_nonstring_iterable(query),
isinstance(query,tuple),
]):
query = [query]
query = set(query)
target = set(target)
func_operation = getattr(operator, operation)
assert func_operation(query,target), "{}\n{}".format(message, target)
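# Usage sketch for assert_acceptable_arguments (illustrative):
# >>> assert_acceptable_arguments("gzip", {"gzip", "bz2", None})  # passes silently (subset check)
# >>> assert_acceptable_arguments("tar", {"gzip", "bz2", None})   # raises AssertionError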
# Check packages
def check_packages(packages, namespace=None, language="python", import_into_backend=False, verbose=False):
"""
Check if packages are available (and import into global namespace)
Handles python and R packages via rpy2
If package is a tuple then imports as follows: ("numpy", "np") where "numpy" is full package name and "np" is abbreviation
If R packages are being checked, please install rpy2
To import packages into current namespace: namespace = globals()
To import packages in backend, e.g. if this is used in a module/script, use `import_into_backend`
packages: str, non-tuple iterable
usage:
@check_packages(["sklearn", "scipy"], language="python")
def f():
pass
@check_packages(["ape"], language="r")
def f():
pass
"""
# Force packages into sorted non-redundant list
if isinstance(packages,(str, tuple)):
packages = [packages]
packages = set(packages)
# Set up decorator for Python imports
if language.lower() == "python":
import_package = importlib.import_module
importing_error = ImportError
# Set up decorator for R imports
if language.lower() == "r":
try:
import rpy2
except ImportError:
raise Exception("Please install 'rpy2' to import R packages")
from rpy2.robjects.packages import importr
from rpy2 import __version__ as rpy2_version
rpy2_version_major = int(rpy2_version.split(".")[0])
assert rpy2_version_major > 1, "Please update your rpy2 version"
if rpy2_version_major == 2:
from rpy2.rinterface import RRuntimeError
importing_error = RRuntimeError
if rpy2_version_major == 3:
# from rpy2.rinterface_lib.embedded import RRuntimeError
from rpy2.robjects.packages import PackageNotInstalledError
importing_error = PackageNotInstalledError
import_package = importr
# Wrapper
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
missing_packages = []
for pkg in packages:
if isinstance(pkg, tuple):
assert len(pkg) == 2, "If a package is tuple type then it must have 2 elements e.g. ('numpy', 'np')"
pkg_name, pkg_variable = pkg
else:
pkg_name = pkg_variable = pkg
try:
package = import_package(pkg_name)
if import_into_backend:
globals()[pkg_variable] = package
if namespace is not None:
namespace[pkg_variable] = package
if verbose:
print("Importing {} as {}".format(pkg_name, pkg_variable), True, file=sys.stderr)
except importing_error:
missing_packages.append(pkg_name)
if verbose:
print("Cannot import {}:".format(pkg_name), False, file=sys.stderr)
assert not missing_packages, "Please install the following {} packages to use this function:\n{}".format(language.capitalize(), ", ".join(missing_packages))
return func(*args, **kwargs)
return wrapper
return decorator
# ===========
# Types
# ===========
def is_function(obj):
return hasattr(obj, "__call__")
def is_file_like(obj):
return hasattr(obj, "read")
def is_dict(obj):
return isinstance(obj, Mapping)
def is_nonstring_iterable(obj):
condition_1 = hasattr(obj, "__iter__")
condition_2 = not type(obj) == str
return all([condition_1,condition_2])
def is_dict_like(obj):
condition_1 = is_dict(obj)
condition_2 = isinstance(obj, pd.Series)
return any([condition_1, condition_2])
def is_all_same_type(iterable):
iterable_types = set(map(lambda obj:type(obj), iterable))
return len(iterable_types) == 1
def is_number(x, num_type = np.number):
return np.issubdtype(type(x), num_type)
def is_query_class(x,query, case_sensitive=False):
# Format single search queries
if type(query) == str:
query = [query]
# Remove case if necessary
x_classname = str(x.__class__)
if not case_sensitive:
x_classname = x_classname.lower()
query = map(lambda q:q.lower(),query)
# Check if any of the tags in query are in the input class
verdict = any(q in x_classname for q in query)
return verdict
def is_path_like(obj, path_must_exist=True):
condition_1 = type(obj) == str
condition_2 = hasattr(obj, "absolute")
condition_3 = hasattr(obj, "path")
obj_is_path_like = any([condition_1, condition_2, condition_3])
if path_must_exist:
if obj_is_path_like:
return os.path.exists(obj)
else:
return False
else:
return obj_is_path_like
def is_in_namespace(variable_names, namespace, func_logic=all):
"""
Check if variable names are in the namespace (i.e. globals())
"""
assert hasattr(variable_names, "__iter__"), "`variable_names` should be either a single string on an object or an iterable of strings of variable names"
if isinstance(variable_names, str):
variable_names = [variable_names]
namespace = set(namespace)
return func_logic(map(lambda x: x in namespace, variable_names))
def is_symmetrical(X, tol=None):
assert len(X.shape) == 2 , "`X` must be 2-dimensional"
assert X.shape[0] == X.shape[1], "`X` must be square"
X = X.copy()
if isinstance(X, pd.DataFrame):
X = X.values
np.fill_diagonal(X, 0)
if tol is None:
return np.all(np.tril(X) == np.triu(X).T)
if tol:
        return np.abs(np.tril(X) - np.triu(X).T).max() < tol
def is_graph(obj, attr="has_edge"):
return hasattr(obj, attr)
@check_packages(["matplotlib"])
def is_color(obj):
from matplotlib.colors import to_rgba
# Note: This can't handle values that are RGB in (0-255) only (0,1)
try:
to_rgba(obj)
return True
except ValueError:
verdict = False
if is_nonstring_iterable(obj):
if all(isinstance(c, (float, int)) for c in obj):
# Check [0,1]
if all(0 <= c <= 1 for c in obj):
verdict = True
# # Check [0,255]
# if not verdict:
# if all(0 <= c <= 255 for c in obj):
# verdict = True
return verdict
@check_packages(["matplotlib"])
def get_color_midpoint(a, b, alpha=1.0, return_type="hex"):
from matplotlib.colors import to_rgba, to_hex
assert_acceptable_arguments(return_type, {"rgba", "rgb", "hex"})
a = to_rgba(a, alpha=alpha)
b = to_rgba(b, alpha=alpha)
c = tuple(np.stack([a,b]).mean(axis=0))
if return_type == "rgba":
return c
if return_type == "rgb":
return c[:-1]
if return_type == "hex":
return to_hex(c)
# =======
# Utility
# =======
# Infer compression
def infer_compression(path):
path = format_path(path)
compression = None
ext_zip = (".zip")
ext_gzip = (".gz", ".gzip", ".pgz")
ext_bz2 = (".bz2", ".bzip2", ".pbz2")
if path.endswith(ext_gzip):
compression= "gzip"
if path.endswith(ext_bz2):
compression = "bz2"
if path.endswith(ext_zip):
compression = "zip"
return compression
# Get current timestamp
def get_timestamp(fmt="%Y-%m-%d %H:%M:%S"):
return datetime.datetime.utcnow().strftime(fmt)
# Wrapper for tqdm
def pv(iterable, description=None, version=None, total=None, unit='it'):
"""
Progress viewer
Wrapper for `tqdm`:
https://github.com/tqdm/tqdm
"""
assert_acceptable_arguments([version], {None, "gui", "notebook"})
func = tqdm
if version == "notebook":
func = tqdm_notebook
if version == "gui":
func = tqdm_gui
    return func(
iterable,
desc=description,
total=total,
unit=unit,
)
# Creates a unique identifier
def get_unique_identifier():
return uuid.uuid4().hex
def contains(query, include, exclude=None):
"""
Is anything from `include` in `query` that doesn't include anything from `exclude`
`query` can be any iterator that is not a generator
"""
if isinstance(include, str):
include = [include]
condition_A = any(x in query for x in include)
if exclude is not None:
if type(exclude) == str:
exclude = [exclude]
condition_B = all(x not in query for x in exclude)
return all([condition_A, condition_B])
else:
return condition_A
# Consecutive replace on a string
def consecutive_replace(x, *patterns):
if len(patterns) == 1:
patterns = patterns[0]
for (a,b) in patterns:
x = x.replace(a,b)
return x
# Boolean
def boolean(x, true_values={"true", "t", "yes", "1"}, false_values={"false", "f", "no", "0"}, assertion_message="Please choose either: 'True' or 'False'"):
"""
Not case sensitive
"""
x = str(x).lower()
option = None
if x in list(map(str,true_values)):
option = True
if x in list(map(str,false_values)):
option = False
assert option is not None, assertion_message
return option
# Truncate a float by a certain precision
def to_precision(x, precision=5, into=float):
return into(("{0:.%ie}" % (precision-1)).format(x))
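# Usage sketch for to_precision (illustrative):
# >>> to_precision(3.14159265, precision=3)   # 3.14
# >>> to_precision(0.000123456, precision=2)  # 0.00012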
# Left padding
def pad_left(x, block_size=3, fill=0):
"""
Pad a string representation of digits
"""
if len(x) > block_size:
return x
else:
right = np.array(list(str(x)))
left = np.repeat(str(fill), block_size - right.size )
return "".join(np.concatenate([left, right]))
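# Usage sketch for pad_left (illustrative; expects a string input):
# >>> pad_left("7", block_size=3)     # '007'
# >>> pad_left("1234", block_size=3)  # '1234' (already longer than block_size)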
# Join as strings
def join_as_strings(delimiter="_", *args):
return delimiter.join(list(map(str, args)))
# =============
# Iterables
# =============
# Fragment a sequence string
def fragment(seq, K=5, step=1, overlap=False):
K = int(K)
step = int(step)
if not overlap:
step = K
iterable = range(0, len(seq) - K + 1, step)
for i in iterable:
frag = seq[i:i+K]
yield frag
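# Usage sketch for fragment (illustrative):
# >>> list(fragment("ATGCATG", K=3, overlap=False))  # ['ATG', 'CAT']
# >>> list(fragment("ATGCATG", K=3, overlap=True))   # ['ATG', 'TGC', 'GCA', 'CAT', 'ATG']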
# Get depth of an iterable
def iterable_depth(arg, exclude=None):
# Adapted from the following SO post:
# https://stackoverflow.com/questions/6039103/counting-depth-or-the-deepest-level-a-nested-list-goes-to
# @marco-sulla
    exclude_types = set([str])  # strings are always treated as scalars here
    if exclude is not None:
        if not hasattr(exclude, "__iter__"):
            exclude = [exclude]
        exclude_types.update(exclude)
    if isinstance(arg, tuple(exclude_types)):
return 0
try:
if next(iter(arg)) is arg: # avoid infinite loops
return 1
except TypeError:
return 0
try:
depths_in = map(lambda x: iterable_depth(x, exclude), arg.values())
except AttributeError:
try:
depths_in = map(lambda x: iterable_depth(x, exclude), arg)
except TypeError:
return 0
try:
depth_in = max(depths_in)
except ValueError:
depth_in = 0
return 1 + depth_in
# Flatten nested iterables
def flatten(nested_iterable, into=list, unique=False, **kwargs_iterable):
# Adapted from @wim:
# https://stackoverflow.com/questions/16312257/flatten-an-iterable-of-iterables
def _func_recursive(nested_iterable):
for x in nested_iterable:
if is_nonstring_iterable(x):
for element in flatten(x):
yield element
else:
yield x
# Unpack data
data_flattened = list(_func_recursive(nested_iterable))
if unique:
data_flattened = sorted(set(data_flattened))
# Convert type
return into(data_flattened, **kwargs_iterable)
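# Usage sketch for flatten (illustrative):
# >>> flatten([[1, [2, 3]], (4,), 5])            # [1, 2, 3, 4, 5]
# >>> flatten([["b", "a"], ["a"]], unique=True)  # ['a', 'b']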
# Range like input data
def range_like(data, start=0):
return np.arange(len(data)) + start
# Set Intersection
def intersection(*iterables, **kwargs):
sets = map(set, iterables)
if "into" in kwargs: # Py2/3 Compatability
into = kwargs.pop("into")
else:
into = set
return into(set.intersection(*sets), **kwargs)
# Set Union
def union(*iterables, **kwargs):
sets = map(set, iterables)
if "into" in kwargs: # Py2/3 Compatability
into = kwargs.pop("into")
else:
into = set
return into(set.union(*sets), **kwargs)
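# Usage sketch for intersection/union (illustrative; a plain set result has no guaranteed order):
# >>> intersection([1, 2, 3], [2, 3, 4], [3, 2])  # {2, 3}
# >>> union([1, 2], [3], into=sorted)             # [1, 2, 3]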
# =========
# I/O
# =========
def read_from_clipboard(sep="\n", into=list):
data = pd.io.clipboard.clipboard_get()
if sep is not None:
return into(filter(bool, map(lambda x:x.strip(), data.split(sep))))
else:
return data
# Read dataframe
def read_dataframe(path, sep="infer", index_col=0, header=0, compression="infer", pickled="infer", func_index=None, func_columns=None, evaluate_columns=None, engine="c", verbose=False, excel="infer", infer_series=False, sheet_name=None, **kwargs):
start_time= time.time()
path = format_path(path, str)
dir , ext = os.path.splitext(path)
ext = ext.lower()
if excel == "infer":
if ext in {".xlsx", ".xls"}:
excel = True
else:
excel = False
if excel:
if "sheetname" in kwargs:
sheet_name = kwargs.pop("sheetname")
            warnings.warn("Use `sheet_name` instead of `sheetname`", DeprecationWarning)
df = pd.read_excel(path, sheet_name=sheet_name, index_col=index_col, header=header, **kwargs)
else:
# Seperator
if any(list(map(lambda x:ext.endswith(x),[".csv", "csv.gz", "csv.zip"]))):
sep = ","
else:
sep = "\t"
# Serialization
if pickled == "infer":
if ext in {".pkl", ".pgz", ".pbz2"}:
pickled = True
else:
pickled = False
# Compression
if compression == "infer":
if pickled:
if ext == ".pkl":
compression = None
if ext == ".pgz":
compression = "gzip"
if ext == ".pbz2":
compression = "bz2"
else:
if ext == ".gz":
compression = "gzip"
if ext == ".bz2":
compression = "bz2"
if ext == ".zip":
compression = "zip"
if pickled:
df = pd.read_pickle(path, compression=compression, **kwargs)
else:
df = pd.read_csv(path, sep=sep, index_col=index_col, header=header, compression=compression, engine=engine, **kwargs)
condition_A = any([(excel == False), (sheet_name is not None)])
condition_B = all([(excel == True), (sheet_name is None)])
if condition_A:
# Map indices
if func_index is not None:
df.index = df.index.map(func_index)
if func_columns is not None:
df.columns = df.columns.map(func_columns)
if evaluate_columns is not None:
for field_column in evaluate_columns:
try:
df[field_column] = df[field_column].map(eval)
except ValueError:
if verbose:
print("Could not use `eval` on column=`{}`".format(field_column), file=sys.stderr)
if verbose:
print("{} | Dimensions: {} | Time: {}".format(
path.split('/')[-1],
df.shape,
format_duration(start_time),
), file=sys.stderr)
if condition_B:
if verbose:
print("{} | Sheets: {} | Time: {}".format(
path.split('/')[-1],
len(df),
format_duration(start_time),
), file=sys.stderr)
if infer_series:
if df.shape[1] == 1:
df = df.iloc[:,0]
return df
# Write dataframe
def write_dataframe(data, path, sep="\t", compression="infer", pickled="infer", excel="infer", **kwargs):
start_time = time.time()
path = format_path(path, str)
_ , ext = os.path.splitext(path)
dir, filename = os.path.split(path)
if not os.path.exists(dir):
dir = str(pathlib.Path(dir).absolute())
os.makedirs(dir, exist_ok=True)
# Excel
if excel == "infer":
if ext in {".xlsx", ".xls"}:
excel = True
else:
excel = False
if excel:
if not is_dict(data):
data = {"Sheet1":data}
writer = pd.ExcelWriter(path)
for sheet_name, df in data.items():
df.to_excel(writer, sheet_name=str(sheet_name), **kwargs)
writer.save()
else:
# Serialization
if pickled == "infer":
if ext in {".pkl", ".pgz", ".pbz2"}:
pickled = True
else:
pickled = False
# Compression
if compression == "infer":
if pickled:
if ext == ".pkl":
compression = None
if ext == ".pgz":
compression = "gzip"
if ext == ".pbz2":
compression = "bz2"
else:
compression = None
if ext == ".gz":
compression = "gzip"
if ext == ".bz2":
compression = "bz2"
if pickled:
data.to_pickle(path, compression=compression, **kwargs)
else:
data.to_csv(path, sep=sep, compression=compression, **kwargs)
# Create file object
def get_file_object(path, mode="infer", compression="infer", safe_mode="infer", verbose=True):
"""
with get_file_object("./test.txt.zip", mode="infer", verbose=False) as f_read:
with get_file_object("./test_write.txt.bz2", mode="write", verbose=False) as f_write:
for line in f_read.readlines():
line = str(line.strip())
print(line, file=f_write)
"""
# Init
f = None
file_will_be_written = False
# Paths
path = format_path(path)
path_exists = os.path.exists(path)
if compression == "infer":
compression = infer_compression(path)
if verbose:
print("Inferring compression:", compression, file=sys.stderr)
# Inferring mode
if mode == "infer": # Create new function for this? infer_filemode?
if path_exists:
mode = "read"
else:
mode = "write"
assert mode != "infer", "The mode should be inferred by this point. Please specify mode manually."
assert compression != "infer", "The compression should be inferred by this point. Please specify compression manually."
# Generic read write
if mode in ["write", "read"]:
if mode == "write":
mode = "w"
if mode == "read":
mode = "r"
if compression in ["gzip", "bz2"]:
mode = mode + "b"
if verbose:
print("Inferring mode:", mode, file=sys.stderr)
if verbose:
if mode == "r":
print("Reading file:",path, file=sys.stderr)
if mode == "w":
print("Writing file:",path, file=sys.stderr)
# Will a file be written?
if "w" in mode:
file_will_be_written = True
# Ensure zip is not being written
if file_will_be_written:
assert compression != "zip", "Currently cannot handle writing zip files. Please use gzip, bz2, or None."
# Future do this:
# https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile.open
# Safe mode
if safe_mode == "infer":
if file_will_be_written:
safe_mode = True
else:
safe_mode = False
assert safe_mode in {True,False}, "Please choose either True or False for `safe_mode`"
if safe_mode:
if all([file_will_be_written, path_exists]):
raise Exception("Safe Mode: Please explicitly provide a writeable mode ['w', 'wb', or 'write'] because `{}` already exists and will be rewritten.".format(path))
# GZIP compression
if compression == "gzip":
f = gzip.open(path, mode)
f.read1 = f.read # Hack from https://github.com/kislyuk/eight/issues/1
# BZ2 compression
if compression == "bz2":
f = bz2.open(path, mode)
if compression == "zip":
filename, ext = os.path.splitext(os.path.split(path)[-1])
f = zipfile.ZipFile(path,mode).open(filename)
# No compression
if f is None:
return open(path, mode)
# Reading and writing compressed files
else:
return TextIOWrapper(f, encoding="utf-8")
# Text reading wrapper
def read_textfile(path, enum=False, generator=True, mode="read", compression="infer", into=pd.Series):
"""
2018-May-29
Edits: 2018-December-27: Added `get_file_object` dependency
"""
assert mode not in ["w", "wb", "a"], "`mode` should not be in {w, wb, a} because it will overwrite"
assert compression in ["infer", "gzip", "bz2", "zip", None], "Valid `compression` types are 'infer', 'gzip', 'bz2', 'zip'"
# Format file path
path = format_path(path, str)
# Get file object
handle = get_file_object(path=path, mode=mode, compression=compression, safe_mode=False, verbose=False)
# Nested functions
def run_return_list(handle):
data = handle.read().split("\n")
handle.close()
if into == pd.Series:
return pd.Series(data, name=path)
else:
if enum:
return into(list(enumerate(data)))
if not enum:
return into(data)
def run_return_iterable(handle):
if enum:
for i,line in enumerate(handle):
yield i, line.strip()
if not enum:
for line in handle:
yield line.strip()
handle.close()
# Controller
if generator:
return run_return_iterable(handle=handle)
if not generator:
return run_return_list(handle=handle)
# Reading serial object
def read_object(path, compression="infer", serialization_module=pickle):
path = format_path(path, str)
if compression == "infer":
_ , ext = os.path.splitext(path)
if (ext == ".pkl") or (ext == ".dill"):
compression = None
if ext in {".pgz", ".gz"}:
compression = "gzip"
if ext in {".pbz2", ".bz2"}:
compression = "bz2"
if compression is not None:
if compression == "gzip":
f = gzip.open(path, "rb")
if compression == "bz2":
f = bz2.open(path, "rb")
else:
f = open(path, "rb")
obj = serialization_module.load(f)
f.close()
return obj
# Writing serial object
def write_object(obj, path, compression="infer", serialization_module=pickle, protocol=None, *args):
"""
Extensions:
pickle ==> .pkl
dill ==> .dill
gzipped-pickle ==> .pgz
bzip2-pickle ==> .pbz2
"""
assert obj is not None, "Warning: `obj` is NoneType"
path = format_path(path, str)
# Use infer_compression here
if compression == "infer":
_ , ext = os.path.splitext(path)
if ext in {".pkl", ".dill"}: # if ext in (ext == ".pkl") or (ext == ".dill"):
compression = None
if ext in {".pgz", ".gz"}:
compression = "gzip"
if ext in {".pbz2", ".bz2"}:
compression = "bz2"
if compression is not None:
if compression == "bz2":
f = bz2.BZ2File(path, "wb")
if compression == "gzip":
f = gzip.GzipFile(path, "wb")
else:
f = open(path, "wb")
serialization_module.dump(obj, f, protocol=protocol, *args)
f.close()
# Reading html from website
def read_url(url, params=None, **kwargs):
"""
Future:
Make wrapper for dynamic html and phantom js
"""
dynamic = False
if not dynamic:
r = requests.get(url, params=params, **kwargs)
return r.text
else:
print("Unavailable: Need to make wrapper for dynamic HTML using PhantomJS", file=sys.stderr)
# Importing a functions from a module
def read_script_as_module(name_module, path):
# https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
path = format_path(path, str)
    if sys.version_info.major == 2:
        import imp  # only available on Python 2; not imported at module level
        module = imp.load_source(name_module, path)
else:
spec = importlib.util.spec_from_file_location(name_module, path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
# Read Fasta File
@check_packages(["Bio"], import_into_backend=False)
def read_fasta(path, description=True, case=None, func_header=None, into=pd.Series, compression="infer", name=None, verbose=False):
"""
Reads in a single fasta file or a directory of fasta files into a dictionary.
"""
from Bio.SeqIO.FastaIO import SimpleFastaParser
# Get path
path = format_path(path)
# Assign pathname as name if there isn't one
if name is None:
name = path
# Open file object
f = get_file_object(path, mode="read", compression=compression, safe_mode=False, verbose=False)
# Read in fasta
d_id_seq = OrderedDict()
if verbose:
seq_records = pv(SimpleFastaParser(f), "Reading sequence file: {}".format(path))
else:
seq_records = SimpleFastaParser(f)
# Verbose but faster
if description:
if case == "lower":
for header, seq in seq_records:
seq = seq.lower()
d_id_seq[header] = seq
if case == "upper":
for header, seq in seq_records:
seq = seq.upper()
d_id_seq[header] = seq
if case is None:
for header, seq in seq_records:
d_id_seq[header] = seq
if not description:
if case == "lower":
for header, seq in seq_records:
seq = seq.lower()
header = header.split(" ")[0]
d_id_seq[header] = seq
if case == "upper":
for header, seq in seq_records:
seq = seq.upper()
header = header.split(" ")[0]
d_id_seq[header] = seq
if case is None:
for header, seq in seq_records:
header = header.split(" ")[0]
d_id_seq[header] = seq
# Close File
f.close()
# Transform header
if func_header is not None:
d_id_seq = OrderedDict( [(func_header(id),seq) for id, seq in d_id_seq.items()])
sequences = into(d_id_seq)
if hasattr(sequences, "name"):
sequences.name = name
return sequences
# Writing sequence files
def write_fasta(sequences, path, compression="infer"):
"""
Sequence stats:
count 29999.000000
mean 310.621754
std 1339.422833
min 56.000000
25% 75.000000
50% 111.000000
75% 219.000000
max 54446.000000
Benchmarks:
No compression: 47.2 ms ± 616 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
Gzip: 9.85 s ± 261 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
Bzip2: 864 ms ± 16.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
"""
# path = format_path(path)
# if compression == "infer":
# compression = infer_compression(path)
if is_query_class(path, ["stdout", "stderr", "streamwrapper"]):
path.writelines(">{}\n{}\n".format(id, seq) for id, seq in sequences.items())
else:
with get_file_object(path, mode="write", compression=compression, safe_mode=False, verbose=False) as f:
f.writelines(">{}\n{}\n".format(id, seq) for id, seq in sequences.items())
# Read blast output
def read_blast(path, length_query=None, length_subject=None, sort_by="bitscore"):
"""
if 12: ['qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'gapopen', 'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore']
if 15 assumes: -outfmt '6 std qlen slen stitle': ["std", "qlen", "slen", "stitle"]
####################################################
Column NCBI name Description
1 qseqid Query Seq-id (ID of your sequence)
2 sseqid Subject Seq-id (ID of the database hit)
3 pident Percentage of identical matches
4 length Alignment length
5 mismatch Number of mismatches
6 gapopen Number of gap openings
7 qstart Start of alignment in query
8 qend End of alignment in query
9 sstart Start of alignment in subject (database hit)
10 send End of alignment in subject (database hit)
11 evalue Expectation value (E-value)
12 bitscore Bit score
13 sallseqid All subject Seq-id(s), separated by a ';'
14 score Raw score
15 nident Number of identical matches
16 positive Number of positive-scoring matches
17 gaps Total number of gaps
18 ppos Percentage of positive-scoring matches
19 qframe Query frame
20 sframe Subject frame
21 qseq Aligned part of query sequence
22 sseq Aligned part of subject sequence
23 qlen Query sequence length
24 slen Subject sequence length
25 salltitles All subject title(s), separated by a '<>'
Example inputs:
* blat -prot Yeast/Saccharomyces_cerevisiae.R64-1-1.pep.all.processed.fa Phaeodactylum_tricornutum.ASM15095v2.pep.all.processed.fa -out=blast8 yeast-pt.blast8
* diamond blastp -d ../../../../reference_data/references/gracilibacteria/reference_proteins.nmnd -q ./prodigal_output/orfs.faa -f 6 -o ./diamond_output/output.blast6
"""
path = format_path(path)
idx_default_fields = ['qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'gapopen', 'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore']
# ["query_id", "subject_id", "identity", "alignment_length", "mismatches", "gap_openings", "query_start", "query_end", "subject_start", "subject_end", "e-value", "bit_score"]
df_blast = pd.read_csv(path, header=None, sep="\t")
if df_blast.shape[1] == 12:
df_blast.columns = idx_default_fields
if df_blast.shape[1] == 15:
df_blast.columns = idx_default_fields + ["qlen", "slen", "stitle"]
# Length of query
if length_query is not None:
if is_path_like(length_query):
length_query = read_fasta(length_query, description=False, verbose=False)
if isinstance(length_query[0], str):
length_query = length_query.map(len)
df_blast["qlength"] = df_blast["qseqid"].map(lambda id: length_query[id])
df_blast["qratio"] = (df_blast["qend"] - df_blast["qstart"])/df_blast["qlength"]
# Length of subject
if length_subject is not None:
if is_path_like(length_subject):
length_subject = read_fasta(length_subject, description=False, verbose=False)
if isinstance(length_subject[0], str):
length_subject = length_subject.map(len)
df_blast["slength"] = df_blast["sseqid"].map(lambda id: length_subject[id])
df_blast["sratio"] = (df_blast["send"] - df_blast["sstart"])/df_blast["slength"]
if sort_by is not None:
df_blast = df_blast.sort_values(by=sort_by, ascending=False).reset_index(drop=True)
return df_blast
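# Usage sketch for read_blast (illustrative file path taken from the docstring example above):
# >>> df_hits = read_blast("./diamond_output/output.blast6")
# >>> df_hits.columns[:4].tolist()  # ['qseqid', 'sseqid', 'pident', 'length']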
# Helper function for reading gtf and gff3
def read_gtf_gff_base(path, compression, record_type, verbose):
# Read the gff3 file
with get_file_object(path, mode="read", compression=compression, safe_mode=False, verbose=False) as f:
if verbose:
iterable_lines = pv(f.readlines(), "Processing lines")
else:
iterable_lines = f.readlines()
data= list()
if record_type is None:
for line in iterable_lines:
if not line.startswith("#"):
line = line.strip("\n")
if bool(line):
base_fields = line.split("\t")
data.append(base_fields)
else:
for line in iterable_lines:
if not line.startswith("#"):
if "{}_id".format(record_type) in line:
line = line.strip("\n")
base_fields = line.split("\t")
data.append(base_fields)
# Generate table
    df_base = pd.DataFrame(data)
import pytest
from gui.controller.MainController import MainController
from unittest.mock import Mock
from PyQt5.QtCore import QSettings, QDate
from service.visualization.PlotConfiguration import PlotConfiguration
import datetime
import pandas as pd
def test_maincontroller_constructor(qtbot, mocker):
# Given
model = Mock()
model.max_threads = 12
model.auto_scraping = False
mocker.patch('os.listdir', return_value=['2017-02-02', '2017/02/03', '2017-42-03'])
# When
try:
MainController(model)
# Then
except Exception:
pytest.fail("Could not create MainController")
def test_maincontroller_get_analysis_method(qtbot):
# Given
model = Mock()
model.max_threads = 12
model.auto_scraping = False
controller = MainController(model)
# When
methods = controller.get_analysis_methods()
# Then
assert methods == ['nltk', 'textblob']
def test_main_controller_change_automatic_scrapper(qtbot):
# Given
model = Mock()
model.max_threads = 12
model.auto_scraping = False
controller = MainController(model)
# When
try:
controller.change_automatic_scrapper()
# Then
except Exception:
        pytest.fail("Changing to automatic scrapper should not raise")
def test_init_settings_on_not_previusly_loaded(mocker):
# Given
mocker.patch('PyQt5.QtCore.QSettings.value', side_effect=[BaseException(), 5])
mock_sync = mocker.patch('PyQt5.QtCore.QSettings.sync')
model = Mock()
model.max_threads = 12
model.auto_scraping = False
controller = MainController(model)
mock_sync.assert_called_once()
def test_set_settings(mocker):
settings = QSettings('Test', 'Test settings')
mocker.patch('PyQt5.QtCore.QSettings.value', return_value=12)
mock_sync = mocker.patch('PyQt5.QtCore.QSettings.sync')
model = Mock()
model.max_threads = 12
model.auto_scraping = False
controller = MainController(model)
controller.set_settings(settings)
mock_sync.assert_called_once()
def test_get_settings(mocker):
model = Mock()
model.max_threads = 12
model.auto_scraping = False
controller = MainController(model)
result = controller.get_settings()
assert result is not None
def test_save_plot_config(mocker):
mocker_patch = mocker.patch('pickle.dump')
model = Mock()
model.max_threads = 12
model.auto_scraping = False
controller = MainController(model)
controller.save_plot_config('test_dump.py')
mocker_patch.assert_called_once()
def test_open_configure(mocker):
config = PlotConfiguration('name', 'ini', 'fin', 'map', 'var', 'ind', 'data')
mocker.patch('gui.view.plot_config.PlotConfigure.show')
mocker.patch('gui.view.plot_config.PlotConfigure.exec_')
mocker.patch('gui.view.plot_config.PlotConfigure.is_saved', return_value=True)
mock_p = mocker.patch('gui.view.plot_config.PlotConfigure.getPlotConfiguration', return_value=config)
model = Mock()
model.max_threads = 12
model.auto_scraping = False
controller = MainController(model)
a,b,c = controller.open_configure()
assert a is not None and c is not None
assert b != ""
def test_open_configure_no_saved(mocker):
config = PlotConfiguration('name', 'ini', 'fin', 'map', 'var', 'ind', 'data')
mocker.patch('PyQt5.QtWidgets.QDialog.show')
mocker.patch('PyQt5.QtWidgets.QDialog.exec_')
mocker.patch('gui.view.plot_config.PlotConfigure.is_saved', return_value=False)
mock_p = mocker.patch('gui.view.plot_config.PlotConfigure.getPlotConfiguration', return_value=config)
model = Mock()
model.max_threads = 12
model.auto_scraping = False
controller = MainController(model)
a,b,c = controller.open_configure()
assert a is None and c is None
assert b == ""
def test_update_plots(mocker):
mocker_patch = mocker.patch('service.visualization.PlotService.PlotService.updatePlots')
model = Mock()
model.max_threads = 12
model.auto_scraping = False
controller = MainController(model)
controller.update_plots('2012-02-02','nltk')
mocker_patch.assert_called_once()
def test_get_plots(mocker):
model = Mock()
model.max_threads = 12
model.auto_scraping = False
controller = MainController(model)
result = controller.get_plots()
assert result is not None
def test_delete_plot(mocker):
model = Mock()
model.max_threads = 12
model.auto_scraping = False
controller = MainController(model)
controller.plot_configurations = [{'id':1}, {'id':2}, {'id':3}]
result = controller.delete_plot(2)
assert len(controller.plot_configurations) == 2
def test_delete_plot_on_non_existent(mocker):
model = Mock()
model.max_threads = 12
model.auto_scraping = False
controller = MainController(model)
controller.plot_configurations = [{'id':1}, {'id':2}, {'id':3}]
result = controller.delete_plot(10)
assert len(controller.plot_configurations) == 3
def test_start_auto_scrap(mocker):
model = Mock()
model.max_threads = 12
model.auto_scraping = False
controller = MainController(model)
mocker.patch('PyQt5.QtCore.QThreadPool.activeThreadCount', side_effect=[1,2,4, 24])
mocker.patch('PyQt5.QtCore.QThreadPool.start')
with pytest.raises(Exception):
controller.startAutoScrapWorker()
def test_start_scrap_worker(mocker):
model = Mock()
model.max_threads = 12
model.auto_scraping = False
controller = MainController(model)
mocker.patch('PyQt5.QtCore.QThreadPool.start')
try:
controller.startScrapWorker()
except Exception:
pytest.fail("Should not fail")
@pytest.mark.parametrize("order", [(True),(False)])
def test_start_message_sample(mocker, order):
model = Mock()
model.max_threads = 12
model.auto_scraping = False
controller = MainController(model)
now = QDate(2010, 5, 2)
    data = pd.DataFrame({'Text': ['Test', 'Tost'], 'NumLikes': [23, 25]})
#!/bin/env python3
# -*- coding: utf-8 -*-
# Author: <NAME>
# Contact: <EMAIL>
import numpy as np
import pandas as pd
import subprocess
from scipy import constants as spc
np.set_printoptions(precision=5)
def importXYZGeom(fileName):
    xyzGeom = pd.DataFrame(columns=['atomName', 'x', 'y', 'z'])
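    # sketch continuation (not from the original source): assumes the standard XYZ layout of
    # line 1 = atom count, line 2 = comment, then one "symbol x y z" record per atom
    with open(fileName) as f:
        atomLines = f.readlines()[2:]
    for i, line in enumerate(atomLines):
        if not line.strip():
            continue
        atomName, x, y, z = line.split()[:4]
        xyzGeom.loc[i] = [atomName, float(x), float(y), float(z)]
    return xyzGeom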
###############################################################################################
#### Initialization
import pandas as pd
import numpy as np
df = pd.read_csv(filename, header=None, names=col_names, na_values={'col_name':['-1']}, \
parse_dates=[[0, 1, 2]], index_col='Date')
# if the first 3 columns are 'year','month','day', then the dataframe would have a single col named
# 'year_month_day' of datatype 'datatime64[ns]'
# Can use df.index = df['year_month_day'] to reassign this col as the index of df
## EDA == Exploratory Data Analysis
###############################################################################################
#### Basic data exploration
df.shape # shape of dataframe
df.head(7) # print the head part of dataset
df.tail(5) # print the tail part of dataset
df.info() # return data type of each column, and number of non-null values
df.count() # count items for each column
df.describe() # summary stat of numerical data
# df.mean(), df.median(), df.std(), df.quantile([0.25, 0.75]), df.min(), df.max()
df['one_col_name'].unique() # unique values in a column
df['one_col_name'].value_counts(dropna=False) # return frequency counts of a column
df['one_col_name'].value_counts(dropna=False).head() # note the result of prev line is a pandas Series
df.idxmax(axis=0) # Or use axis='index'
df.idxmin(axis=1) # Or use axis='columns'
# indexes of max/min vals for each column/row
###############################################################################################
#### Row & column index manipulation
df.columns # names of all the columns, usually class of Index
# can be assigned with a list of new names.
df.index # row indexes, can be class of Index or DatetimeIndex; can be assigned with a list of new indexes
df.index = df.index.map(str.lower) # use map to transform the index with a function
# pandas Index objects are immutable. Must reset the whole indexes of df at once
df = df.set_index(['col1', 'col2']) # change to multiple index (index being of class MultiIndex)
df = df.sort_index() # sort the MultiIndex (lexsort) so that label-based slicing works
# use tuple to slice multiple index
# use slice(None) to indicate ":" in the tuple
# more advanced manipulation of multiple indexes = stacking and unstacking
# please refer to datacamp course "Manipulating DataFrames with pandas"
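# illustrative tuple-based slicing on the hierarchical index above (the index values are hypothetical):
df.loc[('val_a', 'val_b')]                   # one row, selected by the full (col1, col2) key
df.loc[(slice(None), 'val_b'), :]            # ":" on level 'col1', fixed value on level 'col2'
df.loc[(slice('a1', 'a3'), slice(None)), :]  # label range on 'col1', everything on 'col2'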
df.reindex(ordered_index) # order rows by original index with the order in ordered_index
# ordered_index = somehow ordered list of original df indices
# if some item in ordered_index is not in orig_df_indices, there would be a row with that index but NA values
df.sort_index()
###############################################################################################
#### Data visualization for inspection
# use Bar plots for discrete data counts
# use Histograms for continuous data counts
df['one_col_name'].plot('hist')
import matplotlib.pyplot as plt
plt.show()
df.boxplot(column='one_numerical_col', by='one_categorical_col') # two columns are involved
df.boxplot(column='population', by='continent') # example of above
###############################################################################################
#### Data extraction & assignment (general)
## direct column access by column name
df["country"] # This is 1D labeled array (class: pandas.core.series.Series)
df[["country"]] # This is dataframe (class: pandas.core.frame.DataFrame)
## row/column access by (built-in) numerircal indexes
df[1:2] # single row as a dataframe...
# Note: row slicing cannot use a single number, which would be regarded as a col name
df.iloc[1] # row as pandas Series
df.iloc[[1, 2, 3]]
df.iloc[[1,2,3], [0, 1]]
df.iloc[:, [0,1]]
## row/column access by labels
df.loc["RU"] # row as Pandas Series
df.loc[["RU", "IN", "CH"]] # row as Pandas dataframe
df.loc[["RU", "IN", "CH"], ["country", "capital"]]
df.loc[:, ["country", "capital"]]
## filtering
df[df["area"] > 8]
df[np.logical_and(df["area"] > 8, df["area"] < 10)] # or use the next line
df[(df["area"] > 8 & df["area"] < 10)]
df[np.logical_or(df["area"] < 8, df["area"] > 10)] # or use the next line
df[(df["area"] < 8 | df["area"] > 10)]
## extract df values as ndarrays
data_array = df.values # extract the values as ndarray
col_array = df['col_name'].values # extract column values as ndarray
np.concatenate([arr1, arr2], axis=1)
## create new columns
df['new_col'] = df['existing_col'].str[0] # extract 1st char of 'existing_col' and save as 'new_col' in df
# note that 'str' here is an attribute name
df['str_split'] = df['existing_col'].str.split('_') # split string with '_' and save as 'str_split' col
df['new_col0'] = df['str_split'].str.get(0)
df['new_col1'] = df['str_split'].str.get(1)
df['new_col'] = df['col_name'].str.upper()
df['new_mask_col'] = df['col_name'].str.contains('given_substring') # Boolean data
for label, row in df.iterrows():
df.loc[label, "new_col"] = len(row["country"]) # added a new column "new_col" as function of existing data
df["new_col"] = df["country"].apply(len)
df['new_col'] = 0.0 # assign values with broadcasting
## create new copies of existing dataframes
df2 = df.copy()
sorted_df = df.sort_values('col_name') # sort rows (including index) by values in col 'col_name'
## modify existing entries
df.iloc[::3, -1] = np.nan # assign values with broadcasting
## delete row/column
del df['col_name']
df.drop(['col_name1', 'col_name2'], axis=1)
df.drop([1, 2]) # delete rows by numerical indexes
df.drop(index='row_ind') # delete rows by row index
## manage data types
df['treatment b'] = df['treatment b'].astype(str)
df['sex'] = df['sex'].astype('category')
df['treatment a'] = pd.to_numeric(df['treatment a'], errors='coerce') # force conversion
## manage duplicate rows
df = df.drop_duplicates() # drop duplicate rows
## manage missing data (NA/null/NaN)
df_dropped = df.dropna(how='any') # drop rows with NaN values
df['sex'] = df['sex'].fillna(obj_to_fill) # in 'sex' column, fill NaN with obj_to_fill (e.g. mean value)
checker_df = df.notnull() # boolean for each entry of the dataframe
checker_df_reverse = df.isnull() # boolean for each entry of the dataframe
checker_each_col = df.notnull().all() # aggregated for each column
checker_each_col_reverse = df.isnull().any() # aggregated for each column
checker_col = df.one_col_name.notnull() # boolean for the col "one_col_name"
###############################################################################################
#### tidy data
# tidy data principle: rows contain observations, columns form variables
# pd.melt(): solve the problem of columns (names) containing values, instead of variables
# ... by turning columns into rows
new_df = pd.melt(frame=df, id_vars=list_names_cols, value_vars=['treatment a', 'treatment b'], \
var_name='treatment', value_name='result')
# the columns in list_names_cols remain unchanged
# the 'treatment a' and 'treatment b' cols become values of a new col called 'treatment'
# the original table values are collected as values of a new col called 'result'
# pivot: opposite of melting
# ... by taking unique values from a column and create new columns
weather_tidy = weather.pivot(index='date', columns='element', values='value')
# the levels in 'element' column become new col names
# if the values are not specified or multiple, the new columns would become hierarchical index
# if there is duplicate conflict, use aggregate function
weather_tidy = weather.pivot_table(index='date', columns='element', values='value', aggfunc=np.mean)
# more advanced manipulation of multiple indexes = stacking and unstacking
# please refer to datacamp course "Manipulating DataFrames with pandas"
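# Hedged stack/unstack illustration (toy frame, not taken from the notes above):
_wide = pd.DataFrame({'tmax': [10, 12], 'tmin': [1, 3]},
                     index=pd.Index(['2020-01-01', '2020-01-02'], name='date'))
_long = _wide.stack()    # column labels move into the innermost index level
_back = _long.unstack()  # inverse operation: innermost index level back to columns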
###############################################################################################
#### Data (table) joining/concatenation (like in SQL)
## concatenate dataframes
vertical_stacked = df1.append(df2) # indices are also stacked
vertical_stacked.reset_index(drop=True) # result would be the same as the following line
vertical_stacked = pd.concat([df1, df2], axis=0, ignore_index=True) # new indexes range from 0 to n_tot
hori_cat = pd.concat([df1, df2], axis=1, join='outer') # rows with the same index would be merged to single row. cols are stacked
hori_cat = pd.concat([df1, df2], axis=1, join='inner')
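# pd.merge() covers the SQL-style keyed joins; a hedged toy example
# (frames and key values below are made up for illustration):
_left = pd.DataFrame({'key': ['a', 'b', 'c'], 'v1': [1, 2, 3]})
_right = pd.DataFrame({'key': ['b', 'c', 'd'], 'v2': [20, 30, 40]})
pd.merge(_left, _right, on='key', how='inner')  # only keys present in both frames
pd.merge(_left, _right, on='key', how='outer')  # union of keys, NaN where missing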
"""Plot statistis about missing values from given indicators."""
import matplotlib
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import matplotlib.ticker as ticker
from mpl_toolkits.axes_grid1.parasite_axes import SubplotHost
# Plot functions: each indicator has a different way of being plotted
def plot_global(indicators, plot=False, show=True, ax=None):
"""Plot statistics on the full database."""
# Get required indicators
df = indicators['global']
n_rows = df.at[0, 'n_rows']
n_cols = df.at[0, 'n_cols']
n_values = df.at[0, 'n_values']
n_mv = df.at[0, 'n_mv']
n_mv1 = df.at[0, 'n_mv1']
n_mv2 = df.at[0, 'n_mv2']
n_not_mv = df.at[0, 'n_not_mv']
f_mv = df.at[0, 'f_mv']
f_mv1 = df.at[0, 'f_mv1']
f_mv2 = df.at[0, 'f_mv2']
f_not_mv = df.at[0, 'f_not_mv']
# Print these statistics
if show:
print(
f'\n'
f'Statistics on the full data frame:\n'
f'---------------------------------\n'
f'[{n_rows} rows x {n_cols} columns]\n'
f'{n_values} values\n'
f'N NMV: {f_not_mv:.1f}% or {n_not_mv}\n'
f'N MV: {f_mv:.1f}% or {n_mv}\n'
f' N MV 1: {f_mv1:.1f}% or {n_mv1}\n'
f' N MV 2: {f_mv2:.1f}% or {n_mv2}\n'
)
# If asked, plot these statistics
if plot:
if ax is None:
_, ax = plt.subplots(figsize=(10, 4))
df_show = pd.DataFrame({
'MV1': [n_mv1],
'MV2': [n_mv2],
'MV': [n_mv],
'V': [n_values],
'type': ['Full data frame']
})
sns.set_color_codes('pastel')
sns.barplot(x='V', y='type', data=df_show, color='lightgray', ax=ax,
label=f'Not missing ({f_not_mv:.1f}%)')
sns.set_color_codes('muted')
sns.barplot(x='MV', y='type', data=df_show, color='b', ax=ax,
label=f'Missing - Not applicable ({f_mv1:.1f}%)')
sns.set_color_codes('dark')
sns.barplot(x='MV2', y='type', data=df_show, color='b', ax=ax,
label=f'Missing - Not available ({f_mv2:.1f}%)')
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.5, box.height*0.5])
ax.legend(ncol=1, loc='center left', frameon=True,
title='Type of values',
bbox_to_anchor=(1.05, 0.5))
ax.set(ylabel='', xlabel=f'Number of values (Total {n_values})')
ax.set_title('Proportion of missing values')
sns.despine(left=True, bottom=True, ax=ax)
# Remove y labels
ax.tick_params(axis='y', which='both', left=False, labelleft=False)
def plot_features(indicators, plot=False, show=True, ax=None):
"""Plot the number of features with missing values."""
# Get required indicators
    df = pd.concat([indicators['features'], indicators['global']], axis=1)
"""Tests for the sdv.constraints.tabular module."""
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
ColumnFormula, CustomConstraint, GreaterThan, UniqueCombinations)
def dummy_transform():
pass
def dummy_reverse_transform():
pass
def dummy_is_valid():
pass
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid'
# Run
instance = CustomConstraint(
transform=dummy_transform,
reverse_transform=dummy_reverse_transform,
is_valid=is_valid_fqn
)
# Assert
assert instance.transform == dummy_transform
assert instance.reverse_transform == dummy_reverse_transform
assert instance.is_valid == dummy_is_valid
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receiving the names of
the columns that need to produce unique combinations.
Side effects:
        - instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test__valid_separator_valid(self):
"""Test ``_valid_separator`` for a valid separator.
If the separator and data are valid, result is ``True``.
Input:
- Table data (pandas.DataFrame)
Output:
- True (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data, instance._separator, columns)
# Assert
assert is_valid
def test__valid_separator_non_valid_separator_contained(self):
"""Test ``_valid_separator`` passing a column that contains the separator.
If any of the columns contains the separator string, result is ``False``.
Input:
- Table data (pandas.DataFrame) with a column that contains the separator string ('#')
Output:
- False (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', '#', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data, instance._separator, columns)
# Assert
assert not is_valid
def test__valid_separator_non_valid_name_joined_exists(self):
"""Test ``_valid_separator`` passing a column whose name is obtained after joining
the column names using the separator.
If the column name obtained after joining the column names using the separator
already exists, result is ``False``.
Input:
- Table data (pandas.DataFrame) with a column name that will be obtained by joining
the column names and the separator.
Output:
- False (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'b#c': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data, instance._separator, columns)
# Assert
assert not is_valid
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b#c': ['d#g', 'e#h', 'f#i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
    def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
transformed_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b#c': ['d#g', 'e#h', 'f#i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(transformed_data)
# Run
out = instance.reverse_transform(transformed_data)
# Assert
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
class TestGreaterThan():
def test___init___strict_false(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is False
def test___init___strict_true(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
- strict = True
Side effects:
- instance._low == 'a'
- instance._high == 'b'
        - instance._strict == True
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True)
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is True
def test_fit_int(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'i'
def test_fit_float(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_fit_datetime(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'M'
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
        instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
        expected_out = pd.Series([True, True, False])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os,glob, pathlib,shutil
import numpy as np
import pandas as pd
from argparse import (ArgumentParser, RawTextHelpFormatter)
def get_parser():
parser = ArgumentParser(
formatter_class=RawTextHelpFormatter,
description='ingress cpc'
)
parser.add_argument(
'-i', '--img', action='store', required=True,
help='[required]'
'\nPath of img .')
parser.add_argument(
'-o', '--out', action='store', required=True,
help='[required]'
'\nOutput path.')
return parser
opts = get_parser().parse_args()
pp = pathlib.Path(opts.img)
outputdir= opts.out
gendir=str(pp.parents[2]) #central directory
scandid=pp.parts[-2] # particlular task id
subjid=pp.parts[-1].split('_')[0] # subject id
#extract functional and anatomical
shutil.copyfile(glob.glob(gendir+'/anatomical_brain/'+'*nii.gz')[0],outputdir+'/'+subjid+'_T1wbrain.nii.gz')
shutil.copyfile(glob.glob(gendir+'/anatomical_brain_mask/'+'*nii.gz')[0],outputdir+'/'+subjid+'_T1wmask.nii.gz')
shutil.copyfile(glob.glob(gendir+'/seg_partial_volume_map/'+'*nii.gz')[0],outputdir+'/'+subjid+'_segmentation.nii.gz')
shutil.copyfile(glob.glob(gendir+'/anatomical_to_mni_nonlinear_xfm/'+'*nii.gz')[0],outputdir+'/'+subjid+'_from-T1w_to-MNI_warp.nii.gz')
shutil.copyfile(glob.glob(gendir+'/ants_affine_xfm/'+'*.mat')[0],outputdir+'/'+subjid+'_from-T1w_to-MNI_affine2.mat')
shutil.copyfile(glob.glob(gendir+'/ants_rigid_xfm/'+'*.mat')[0],outputdir+'/'+subjid+'_from-T1w_to-MNI_rigid_affine1.mat')
shutil.copyfile(glob.glob(gendir+'/ants_initial_xfm/'+'*.mat')[0],outputdir+'/'+subjid+'_from-T1w_to-MNI_initial_affine0.mat')
shutil.copyfile(glob.glob(gendir+'/functional_brain_mask/'+scandid+'/*.nii.gz')[0],outputdir+'/'+subjid+scandid+'_brainmask.nii.gz')
shutil.copyfile(glob.glob(gendir+'/mean_functional/'+scandid+'/*.nii.gz')[0],outputdir+'/'+subjid+scandid+'_referenceVolume.nii.gz')
shutil.copyfile(glob.glob(gendir+'/functional_to_anat_linear_xfm/'+scandid+'/*.mat')[0],outputdir+'/'+subjid+scandid+'_from-func_to-T1w_affine.mat')
#regressor
regressor=pd.read_csv(glob.glob(gendir+'/functional_nuisance_regressors/'+scandid+'/'+'/*/*.1D')[0],skiprows=2,sep='\t')
if 'X' in regressor.columns:
regressor.rename(columns={'# RotY':'rot_y','RotX':'rot_x','RotZ':'rot_z', 'X': 'trans_x', 'Y':'trans_y', 'Z':'trans_z'},inplace=True)
if 'GlobalSignalMean0' in regressor.columns:
regressor.rename(columns={'GlobalSignalMean0':'global_signal'},inplace=True)
if 'WhiteMatterMean0' in regressor.columns:
regressor.rename(columns={'WhiteMatterMean0':'white_matter'},inplace=True)
if 'CerebrospinalFluidMean0' in regressor.columns:
regressor.rename(columns={'CerebrospinalFluidMean0':'csf'},inplace=True)
if 'aCompCorDetrendPC0' in regressor.columns:
regressor.rename(columns={'aCompCorDetrendPC0':'a_comp_cor_00','aCompCorDetrendPC1':'a_comp_cor_01','aCompCorDetrendPC2':'a_comp_cor_02',
'aCompCorDetrendPC3':'a_comp_cor_03','aCompCorDetrendPC4':'a_comp_cor_04'},inplace=True)
if 'tCompCorDetrendPC0' in regressor.columns:
regressor.rename(columns={'tCompCorDetrendPC0':'t_comp_cor_00','tCompCorDetrendPC1':'t_comp_cor_01','tCompCorDetrendPC2':'t_comp_cor_02',
'tCompCorDetrendPC3':'t_comp_cor_03','tCompCorDetrendPC4':'t_comp_cor_04'},inplace=True)
# ask for aroma and ICA
fd=pd.read_csv(glob.glob(gendir+'/frame_wise_displacement_power/'+scandid+'/*.1D')[0],header=None,names=['framewise_displacement'])
regressors = pd.concat([regressor, fd], axis=1)
# python 2
try:
from urllib.request import Request, urlopen
# Python 3
except ImportError:
from urllib2 import Request, urlopen
import pandas as pd
import time
import datetime
import numpy as np
import re
import json
from bs4 import BeautifulSoup
from pytrends.request import TrendReq
class Cryptory():
def __init__(self, from_date, to_date=None, ascending=False,
fillgaps=True, timeout=10.0):
"""Initialise cryptory class
Parameters
----------
from_date : the starting date (as string) for the returned data;
required format is %Y-%m-%d (e.g. "2017-06-21")
to_date : the end date (as string) for the returned data;
required format is %Y-%m-%d (e.g. "2017-06-21")
Optional. If unspecified, it will default to the current day
        ascending : binary. Determines whether the returned dataframes are
ordered by date in ascending or descending order
(defaults to False i.e. most recent first)
fillgaps : binary. When data does not exist (e.g. weekends for stocks)
should the rows be filled in with the previous available data
(defaults to True e.g. Saturday stock price will be same as Friday)
        timeout : float. The max time allowed (in seconds) to pull data from a website
            If exceeded, a timeout error is returned. Default is 10 seconds.
"""
self.from_date = from_date
# if to_date provided, defaults to current date
if to_date is None:
self.to_date = datetime.date.today().strftime("%Y-%m-%d")
else:
self.to_date = to_date
self.ascending = ascending
self.fillgaps = fillgaps
self.timeout = timeout
self._df = pd.DataFrame({'date':pd.date_range(start=self.from_date, end=self.to_date)})
def extract_reddit_metrics(self, subreddit, metric, col_label="", sub_col=False):
"""Retrieve daily subscriber data for a specific subreddit scraped from redditmetrics.com
Parameters
----------
subreddit : the name of subreddit (e.g. "python", "learnpython")
metric : the particular subscriber information to be retrieved
(options are limited to "subscriber-growth" (daily change),
'total-subscribers' (total subscribers on a given day) and
'rankData' (the position of the subreddit on reddit overall)
'subscriber-growth-perc' (daily percentage change in subscribers))
col_label : specify the title of the value column
(it will default to the metric name with hyphens replacing underscores)
sub_col : whether to include the subreddit name as a column
(default is False i.e. the column is not included)
Returns
-------
pandas Dataframe
"""
if metric not in ['subscriber-growth', 'total-subscribers', 'rankData', 'subscriber-growth-perc']:
raise ValueError(
"Invalid metric: must be one of 'subscriber-growth', " +
"'total-subscribers', 'subscriber-growth-perc', 'rankData'")
url = "http://redditmetrics.com/r/" + subreddit
if metric == 'subscriber-growth-perc':
metric_name = 'total-subscribers'
else:
metric_name = metric
try:
parsed_page = urlopen(url, timeout=self.timeout).read()
parsed_page = parsed_page.decode("utf8")
except Exception as e:
return pd.DataFrame({"error":e}, index=[0])
if metric == 'rankData':
start_segment = parsed_page.find(metric)
else:
start_segment = parsed_page.find("element: '"+metric_name+"'")
if start_segment != -1:
start_list = parsed_page.find("[", start_segment)
end_list = parsed_page.find("]", start_list)
parsed_page = parsed_page[start_list:end_list + 1]
else:
return pd.DataFrame({"error":"Could not find that subreddit"}, index=[0])
parsed_page = parsed_page.replace("'", '"')
parsed_page = parsed_page.replace('a', '\"subscriber_count\"')
parsed_page = parsed_page.replace('y', '\"date\"')
output = json.loads(parsed_page)
output = pd.DataFrame(output)
output['date'] = pd.to_datetime(output['date'], format="%Y-%m-%d")
if metric == 'subscriber-growth-perc':
output['subscriber_count'] = output['subscriber_count'].pct_change()
output = output[(output['date']>=self.from_date) & (output['date']<=self.to_date)]
output = output.sort_values(by='date', ascending=self.ascending).reset_index(drop=True)
if sub_col:
output['subreddit'] = subreddit
if col_label != "":
            output = output.rename(columns={'subscriber_count': col_label})
else:
output = output.rename(columns={'subscriber_count': metric.replace("-","_")})
return output
def extract_coinmarketcap(self, coin, coin_col=False):
"""Retrieve basic historical information for a specific cryptocurrency from coinmarketcap.com
Parameters
----------
coin : the name of the cryptocurrency (e.g. 'bitcoin', 'ethereum', 'dentacoin')
coin_col : whether to include the coin name as a column
(default is False i.e. the column is not included)
Returns
-------
pandas Dataframe
"""
try:
output = pd.read_html("https://coinmarketcap.com/currencies/{}/historical-data/?start={}&end={}".format(
coin, self.from_date.replace("-", ""), self.to_date.replace("-", "")))[0]
except Exception as e:
return pd.DataFrame({"error":e}, index=[0])
output = output.assign(Date=pd.to_datetime(output['Date']))
for col in output.columns:
if output[col].dtype == np.dtype('O'):
output.loc[output[col]=="-",col]=0
output[col] = output[col].astype('int64')
output.columns = [col.lower() for col in output.columns]
if coin_col:
output['coin'] = coin
return output
def extract_bitinfocharts(self, coin, metric="price", coin_col=False, metric_col=False):
"""Retrieve historical data for a specific cyrptocurrency scraped from bitinfocharts.com
Parameters
----------
coin : the code of the cryptocurrency (e.g. 'btc' for bitcoin)
full range of available coins can be found on bitinfocharts.com
metric : the particular coin information to be retrieved
(options are limited to those listed on bitinfocharts.com
including 'price', 'marketcap', 'transactions' and 'sentinusd'
coin_col : whether to include the coin name as a column
(default is False i.e. the column is not included)
metric_col : whether to include the metric name as a column
(default is False i.e. the column is not included)
Returns
-------
pandas Dataframe
"""
if coin not in ['btc', 'eth', 'xrp', 'bch', 'ltc', 'dash', 'xmr', 'btg', 'etc', 'zec',
'doge', 'rdd', 'vtc', 'ppc', 'ftc', 'nmc', 'blk', 'aur', 'nvc', 'qrk', 'nec']:
raise ValueError("Not a valid coin")
if metric not in ['transactions', 'size', 'sentbyaddress', 'difficulty', 'hashrate', 'price',
'mining_profitability', 'sentinusd', 'transactionfees', 'median_transaction_fee',
'confirmationtime', 'marketcap', 'transactionvalue', 'mediantransactionvalue',
'tweets', 'activeaddresses', 'top100cap']:
raise ValueError("Not a valid bitinfocharts metric")
new_col_name = "_".join([coin, metric])
parsed_page = Request("https://bitinfocharts.com/comparison/{}-{}.html".format(metric, coin),
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'})
try:
parsed_page = urlopen(parsed_page, timeout=self.timeout).read()
parsed_page = parsed_page.decode("utf8")
except Exception as e:
return pd.DataFrame({"error":e}, index=[0])
start_segment = parsed_page.find("new Dygraph")
if start_segment != -1:
start_list = parsed_page.find('[[', start_segment)
end_list = parsed_page.find(']]', start_list)
parsed_page = parsed_page[start_list:end_list]
else:
return pd.DataFrame({"error":"Could not find the appropriate text tag"}, index=[0])
parsed_page = parsed_page.replace('new Date(', '')
parsed_page = parsed_page.replace(')', '')
parsed_page = parsed_page.replace('null', '0')
parsed_page = parsed_page.replace('["', '{"date":"')
parsed_page = parsed_page.replace('",', '","{}":'.format(new_col_name))
parsed_page = parsed_page.replace('],', '},')
parsed_page = parsed_page + '}]'
output = json.loads(parsed_page)
output = pd.DataFrame(output)
output['date'] = pd.to_datetime(output['date'], format="%Y-%m-%d")
output = output[(output['date']>=self.from_date) & (output['date']<=self.to_date)]
# for consistency, put date column first
output = output[['date', new_col_name]]
if coin_col:
output['coin'] = coin
if metric_col:
output['metric'] = metric
return output.sort_values(by='date', ascending=self.ascending).reset_index(drop=True)
def extract_poloniex(self, coin1, coin2, coin1_col=False, coin2_col=False):
"""Retrieve the historical price of one coin relative to another (currency pair) from poloniex
Parameters
----------
coin1 : the code of the denomination cryptocurrency
(e.g. 'btc' for prices in bitcoin)
coin2 : the code for the coin for which prices are retrieved
(e.g. 'eth' for ethereum)
coin1_col : whether to include the coin1 code as a column
(default is False i.e. the column is not included)
coin2_col : whether to include the coin2 code as a column
(default is False i.e. the column is not included)
Returns
-------
pandas Dataframe
"""
from_date = int(time.mktime(time.strptime(self.from_date, "%Y-%m-%d")))
to_date = int(time.mktime(time.strptime(self.to_date, "%Y-%m-%d")))
url = "https://poloniex.com/public?command=returnChartData¤cyPair={}_{}&start={}&end={}&period=86400".format(
coin1.upper(), coin2.upper(), from_date, to_date)
try:
parsed_page = urlopen(url, timeout=self.timeout).read()
parsed_page = parsed_page.decode("utf8")
except Exception as e:
return pd.DataFrame({"error":e}, index=[0])
output = json.loads(parsed_page)
if isinstance(output, dict):
if 'error' in list(output.keys()):
return pd.DataFrame(output, index=[0])
output = pd.DataFrame(output)
# more intuitive column order
output = output[['date', 'close', 'open', 'high', 'low',
'weightedAverage', 'quoteVolume', 'volume']]
output['date'] = pd.to_datetime(output['date'], unit='s')
output = output.sort_values(by='date', ascending=self.ascending).reset_index(drop=True)
if coin1_col:
output['coin1'] = coin1
if coin2_col:
output['coin2'] = coin2
return output
def get_exchange_rates(self, from_currency="USD", to_currency="EUR",
from_col=False, to_col=False):
"""Retrieve the historical exchange rate between two (fiat) currencies
Parameters
----------
from_currency : the from currency or the currency of denomination (e.g. 'USD')
to_currency : the currency to which you wish to exchange (e.g. 'EUR')
from_col : whether to include the from_currency code as a column
(default is False i.e. the column is not included)
to_col : whether to include the to_currency code as a column
(default is False i.e. the column is not included)
Returns
-------
pandas Dataframe
"""
n_days = (datetime.date.today() -
datetime.datetime.strptime(self.from_date, "%Y-%m-%d").date()).days + 1
url = "https://www.indexmundi.com/xrates/graph.aspx?c1={}&c2={}&days={}".format(
from_currency, to_currency, n_days)
try:
parsed_page = urlopen(url, timeout=self.timeout).read()
parsed_page = parsed_page.decode("utf8")
except Exception as e:
return pd.DataFrame({"error":e}, index=[0])
start_segment = parsed_page.find("chart xAxisName")
if start_segment != -1:
start_list = parsed_page.find("<", start_segment)
end_list = parsed_page.find("/></chart>", start_list)
parsed_page = parsed_page[start_list:end_list]
else:
return pd.DataFrame({"error":"Could not find the appropriate text tag"}, index=[0])
parsed_page = re.sub(r" showLabel='[0-9]'", "", parsed_page)
parsed_page = parsed_page.replace("'", '"')
parsed_page = parsed_page.replace("set ", '')
parsed_page = parsed_page.replace("<", "{")
parsed_page = parsed_page.replace("/>", "},")
parsed_page = parsed_page.replace('label', '\"date\"')
parsed_page = parsed_page.replace('value', '\"exch_rate\"')
parsed_page = parsed_page.replace('=', ':')
parsed_page = parsed_page.replace(' ', ',')
output = json.loads('[' + parsed_page + '}]')
output = pd.DataFrame(output)
output['date'] = pd.to_datetime(output['date'], format="%m/%d/%Y")
output['exch_rate'] = pd.to_numeric(output['exch_rate'], errors='coerce')
if from_col:
output['from_currency'] = from_currency
if to_col:
output['to_currency'] = to_currency
output = self._merge_fill_filter(output)
return output
def get_stock_prices(self, market, market_name=None):
"""Retrieve the historical price (or value) of a publically listed stock or index
Parameters
----------
market : the code of the stock or index (see yahoo finance for examples)
('%5EDJI' refers to the Dow Jones and '%5EIXIC' pulls the Nasdaq index)
market_name : specify an appropriate market name or label (under the market_name column)
the default is None (default is None i.e. the column is not included)
Returns
-------
pandas Dataframe
Notes
-----
This method scrapes data from yahoo finance, so it only works when the historical
data is presented on the site (which is not the case for a large number of stocks/indices).
"""
from_date = int(time.mktime(time.strptime(self.from_date, "%Y-%m-%d")))
# we want the daily data
# this site works off unix time (86400 seconds = 1 day)
to_date = int(time.mktime(time.strptime(self.to_date, "%Y-%m-%d"))) + 86400
url = "https://finance.yahoo.com/quote/{}/history?period1={}&period2={}&interval=1d&filter=history&frequency=1d".format(
market, from_date, to_date)
try:
            parsed_page = urlopen(url, timeout=self.timeout).read()
parsed_page = parsed_page.decode("utf8")
except Exception as e:
            return pd.DataFrame({"error":e}, index=[0])
import pandas as pd
# import re
def processFE_df(df):
"""Function to process Pandas dataframe from Funds Explorer site:
'https://www.fundsexplorer.com.br/ranking'
After this function the DataFrame can be filtered to analysis
Args:
df ([type]): pandas.core.frame.DataFrame
Returns:
[type]: pandas.core.frame.DataFrame
"""
df.columns = ['codigo', 'setor', 'precoatualR$', 'liqdiariaNeg',
'dividR$', 'divyield%', 'dy3macum%', 'dy6macum%',
'dy12macum%', 'dy3mmedia%', 'dy6mmedia%', 'dy12mmedia%',
'dyano%', 'varpreco%', 'rentper%', 'rentacum%',
'patrliqR$', 'vpaR$', 'p/vpaN', 'dypatr%', 'varpatr%',
'rentpatrper%', 'rentpatracum%', 'vacfisica%',
'vacfinan%', 'qtdativosN']
df = df.applymap(lambda x: str(x).replace('R$', ''))
df = df.applymap(lambda x: str(x).replace('%', ''))
df['precoatualR$'] = df['precoatualR$'].apply(lambda x:
str(x).replace('.', ''))
df['patrliqR$'] = df['patrliqR$'].apply(lambda x:
str(x).replace('.', ''))
df['vpaR$'] = df['vpaR$'].apply(lambda x: str(x).replace('.', ''))
df = df.applymap(lambda x: str(x).replace(',', '.'))
df['setor'] = df['setor'].apply(lambda x: str(x).replace('Ã', 'i'))
# df['setor'] = df['setor'].apply(lambda x: re.sub(r'Ã ', 'i', x))
df['codigo'] = df['codigo'].astype('string')
df['setor'] = df['setor'].astype('string')
df['precoatualR$'] = pd.to_numeric(df['precoatualR$'], errors='coerce')
df['liqdiariaNeg'] = pd.to_numeric(df['liqdiariaNeg'], errors='coerce')
df['dividR$'] = pd.to_numeric(df['dividR$'], errors='coerce')
df['divyield%'] = pd.to_numeric(df['divyield%'], errors='coerce')
df['dy3macum%'] = pd.to_numeric(df['dy3macum%'], errors='coerce')
df['dy6macum%'] = pd.to_numeric(df['dy6macum%'], errors='coerce')
df['dy12macum%'] = pd.to_numeric(df['dy12macum%'], errors='coerce')
df['dy3mmedia%'] = pd.to_numeric(df['dy3mmedia%'], errors='coerce')
    df['dy6mmedia%'] = pd.to_numeric(df['dy6mmedia%'], errors='coerce')
"""Script for generating data for the analysis."""
from typing import Dict
import ast
from pathlib import Path
import numpy as np
import pandas as pd
import igraph as ig
import joblib
from tqdm import tqdm
from pathcensus import PathCensus
from pathcensus.nullmodels import UBCM
from pathcensus.inference import Inference
from pathcensus.utils import set_seed
from src.utils import list_graphs, load_graph
# Graph statistics function ---------------------------------------------------
def statistics(graph: ig.Graph) -> pd.DataFrame:
"""Function for calculating graph statistics."""
paths = PathCensus(graph)
coefs = paths.coefs("nodes")
return pd.DataFrame({
"sim_g": paths.similarity("global"),
"sim": coefs["sim"].mean(),
"sim_e": paths.similarity("edges").mean(),
"comp_g": paths.complementarity("global"),
"comp": coefs["comp"].mean(),
"comp_e": paths.complementarity("edges").mean(),
"coefs": [coefs]
}, index=[0])
def get_metadata(graph: ig.Graph) -> Dict:
"""Get graph metadata dictionary."""
degseq = np.array(graph.degree())
taxonomy = ast.literal_eval(graph["taxonomy"])
bio1 = taxonomy[1]
bio2 = taxonomy[2]
bio3 = taxonomy[3]
if bio3 == "Metazoa":
bio = "Animal"
elif bio3 == "Fungi":
bio = bio3
elif bio2 == "Viridiplantae":
bio = bio2
else:
bio = bio1
return {
"dataset": "tree-of-life",
"domain": graph["domain"],
"relation": graph["relation"],
"label": graph["label"],
"name": graph["name"],
"long_name": graph["long_name"],
"taxonomy": [taxonomy],
"taxonomy_l2": graph["taxonomy_level2"],
"biodomain": bio,
"evo_length": len(taxonomy),
"evo_time": np.float64(graph["evo_time"]),
"pub_count": int(graph["pub_count"]),
"n_nodes": graph.vcount(),
"density": graph.vcount(),
"dbar": degseq.mean(),
"dcv": degseq.std() / degseq.mean(),
"dmin": degseq.min(),
"dmax": degseq.max()
}
# Prepare data ----------------------------------------------------------------
HERE = Path(__file__).parent
DATA = HERE/"data"
DATA.mkdir(parents=True, exist_ok=True)
# Number of null model samples
N_SAMPLES = 100
# Seed for the random number generator
# used for sampling from the null model
set_seed(44)
rawdata = []
nulltrend = []
calibrated = []
signif_01 = []
signif_05 = []
signif_10 = []
pbar = tqdm(list_graphs("proteins"))
for network in pbar:
pbar.set_description(network.split("__")[-1])
graph = load_graph("proteins", network, preprocess=True)
meta = get_metadata(graph)
model = UBCM(graph)
model.fit()
model.validate()
infer = Inference(graph, model, statistics)
data, null = infer.init_comparison(N_SAMPLES, null_kws=dict(progress=True))
# Estimate fractions of significant nodes
odf = pd.concat(data.pop("coefs").tolist())
ndf = pd.concat(null.pop("coefs").tolist())
infer.add_stats_index(odf)
infer.add_stats_index(ndf)
odf = pd.concat([odf], keys=[0], names=["_"])
ndf = pd.concat([ndf], keys=[0], names=["_"])
pvals = infer.estimate_pvalues(odf, ndf, adjust=False)
sigs = []
for alpha in (.01, .05, .1):
pv = infer.adjust_pvalues(pvals, alpha=alpha, copy=True)
sig = (pv <= alpha)[["sim", "comp"]]
sig["both"] = sig.all(axis=1)
sig["sim"] &= ~sig["both"]
sig["comp"] &= ~sig["both"]
sig["neither"] = 1 - sig[["sim", "comp", "both"]].sum(axis=1)
sig = sig.mean().to_frame().T
# Add graph metadata
for k, v in reversed(meta.items()):
sig.insert(0, k, v)
# Append to data list
sigs.append(sig)
# Unpack significance data
sig01, sig05, sig10 = sigs
# Compute null model averages
null_avg = null.mean().to_frame().T
# Compute calibrated coefficients
cdata = np.log(data / null).reset_index(drop=True) \
.replace([np.inf, -np.inf], np.nan) \
.dropna() \
.mean() \
.to_frame().T
# Add graph metadata
for k, v in reversed(meta.items()):
cdata.insert(0, k, v)
data.insert(0, k, v)
null_avg.insert(0, k, v)
rawdata.append(data)
nulltrend.append(null_avg)
calibrated.append(cdata)
signif_01.append(sig01)
signif_05.append(sig05)
signif_10.append(sig10)
# Prepare data frame ----------------------------------------------------------
proteins = {
"raw": pd.concat(rawdata, axis=0, ignore_index=True),
"null_trend": pd.concat(nulltrend, axis=0, ignore_index=True),
"calibrated": pd.concat(calibrated, axis=0, ignore_index=True),
"signif_01": | pd.concat(signif_01, axis=0, ignore_index=True) | pandas.concat |
import os.path as osp
import os
import torch
import numpy as np
import pandas as pd
import math
from torch_geometric.data import Data
from torch_geometric.data import InMemoryDataset
import torch_geometric.transforms as T
from ..bin.BaseDataset import BaseDataset, ElectroNegativityDiff, Complete
from ..bin.Label2Idx import ArR_label_Idx_Insertion
transform = T.Compose([Complete(), ElectroNegativityDiff(norm=False)])
class ReactionDataset(InMemoryDataset):
def __init__(self, root, mode='dev', transform=None, pre_transform=None, pre_filter=None):
self.root = root
self.mode = mode
self.mode_0 = mode.split('_')[0]
assert self.mode_0 in ['dev', 'valid',
'test'], "mode_0 should be dev/valid/test"
super(ReactionDataset, self).__init__(
root, transform, pre_transform, pre_filter)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
if self.mode_0 == 'dev':
return [self.mode, '%s/TrainSet_Labels.csv'%self.mode]
elif self.mode_0 == 'valid':
return [self.mode, '%s/ValidSet_Labels.csv'%self.mode]
else:
return [self.mode, '%s/TestSet_Labels.csv'%self.mode]
@property
def processed_file_names(self):
return '%s/ReactionDataset_ArR_%s.pt' % (self.mode, self.mode)
def download(self):
return 0
#raise NotImplementedError('please download and unzip dataset from %s, and put it at %s' % (
# _urls[self.mode], self.raw_dir))
def base_data_index_collector(self):
R_dataset = BaseDataset(root=self.root, mode='R', suffix=self.mode, pre_transform=transform)
Ar_dataset = BaseDataset(root=self.root, mode='Ar', suffix=self.mode, pre_transform=transform)
self.R_dataset, self.Ar_dataset = R_dataset, Ar_dataset
ArR_label_Idx_Insertion(self.raw_paths[1])
target = pd.read_csv(self.raw_paths[1], index_col=0,
usecols=['Reaction_idx', 'Radical_idx', 'Ar_idx', 'loc_idx', 'DG_TS'])
origin_Ar_loc = Ar_dataset.data.keyAtom_list[:,
0]*10 + Ar_dataset.data.keyAtom_list[:, 1]
tmp_Ar_loc = list(target['Ar_idx']*10 + target['loc_idx'])
self.Ar_loc_index = (origin_Ar_loc == torch.LongTensor(
tmp_Ar_loc).unsqueeze(0).transpose(1, 0)).nonzero()[:, 1]
check_list = [x for x in tmp_Ar_loc if x not in origin_Ar_loc]
if check_list:
print(check_list)
self.Ar_index = (Ar_dataset.data.alias == torch.LongTensor(
list(target['Ar_idx'])).unsqueeze(0).transpose(1, 0)).nonzero()[:, 1]
self.R_index = (R_dataset.data.alias == torch.LongTensor(
list(target['Radical_idx'])).unsqueeze(0).transpose(1, 0)).nonzero()[:, 1]
self.target = target[['DG_TS']]
self.target_labels = ['DG_TS']
def reaction_reader(self, i, ArR_idx):
'''
i : index of ArR_idx in ArR ReactionDataset
ArR_idx: Reaction_idx in ArR ReactionDataset,
form: 6 digit integer,
[R_idx:2][Ar_idx:3][loc_idx:1]
y: DG_TS in ArR ReactionDataset
'''
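        # Worked example of the encoding above (the indices are hypothetical):
        # ArR_idx = 120345  ->  R_idx = 12, Ar_idx = 034, loc_idx = 5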
local_f = ['ACSF_local', 'SOAP', 'PhyChem_local']
total_f = ['MACCSfp', 'Morganfp', 'mergefp', 'PhyChem_total',
'ACSF', 'pos', 'x', 'edge_attr', 'CM', 'BoB']
other_f = ['edge_index']
DG_TS = torch.FloatTensor(self.target.iloc[i].tolist())
data = Data(y=DG_TS)
data.ArR_idx = torch.LongTensor([ArR_idx])
for f in local_f:
Arl_i = self.Ar_loc_index[i]
Arl_j = self.Ar_loc_index[i]+1
data['Ar_'+f] = self.Ar_dataset.data[f][Arl_i:Arl_j, :]
R_i = self.R_dataset.slices[f][self.R_index[i]]
R_j = self.R_dataset.slices[f][self.R_index[i]+1]
data['R_'+f] = self.R_dataset.data[f][R_i:R_j, :]
for f in total_f:
Ar_i = self.Ar_dataset.slices[f][self.Ar_index[i]]
Ar_j = self.Ar_dataset.slices[f][self.Ar_index[i]+1]
data['Ar_'+f] = self.Ar_dataset.data[f][Ar_i:Ar_j, :]
R_i = self.R_dataset.slices[f][self.R_index[i]]
R_j = self.R_dataset.slices[f][self.R_index[i]+1]
data['R_'+f] = self.R_dataset.data[f][R_i:R_j, :]
for f in other_f:
Ar_i = self.Ar_dataset.slices[f][self.Ar_index[i]]
Ar_j = self.Ar_dataset.slices[f][self.Ar_index[i]+1]
data['Ar_'+f] = self.Ar_dataset.data[f][:, Ar_i:Ar_j]
R_i = self.R_dataset.slices[f][self.R_index[i]]
R_j = self.R_dataset.slices[f][self.R_index[i]+1]
data['R_'+f] = self.R_dataset.data[f][:, R_i:R_j]
return data
def process(self):
self.base_data_index_collector()
data_list = []
for i, ArR_idx in enumerate(self.target.index):
ArR_data = self.reaction_reader(i, ArR_idx)
if ArR_data is not None:
data_list.append(ArR_data)
if self.pre_filter is not None:
data_list = [data for data in data_list if self.pre_filter(data)]
if self.pre_transform is not None:
data_list = [self.pre_transform(data) for data in data_list]
data, slices = self.collate(data_list)
processed_dir = os.path.dirname(self.processed_paths[0])
if not os.path.isdir(processed_dir):
os.makedirs(processed_dir)
torch.save((data, slices), self.processed_paths[0])
def Transform_DG_to_DDG(TrainSet, scale_ref=5, verbose=True):
def Part_select(a, b, which_part):
assert which_part in ['up', 'down'], "which_part should be up/down"
if which_part == 'up':
return a > b
elif which_part == 'down':
return a < b
def get_Scale(DDG):
# scale: @A vs @B
scale = np.piecewise(DDG, [DDG >= 0, DDG < 0], [lambda x: math.exp(-x*4.184*1000/(
298.15*8.31451)), lambda x: -math.exp(x*4.184*1000/(298.15*8.31451))])
return scale
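    # Worked example for the formula above (assuming DDG is in kcal/mol):
    # DDG = 1 kcal/mol -> scale = exp(-1 * 4184 / (298.15 * 8.31451))
    #                           = exp(-1.69) ~ 0.185,
    # i.e. roughly a 5.4 : 1 ratio between the two competing sites at 298.15 K.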
def get_Type(scale, scale_ref=5):
Type = np.piecewise(scale, [scale < -1/scale_ref, -1/scale_ref <= scale < -(1/scale_ref)**2,
-(1/scale_ref)**2 <= scale < 0, 0 < scale <= (1/scale_ref)**2,
(1/scale_ref)**2 < scale <= 1/scale_ref, 1/scale_ref < scale],
[0, -1, -2, 2, 1, 0])
return Type
Group_data = TrainSet.groupby(['Ar_idx', 'loc_idx', 'Radical_idx'])[
'DG_TS'].mean().unstack()
Ars = Group_data.index.levels[0]
Radicals = Group_data.columns
def get_DDG(which_part='up'):
DDG_TS_ls = []
for Ar in Ars:
for R in Radicals:
loc = Group_data.loc[Ar, R].index
for loc_A in loc:
for loc_B in loc:
if Part_select(loc_B, loc_A, which_part):
DG_TS_A = Group_data.loc[Ar, loc_A][R]
DG_TS_B = Group_data.loc[Ar, loc_B][R]
                            if pd.notna(DG_TS_A) == True and pd.notna(DG_TS_B) == True:
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pickle
import seaborn as sns
from scipy import stats
# from scipy.optimize import root
from pyapprox import generate_independent_random_samples
import matplotlib as mpl
from scipy import stats
from scipy.stats import spearmanr
mpl.rcParams['font.size'] = 16
mpl.rcParams['lines.linewidth'] = 3
mpl.rcParams['text.usetex'] = False # use latex for all text handling
mpl.rcParams['savefig.bbox'] = 'tight'
mpl.rcParams['savefig.format'] = 'png' # gives best resolution plots
mpl.rcParams['axes.labelsize'] = 20
mpl.rcParams['axes.titlesize'] = 20
mpl.rcParams['xtick.labelsize'] = 20
mpl.rcParams['ytick.labelsize'] = 20
mpl.rcParams['legend.fontsize'] = 16
# mpl.rc('xtick', labelsize=20)
# mpl.rc('ytick', labelsize=20)
# print mpl.rcParams.keys()
mpl.rcParams['text.latex.preamble'] = \
r'\usepackage{siunitx}\usepackage{amsmath}\usepackage{amssymb}'
from funcs.read_data import file_settings, variables_prep
from adaptive_gp_model import *
# Calculate the ratio of samples in the subregion
def ratio_subreg(gp):
"""
Function to calculate the ratio of samples in the subregion in the adaptive procedure.
Parameters:
===========
gp: Gaussian Process object
Return:
=======
ration_df: pd.DataFrame, dataframe of the ratios at each iteration.
"""
y_training = gp.y_train_
# num_new_samples = np.asarray([0]+[20]+[8]*10+[16]*20+[24]*16+[40]*14)
num_new_samples = np.asarray([20]+[8]*10+[16]*20+[24]*20+[40]*18)
num_samples = np.cumsum(num_new_samples)
ratio_samples = np.zeros(shape=(num_new_samples.shape[0]-2, 2))
ratio_sum = 0
for ii in range(num_new_samples.shape[0] - 2):
num_subreg = np.where(y_training[num_samples[ii]: num_samples[ii+1]]>0)[0].shape[0]
ratio_sum = ratio_sum + num_subreg
ratio_samples[ii, 0] = num_subreg / num_new_samples[ii+1]
ratio_samples[ii, 1] = ratio_sum / num_samples[ii+1]
ratio_df = pd.DataFrame(data=ratio_samples,
index=np.arange(ratio_samples.shape[0]), columns=['Subregion', 'FullSpace'])
ratio_df['num_samples'] = num_samples[1:-1]
return ratio_df
# END ratio_subreg()
from funcs.utils import define_constants
def choose_fixed_point(plot_range, dot_samples, samples_opt, dot_vals):
"""
Function used to set the nomial point for fixing parameters at.
Parameters:
===========
plot_range: str, decide which type of nomial values to use.
dot_samples: np.ndarray, of shape D*N where D is the number of parameters,
the initial parameter samples for calculation objective functions
samples_opt: np.ndarray, of shape D*M where D is the number of parameters,
parameter samples resulting in objective functions above the threshold
dot_vals: np.ndarray, objective function values from dot_samples
Return:
===========
x_default: list, the nominal values for all D parameters
fig_path: str, the dir defined by the type of nominal values for results to save
"""
if plot_range == 'full_mean':
x_default = define_constants(dot_samples, 13, stats = np.mean)
fig_path = 'fix_mean_full'
elif plot_range == 'sub_median':
samples_opt = dot_samples[:, np.where(dot_vals>0.382)[0]]
x_default = define_constants(samples_opt, 13, stats = np.median)
fig_path = 'fix_median_subreg'
elif plot_range == 'sub_mean':
samples_opt = dot_samples[:, np.where(dot_vals>0.382)[0]]
x_default = define_constants(samples_opt, 13, stats = np.mean)
fig_path = 'fix_mean_subreg'
elif plot_range == 'sub_rand':
x_default = dot_samples[:, np.where(dot_vals>0.382)[0]][:, 38] # 8 for analytic, 38 for sample
fig_path = 'fix_rand_subreg'
elif plot_range == 'full_rand':
breakpoint()
x_default = dot_samples[:, np.where(dot_vals>0.382)[0]][:, 8] # 8 for analytic, 38 for sample
fig_path = 'fix_rand_subreg'
elif (plot_range == 'sub_max')|(plot_range == 'full_max'):
x_default = dot_samples[:, np.where(dot_vals>=dot_vals.max())[0]]
fig_path = 'fix_max_subreg'
else:
AssertionError
return x_default, fig_path
def cal_stats(vals_opt, vals_dict, re_eval):
"""
Function used to calculate the statstics of the objective values VS parameter fixing.
Parameters:
===========
vals_dict: dict, containing the objective function values with parameters being fixed
vals_opt: np.ndarray, objective function values used to calculate the statistics
re_eval: Bool, re-evaluate the OBJ using the whole samples if True,
else using the optimal set only for parameter fixing
Return:
===========
df_stats: pd.DataFrame, of statistics
"""
# PDF plot
    df_stats = pd.DataFrame(columns=['mean', 'std', 'qlow', 'qup'])
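    # The original body is truncated in this dump; the lines below are a hedged
    # sketch (not the author's implementation), assuming every entry of
    # vals_dict is a 1-D array of objective values for one fixed-parameter
    # setting, and ignoring the re_eval switch.
    for key, vals in vals_dict.items():
        vals = np.asarray(vals)
        df_stats.loc[key] = [vals.mean(), vals.std(),
                             np.quantile(vals, 0.025), np.quantile(vals, 0.975)]
    vals_opt = np.asarray(vals_opt)
    df_stats.loc['unconditional'] = [vals_opt.mean(), vals_opt.std(),
                                     np.quantile(vals_opt, 0.025),
                                     np.quantile(vals_opt, 0.975)]
    return df_stats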
import textwrap
import requests
import pandas as pd
import numpy as np
import warnings
from json.decoder import JSONDecodeError
from typing import Any
NULL = np.nan
class Client:
def __init__(
self, url: str, headers: dict = {},
field_separator: str = '.', **kwargs: Any
):
self.url = url
self.headers = headers
self.field_separator = field_separator
self.options = kwargs
def query(
self, query: str, variables: dict = None,
operation_name: str = None, headers: dict = {},
flatten: bool = True, schema: dict = None,
**kwargs: Any,
):
if schema and self.validate_schema(schema) and not flatten:
raise ValueError("Argument `flatten` must be `True` for schema to be applied.")
body = {'query': query, 'variables': variables, 'operation_name': operation_name}
body = {k:v for k, v in body.items() if v}
with requests.Session() as s:
r = s.post(
self.url,
json=body,
headers={**self.headers, **headers},
**{**self.options, **kwargs},
)
try:
data = r.json()['data']
assert r.status_code == 200
except (KeyError, AssertionError) as e:
raise FailedRequest(self.url, query, r.status_code, r.json(), str(e))
except (TypeError, JSONDecodeError) as e:
raise FailedRequest(self.url, query, r.status_code, None, str(e))
except Exception as e:
raise FailedRequest(self.url, query, None, None, str(e))
if flatten:
df = self.flatten(data)
if schema:
df = self.apply_schema(df, schema)
return df
else:
return data
def is_empty(self, data) -> bool:
if isinstance(data, dict):
return all( self.is_empty(data[k]) for k in data.keys() )
else:
if data:
return False
else:
return True
def flatten(self, data: dict) -> pd.DataFrame:
if self.is_empty(data):
return pd.DataFrame()
else:
            df = pd.json_normalize(data, sep=self.field_separator)
            return df
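# Hedged usage sketch (the endpoint URL and query below are illustrative only
# and not part of the original module; a reachable GraphQL endpoint is assumed):
if __name__ == "__main__":
    client = Client(url="https://example.com/graphql")
    example_query = """
    query {
        countries { code name }
    }
    """
    df_example = client.query(example_query)  # flattened pandas DataFrame
    print(df_example.head())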
'''
This code will clean the OB datasets and combine all the cleaned data into one
Dataset name: O-27-Da Yan
semi-automate code, needs some hands work. LOL But God is so good to me.
1. 9 different buildings in this dataset, and each building has different rooms
3. each room has different window, door, ac, indoor, outdoor info
4. I processed building A to F by hand, then figured out that I can rename the files first, then use code to process
5. rename the file by type and number, such as window1, indoor1, ac1, door1, etc.
6. code automated G, H, I
7. the folder has multiple types of data, csv and xlsx, figure out the file type, then read into pandas
8. concat the outdoor datetime and temperature with ac data, then judge if the ac is on or off
'''
import os
import glob
import string
import datetime
import pandas as pd
import matplotlib.pyplot as plt
# specify the path
data_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/2021-05-28-1130-raw-data/Annex 79 Data Collection/O-27-Da Yan/_yapan_processing/processed/'
template_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/OB Database Consolidation/Templates/'
save_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/2021-05-28-1130-raw-data/Annex 79 Data Collection/O-27-Da Yan/_yapan_processing/_sql/'
# generate the name of different building folders
alphabet_string = string.ascii_uppercase
alphabet_list = list(alphabet_string)
building_names = alphabet_list[:9]
''' 1. process data by folders '''
begin_time = datetime.datetime.now()
# create dataframe to store the data
combined_window = pd.DataFrame()
combined_door = pd.DataFrame()
combined_hvac = pd.DataFrame()
combined_indoor = pd.DataFrame()
combined_outdoor = pd.DataFrame()
''' process outdoor data '''
print(f'Process outdoor data')
os.chdir(data_path) # pwd
sub_folders = next(os.walk('.'))[1] # get the names of the child directories, different rooms
root_files = next(os.walk('.'))[2] # get the files under root path
outdoor_files = list(filter(lambda name: 'outdoor_building' in name, root_files)) # filter out the door status files
combined_outdoor = pd.concat([pd.read_csv(f) for f in outdoor_files])
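# Hedged sketch of step 8 from the header comment (not part of the original
# script): merge outdoor datetime/temperature onto the HVAC records and flag
# AC usage. Column names ('Outdoor_Temp', 'Supply_Temp') and the 2 degC
# threshold are assumptions for illustration; combined_hvac is only populated
# further below and 'Date_Time' must be parsed to datetime first, so this is
# shown commented out.
# example_hvac = pd.merge_asof(
#     combined_hvac.sort_values('Date_Time'),
#     combined_outdoor.sort_values('Date_Time')[['Date_Time', 'Outdoor_Temp']],
#     on='Date_Time', direction='nearest')
# example_hvac['HVAC_Status'] = (
#     (example_hvac['Outdoor_Temp'] - example_hvac['Supply_Temp']) > 2).astype(int)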
''' manual processed data '''
print(f'Process manually processed data')
building_names_1 = building_names[:6]
# unit test
# i = 0
# folder_name = building_names_1[i]
for index, bld_name in enumerate(building_names_1):
print(f'Reading the data under building folder {bld_name}')
building_path = data_path + bld_name + '/'
os.chdir(building_path) # pwd
sub_folders = next(os.walk('.'))[1] # get the names of the child directories, different rooms
root_files = next(os.walk('.'))[2] # get the files under root path
# combine
indoor_files = list(filter(lambda name: 'indoor' in name, root_files)) # filter out the indoor files
window_files = list(filter(lambda name: 'window' in name, root_files)) # filter out the window files
hvac_files = list(filter(lambda name: 'hvac' in name, root_files)) # filter out the ac files
door_files = list(filter(lambda name: 'door_status' in name, root_files)) # filter out the door status files
# read anc combine the files under this folder
if indoor_files: # make sure it is not empty
indoor_temp_df = pd.concat([pd.read_csv(f) for f in indoor_files])
combined_indoor = pd.concat([combined_indoor, indoor_temp_df], ignore_index=True) # concat the data
else:
pass
if window_files:
window_temp_df = pd.concat([pd.read_csv(f) for f in window_files])
combined_window = pd.concat([combined_window, window_temp_df], ignore_index=True) # concat the data
else:
pass
if hvac_files:
hvac_temp_df = pd.concat([pd.read_csv(f) for f in hvac_files])
combined_hvac = pd.concat([combined_hvac, hvac_temp_df], ignore_index=True) # concat the data
# print(combined_hvac.isnull().sum())
# print(index)
else:
pass
if door_files:
door_temp_df = pd.concat([pd.read_csv(f) for f in door_files])
combined_door = pd.concat([combined_door, door_temp_df], ignore_index=True) # concat the data
# print(combined_door.isnull().sum())
# print(index)
else:
pass
''' automated process by building level '''
building_names = ['G', 'H', 'I']
building_ids = [7, 8, 9]
for index, bld_name in enumerate(building_names):
print(f'Dealing with data under building folder {bld_name}')
building_path = data_path + bld_name + '/'
os.chdir(building_path) # pwd
sub_folders = next(os.walk('.'))[1] # get the names of the child directories, different rooms
root_files = next(os.walk('.'))[2] # get the files under root path
    ''' room level '''
for room_id in sub_folders:
print(f'Dealing with data under room folder {room_id}')
room_path = building_path + room_id + '/'
os.chdir(room_path) # pwd
file_names = os.listdir() # get all the file names
window_files = list(filter(lambda name: 'window' in name, file_names)) # filter out the window files
hvac_files = list(filter(lambda name: 'ac' in name, file_names)) # filter out the ac files
door_files = list(filter(lambda name: 'door' in name, file_names)) # filter out the door files
# read and combine files
if window_files:
for window_name in window_files:
name, extension = os.path.splitext(window_name) # get the path and extension of a file
if extension == '.CSV': # if the file is csv file
temp_df = pd.read_csv(window_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Window_Status'] # rename the columns
else:
temp_df = pd.read_excel(window_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Window_Status']
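                # e.g. a file named 'window12.CSV' gives name.split('_')[0] == 'window12',
                # so [6:] strips the literal 'window' prefix and keeps the numeric ID '12'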
temp_df['Window_ID'] = int(name.split('_')[0][6:])
temp_df['Room_ID'] = int(room_id) # assign Room_ID
temp_df['Building_ID'] = building_ids[index] # assign Building_ID
combined_window = pd.concat([combined_window, temp_df], ignore_index=True) # concat the data
else:
pass
if door_files:
for door_name in door_files:
name, extension = os.path.splitext(door_name) # get the path and extension of a file
if extension == '.CSV': # if the file is csv file
temp_df = pd.read_csv(door_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Door_Status'] # rename the columns
else:
temp_df = pd.read_excel(door_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Door_Status']
temp_df['Door_ID'] = int(name.split('_')[0][4:])
temp_df['Room_ID'] = int(room_id) # assign Room_ID
temp_df['Building_ID'] = building_ids[index] # assign Building_ID
combined_door = pd.concat([combined_door, temp_df], ignore_index=True) # concat the data
else:
pass
if hvac_files:
for hvac_name in hvac_files:
name, extension = os.path.splitext(hvac_name) # get the path and extension of a file
if extension == '.CSV': # if the file is csv file
temp_df = pd.read_csv(hvac_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'yapan_supply _t']
else:
temp_df = | pd.read_excel(hvac_name, usecols=[0, 1]) | pandas.read_excel |
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
Load messages and categories datasets as dataframes; merge the messages and categories datasets using the common id.
:param messages_filepath: file path of message dataset.
:param categories_filepath: file path of categories dataset.
:return: a dataframe that merge the two datasets.
"""
messages = pd.read_csv(messages_filepath)
categories = | pd.read_csv(categories_filepath) | pandas.read_csv |
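    # Per the docstring, the merge step would join the two frames on their shared id,
    # e.g. df = messages.merge(categories, on='id') -- an illustrative form, not
    # necessarily the exact original call.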
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 29 18:37:07 2021
@author: Jannik
"""
import urllib.request
import glob
from unidecode import unidecode
import os
import zipfile
import pandas as pd
import datetime
import shutil
# look up abbreviations of voivodships
terit_to_abbr = {"t24": "PL83", "t14": "PL78", "t12": "PL77", "t30": "PL86",
"t10": "PL74", "t02": "PL72", "t22": "PL82", "t18": "PL80",
"t04": "PL73", "t06": "PL75", "t16": "PL79", "t26": "PL84",
"t20": "PL81", "t32": "PL87", "t28": "PL85", "t08": "PL76",
"t00": "PL"}
# due to unknown encoding we have to map the names back to real names
map_abbr_name = {"PL72": "Dolnoslaskie", "PL73": "Kujawsko-Pomorskie",
"PL75": "Lubelskie", "PL76": "Lubuskie", "PL74": "Lodzkie",
"PL77": "Malopolskie", "PL78": "Mazowieckie",
"PL79": "Opolskie", "PL80": "Podkarpackie", "PL81": "Podlaskie",
"PL82": "Pomorskie", "PL83": "Slaskie", "PL84": "Swietokrzyskie",
"PL85": "Warminsko-Mazurskie", "PL86": "Wielkopolskie",
"PL87": "Zachodniopomorskie", "PL": "Poland"}
# download raw zip file
urllib.request.urlretrieve("https://arcgis.com/sharing/rest/content/items/a8c562ead9c54e13a135b02e0d875ffb/data", "poland.zip")
# extract file
with open(os.path.abspath("poland.zip"), mode="rb") as file:
zip_ref = zipfile.ZipFile(file)
os.mkdir(os.path.join(os.getcwd(), "poland_unzip"))
zip_ref.extractall("poland_unzip")
curr_inc_case = pd.read_csv("../../data-truth/MZ/truth_MZ-Incident Cases_Poland.csv")
curr_inc_deaths = pd.read_csv("../../data-truth/MZ/truth_MZ-Incident Deaths_Poland.csv")
curr_cum_case = pd.read_csv("../../data-truth/MZ/truth_MZ-Cumulative Cases_Poland.csv")
curr_cum_deaths = pd.read_csv("../../data-truth/MZ/truth_MZ-Cumulative Deaths_Poland.csv")
inc_case_dfs = []
inc_death_dfs = []
# get csv files
for file in os.listdir("./poland_unzip"):
if file.endswith(".csv"):
date_raw = file[0:8]
date = datetime.datetime.strptime(date_raw[0:4] + "." + date_raw[4:6] + "." + date_raw[6:], "%Y.%m.%d")
if date > datetime.datetime.strptime("2021.07.15", "%Y.%m.%d"):
print("Processing date: {}".format(date))
df = pd.read_csv(os.path.join(os.path.join(os.getcwd(), "poland_unzip"), file), sep=";", encoding="cp1252")
df.drop(columns=["wojewodztwo"], inplace=True)
df["location"] = df["teryt"].apply(lambda x: terit_to_abbr[x])
df["location_name"] = df["location"].apply(lambda x: map_abbr_name[x])
#extract date from filename
df["date"] = (date_raw[0:4] + "." + date_raw[4:6] + "." + date_raw[6:])
df["date"] = pd.to_datetime(df["date"], format="%Y.%m.%d")
#shift to ecdc
df["date"] = df["date"].apply(lambda x: x + datetime.timedelta(days=1))
inc_case_df = df[["date", "location_name", "location", "liczba_przypadkow"]].rename(columns={"liczba_przypadkow": "value"})
inc_case_dfs.append(inc_case_df)
inc_death_df = df[["date", "location_name", "location", "zgony"]].rename(columns={"zgony": "value"})
inc_death_dfs.append(inc_death_df)
inc_case_df = pd.concat(inc_case_dfs)
inc_death_df = pd.concat(inc_death_dfs)
# only use new data
last_update = curr_inc_case["date"].max()
# cut off dates
inc_case_df = inc_case_df[inc_case_df["date"] > last_update]
inc_death_df = inc_death_df[inc_death_df["date"] > last_update]
inc_case_df["date"] = inc_case_df["date"].dt.date
inc_death_df["date"] = inc_death_df["date"].dt.date
# add new data to dataframe
final_inc_case = pd.concat([curr_inc_case, inc_case_df])
final_inc_case = final_inc_case.set_index("date")
final_inc_deaths = pd.concat([curr_inc_deaths, inc_death_df])
final_inc_deaths = final_inc_deaths.set_index("date")
# create latest cumulative data
cum_case_df = inc_case_df.copy()
cum_case_df["value"] = cum_case_df.groupby("location")["value"].cumsum()
cum_death_df = inc_death_df.copy()
cum_death_df["value"] = cum_death_df.groupby("location")["value"].cumsum()
# add up cum data
latest_death_sum = curr_cum_deaths[curr_cum_deaths["date"] == curr_cum_deaths["date"].max()][["location", "value"]]
latest_death_sum = pd.Series(latest_death_sum.value.values, index=latest_death_sum.location).to_dict()
latest_case_sum = curr_cum_case[curr_cum_case["date"] == curr_cum_case["date"].max()][["location", "value"]]
latest_case_sum = pd.Series(latest_case_sum.value.values, index=latest_case_sum.location).to_dict()
def update_cumulative(location, value, previous_cumulative):
return value + previous_cumulative[location]
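# Quick illustration with made-up numbers: if the last stored cumulative total for "PL" is 100
# and the new incident value is 5, update_cumulative("PL", 5, {"PL": 100}) returns 105,
# i.e. new increments are stacked on top of the previous cumulative series.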
cum_death_df["value"] = cum_death_df.apply(lambda row: update_cumulative(row["location"], row["value"], latest_death_sum), axis=1)
cum_case_df["value"] = cum_case_df.apply(lambda row: update_cumulative(row["location"], row["value"], latest_case_sum), axis=1)
final_cum_case = | pd.concat([curr_cum_case, cum_case_df]) | pandas.concat |
import datetime as dtm
from functools import lru_cache
from typing import Optional, Callable
import pandas as pd
from .base_classes import SingleFinancialSymbolSource, FinancialSymbolsSource
from .._settings import data_url, change_column_name
from ..common.enums import Currency, SecurityType, Period
from ..common.financial_symbol import FinancialSymbol
from ..common.financial_symbol_id import FinancialSymbolId
from ..common.financial_symbol_info import FinancialSymbolInfo
class CbrTopRatesSource(SingleFinancialSymbolSource):
def _load_rates(self):
df = pd.read_csv('{}cbr_deposit_rate/data.csv'.format(data_url), sep='\t')
df.sort_values(by='decade', inplace=True)
df.rename(columns={'close': change_column_name, 'decade': 'date'},
inplace=True)
return df
def _load_dates(self, kind):
index = pd.read_csv('{}cbr_deposit_rate/__index.csv'.format(data_url), sep='\t')
period_str = index[kind][0]
return pd.Period(period_str, freq='M')
def __init__(self):
super().__init__(
namespace='cbr',
name='TOP_rates',
values_fetcher=lambda: self._load_rates(),
start_period=self._load_dates(kind='date_start'),
end_period=self._load_dates(kind='date_end'),
long_name='Динамика максимальной процентной ставки (по вкладам в российских рублях)',
currency=Currency.RUB,
security_type=SecurityType.RATES,
period=Period.DECADE,
adjusted_close=False,
)
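# Hypothetical usage of the source above (kept as a comment because _load_rates pulls a CSV
# from `data_url` over the network):
#   rates = CbrTopRatesSource()._load_rates()   # 'date' column plus the renamed close column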
class CbrCurrenciesSource(FinancialSymbolsSource):
def __init__(self):
super().__init__(namespace='cbr')
self.url_base = data_url + 'currency/'
self.index = pd.read_csv(self.url_base + '__index.csv', sep='\t', index_col='name')
self.__short_names = {
Currency.RUB: 'Рубль РФ',
Currency.USD: 'Доллар США',
Currency.EUR: 'Евро',
}
self._currency_min_date = {
Currency.RUB.name: pd.Period('1990', freq='D'),
Currency.USD.name: pd.Period('1913', freq='D'),
Currency.EUR.name: pd.Period('1996', freq='D'),
}
@lru_cache(maxsize=512)
def __currency_values(self, name: str) -> Callable[[pd.Period, pd.Period], pd.DataFrame]:
def func(start_period: pd.Period, end_period: pd.Period) -> pd.DataFrame:
start_period = max(start_period,
| pd.Period(self._currency_min_date[name], freq='M') | pandas.Period |
#https://www.kaggle.com/niteshx2/top-50-beginners-stacking-lgb-xgb
#https://www.kaggle.com/kernels/scriptcontent/11907006/download
import numpy as np # linear algebra
import pandas as pd #
from datetime import datetime
from scipy.stats import skew # for some statistics
from scipy.special import boxcox1p
from scipy.stats import boxcox_normmax
from sklearn.linear_model import ElasticNetCV, LassoCV, RidgeCV
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.svm import SVR
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import mean_squared_error
from mlxtend.regressor import StackingCVRegressor
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
import litemort
import os
data_root = f"F:/Datasets/house"
print(os.listdir(data_root))
# Based on https://www.kaggle.com/hemingwei/top-2-from-laurenstc-on-house-price-prediction
train = pd.read_csv(f'{data_root}/train.csv')
test = pd.read_csv(f'{data_root}/test.csv')
print("Train set size:", train.shape)
print("Test set size:", test.shape)
print('START data processing', datetime.now(), )
train_ID = train['Id']
test_ID = test['Id']
# Now drop the 'Id' colum since it's unnecessary for the prediction process.
train.drop(['Id'], axis=1, inplace=True)
test.drop(['Id'], axis=1, inplace=True)
# Deleting outliers
train = train[train.GrLivArea < 4500]
train.reset_index(drop=True, inplace=True)
# We use the numpy function log1p which applies log(1+x) to all elements of the column
train["SalePrice"] = np.log1p(train["SalePrice"])
y = train.SalePrice.reset_index(drop=True)
train_features = train.drop(['SalePrice'], axis=1)
test_features = test
features = pd.concat([train_features, test_features]).reset_index(drop=True)
print(features.shape)
# Some of the non-numeric predictors are stored as numbers; we convert them into strings
features['MSSubClass'] = features['MSSubClass'].apply(str)
features['YrSold'] = features['YrSold'].astype(str)
features['MoSold'] = features['MoSold'].astype(str)
features['Functional'] = features['Functional'].fillna('Typ')
features['Electrical'] = features['Electrical'].fillna("SBrkr")
features['KitchenQual'] = features['KitchenQual'].fillna("TA")
features['Exterior1st'] = features['Exterior1st'].fillna(features['Exterior1st'].mode()[0])
features['Exterior2nd'] = features['Exterior2nd'].fillna(features['Exterior2nd'].mode()[0])
features['SaleType'] = features['SaleType'].fillna(features['SaleType'].mode()[0])
features["PoolQC"] = features["PoolQC"].fillna("None")
for col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):
features[col] = features[col].fillna(0)
for col in ['GarageType', 'GarageFinish', 'GarageQual', 'GarageCond']:
features[col] = features[col].fillna('None')
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
features[col] = features[col].fillna('None')
features['MSZoning'] = features.groupby('MSSubClass')['MSZoning'].transform(lambda x: x.fillna(x.mode()[0]))
objects = []
for i in features.columns:
if features[i].dtype == object:
objects.append(i)
features.update(features[objects].fillna('None'))
features['LotFrontage'] = features.groupby('Neighborhood')['LotFrontage'].transform(lambda x: x.fillna(x.median()))
# Filling in the rest of the NA's
numeric_dtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
numerics = []
for i in features.columns:
if features[i].dtype in numeric_dtypes:
numerics.append(i)
features.update(features[numerics].fillna(0))
numeric_dtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
numerics2 = []
for i in features.columns:
if features[i].dtype in numeric_dtypes:
numerics2.append(i)
skew_features = features[numerics2].apply(lambda x: skew(x)).sort_values(ascending=False)
high_skew = skew_features[skew_features > 0.5]
skew_index = high_skew.index
for i in skew_index:
features[i] = boxcox1p(features[i], boxcox_normmax(features[i] + 1))
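# Only features with skew above 0.5 are transformed; boxcox1p(x, lmbda) applies the Box-Cox
# transform to (1 + x), so zero-valued entries stay finite. A quick sanity check (illustrative
# only): boxcox1p(np.array([0.0, 1.0, 10.0]), 0.2) stays monotone while compressing the right tail.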
features = features.drop(['Utilities', 'Street', 'PoolQC',], axis=1)
features['YrBltAndRemod']=features['YearBuilt']+features['YearRemodAdd']
features['TotalSF']=features['TotalBsmtSF'] + features['1stFlrSF'] + features['2ndFlrSF']
features['Total_sqr_footage'] = (features['BsmtFinSF1'] + features['BsmtFinSF2'] +
features['1stFlrSF'] + features['2ndFlrSF'])
features['Total_Bathrooms'] = (features['FullBath'] + (0.5 * features['HalfBath']) +
features['BsmtFullBath'] + (0.5 * features['BsmtHalfBath']))
features['Total_porch_sf'] = (features['OpenPorchSF'] + features['3SsnPorch'] +
features['EnclosedPorch'] + features['ScreenPorch'] +
features['WoodDeckSF'])
# simplified features
features['haspool'] = features['PoolArea'].apply(lambda x: 1 if x > 0 else 0)
features['has2ndfloor'] = features['2ndFlrSF'].apply(lambda x: 1 if x > 0 else 0)
features['hasgarage'] = features['GarageArea'].apply(lambda x: 1 if x > 0 else 0)
features['hasbsmt'] = features['TotalBsmtSF'].apply(lambda x: 1 if x > 0 else 0)
features['hasfireplace'] = features['Fireplaces'].apply(lambda x: 1 if x > 0 else 0)
print(features.shape)
final_features = pd.get_dummies(features).reset_index(drop=True)
print(final_features.shape)
X = final_features.iloc[:len(y), :]
X_sub = final_features.iloc[len(X):, :]
print('X', X.shape, 'y', y.shape, 'X_sub', X_sub.shape)
outliers = [30, 88, 462, 631, 1322]
X = X.drop(X.index[outliers])
y = y.drop(y.index[outliers])
overfit = []
for i in X.columns:
counts = X[i].value_counts()
zeros = counts.iloc[0]
if zeros / len(X) * 100 > 99.94:
overfit.append(i)
overfit = list(overfit)
#overfit.append('MSZoning_C (all)')
X = X.drop(overfit, axis=1).copy()
X_sub = X_sub.drop(overfit, axis=1).copy()
print('X', X.shape, 'y', y.shape, 'X_sub', X_sub.shape)
# ################## ML ########################################
print('START ML', datetime.now(), )
kfolds = KFold(n_splits=10, shuffle=True, random_state=42)
# rmsle
def rmsle(y, y_pred):
return np.sqrt(mean_squared_error(y, y_pred))
# build our model scoring function
def cv_rmse(model, X=X):
rmse = np.sqrt(-cross_val_score(model, X, y,
scoring="neg_mean_squared_error",
cv=kfolds))
return (rmse)
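# cv_rmse returns one RMSE per fold (10 values with the KFold above); the score printouts
# below report their mean and standard deviation.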
# setup models
alphas_alt = [14.5, 14.6, 14.7, 14.8, 14.9, 15, 15.1, 15.2, 15.3, 15.4, 15.5]
alphas2 = [5e-05, 0.0001, 0.0002, 0.0003, 0.0004, 0.0005, 0.0006, 0.0007, 0.0008]
e_alphas = [0.0001, 0.0002, 0.0003, 0.0004, 0.0005, 0.0006, 0.0007]
e_l1ratio = [0.8, 0.85, 0.9, 0.95, 0.99, 1]
ridge = make_pipeline(RobustScaler(),
RidgeCV(alphas=alphas_alt, cv=kfolds,))
lasso = make_pipeline(RobustScaler(),
LassoCV(max_iter=1e7, alphas=alphas2,
random_state=42, cv=kfolds))
elasticnet = make_pipeline(RobustScaler(),
ElasticNetCV(max_iter=1e7, alphas=e_alphas,
cv=kfolds, random_state=42, l1_ratio=e_l1ratio))
svr = make_pipeline(RobustScaler(),
SVR(C= 20, epsilon= 0.008, gamma=0.0003,))
gbr = GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05,
max_depth=4, max_features='sqrt',
min_samples_leaf=15, min_samples_split=10,
loss='huber', random_state =42)
lgb_params = {'objective':'regression','num_leaves':4,'learning_rate':0.01,'n_estimators':5000,
'max_bin':200,'bagging_fraction':0.75, 'bagging_freq':5,'bagging_seed':7,'feature_fraction':0.2,
'feature_fraction_seed':7,'verbose':-1,#min_data_in_leaf=2,#min_sum_hessian_in_leaf=11
}
lightgbm = LGBMRegressor(**lgb_params)
xgboost = XGBRegressor(learning_rate=0.01, n_estimators=3460,
max_depth=3, min_child_weight=0,
gamma=0, subsample=0.7,
colsample_bytree=0.7,
objective='reg:linear', nthread=-1,
scale_pos_weight=1, seed=27,
reg_alpha=0.00006, random_state=42)
# stack
stack_gen = StackingCVRegressor(regressors=(ridge, lasso, elasticnet,
gbr, xgboost, lightgbm),
meta_regressor=xgboost,
use_features_in_secondary=True)
print('TEST score on CV')
score = cv_rmse(lightgbm)
print("Lightgbm score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()), datetime.now(), )
if False:
score = cv_rmse(ridge)
print("Kernel Ridge score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()), datetime.now(), )
score = cv_rmse(lasso)
print("Lasso score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()), datetime.now(), )
score = cv_rmse(elasticnet)
print("ElasticNet score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()), datetime.now(), )
score = cv_rmse(svr)
print("SVR score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()), datetime.now(), )
score = cv_rmse(gbr)
print("GradientBoosting score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()), datetime.now(), )
score = cv_rmse(xgboost)
print("Xgboost score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()), datetime.now(), )
print('START Fit')
print(datetime.now(), 'StackingCVRegressor')
stack_gen_model = stack_gen.fit(np.array(X), np.array(y))
print(datetime.now(), 'elasticnet')
elastic_model_full_data = elasticnet.fit(X, y)
print(datetime.now(), 'lasso')
lasso_model_full_data = lasso.fit(X, y)
print(datetime.now(), 'ridge')
ridge_model_full_data = ridge.fit(X, y)
print(datetime.now(), 'svr')
svr_model_full_data = svr.fit(X, y)
print(datetime.now(), 'GradientBoosting')
gbr_model_full_data = gbr.fit(X, y)
print(datetime.now(), 'xgboost')
xgb_model_full_data = xgboost.fit(X, y)
print(datetime.now(), 'lightgbm')
lgb_model_full_data = lightgbm.fit(X, y)
input("Before blend_models_predict: ")
def blend_models_predict(X):
return ((0.1 * elastic_model_full_data.predict(X)) + \
(0.05 * lasso_model_full_data.predict(X)) + \
(0.1 * ridge_model_full_data.predict(X)) + \
(0.1 * svr_model_full_data.predict(X)) + \
(0.1 * gbr_model_full_data.predict(X)) + \
(0.15 * xgb_model_full_data.predict(X)) + \
(0.1 * lgb_model_full_data.predict(X)) + \
(0.3 * stack_gen_model.predict(np.array(X))))
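# The blend weights above (0.1 + 0.05 + 0.1 + 0.1 + 0.1 + 0.15 + 0.1 + 0.3) sum to 1.0,
# so the blended prediction stays on the same log-price scale as the individual models.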
print('RMSLE score on train data:')
print(rmsle(y, blend_models_predict(X)))
print('Predict submission', datetime.now(),)
submission = pd.read_csv(f"{data_root}/sample_submission.csv")
submission.iloc[:,1] = np.floor(np.expm1(blend_models_predict(X_sub)))
# this kernel gave a score 0.114
# let's up it by mixing with the top kernels
print('Blend with Top Kernals submissions', datetime.now(),)
sub_1 = pd.read_csv(f'{data_root}/top-10-0-10943-stacking-mice-and-brutal-force/House_Prices_submit.csv')
sub_2 = pd.read_csv(f'{data_root}/hybrid-svm-benchmark-approach-0-11180-lb-top-2/hybrid_solution.csv')
sub_3 = | pd.read_csv(f'{data_root}/lasso-model-for-regression-problem/lasso_sol22_Median.csv') | pandas.read_csv |
# This file contains the following function(s): find_duplicates, getnoneditable, countsCDSedits
import pandas as pd
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
from check_CDS import remove_dup
def find_duplicates(df_all):
"""finds the count of the “unique” number of nonsense, nonsense (stop), and missense edits for each CDS.
“Not unique” is defined as the same type of edit at the same amino acid number within the CDS. This function
returns a dataframe (df_CDS) with the CDSs as the row indexes and the unique edit types as the columns. """
Locusarray = df_all.Locus.unique() # can index into
dictCDS = {} # The key is the CDS, and the values is the list of the row index
    dictCDScount = {}  # key: CDS; value: [unique Nonsense count, unique Missense count, unique Nonsense (stop) count]
for CDS in Locusarray:
# get index (row id) of the rows with this CDS
listCDS = df_all.index[df_all['Locus'] == CDS].tolist()
dictCDS[CDS] = listCDS
for cds in dictCDS.keys():
        Templist = []  # [[AA number, [Old AA, New AA]], ...]
countnonsense = 0
countstop = 0
countmissense = 0
for row in dictCDS[cds]: # for every row in the list of indexes
#print(row)
AAnum = df_all.loc[row, "AA Number"]
OAA = df_all.loc[row, "Old AA Code"]
NAA = df_all.loc[row, "New AA Code"]
Term = df_all.loc[row, "Term"]
tempcode = [AAnum,[OAA,NAA]]
if tempcode not in Templist:
Templist.append(tempcode)
if Term == "Missense":
countmissense+=1
elif Term == "Nonsense":
countnonsense+=1
else:
countstop+=1
#print(Templist)
dictCDScount[cds] = [countnonsense,countmissense,countstop]
countCDSunique_df = pd.DataFrame.from_dict(dictCDScount, orient='index')
# countCDSunique_df.rename(columns={0: 'Unique Nonsense', 1: 'Unique Missense', 2: 'Unique Nonsense (stop)'}, inplace=True)
countCDSunique_df.rename(columns={0: 'Unique Nonsense', 1: 'Unique Missense', 2: 'Unique Nonsense (stop)'}, inplace=True)
return countCDSunique_df
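# Minimal sketch of how find_duplicates behaves on a toy frame (the values are invented;
# only the column names match the ones accessed above):
def _example_find_duplicates():
    toy = pd.DataFrame({
        'Locus': ['g1', 'g1', 'g1'],
        'AA Number': [10, 10, 25],
        'Old AA Code': ['Q', 'Q', 'W'],
        'New AA Code': ['*', '*', '*'],
        'Term': ['Nonsense', 'Nonsense', 'Nonsense'],
    })
    # Rows 0 and 1 are the same edit at the same position, so g1 reports
    # 2 unique nonsense edits (positions 10 and 25), not 3.
    return find_duplicates(toy)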
def getnoneditable(df_all, dictCDSinfo):
"""makes a dictionary of all the CDS that are not editable and the count based on top and bottom strand."""
TotalCDSlist = dictCDSinfo.keys() # list of all the genes
EditedCDSlist = df_all.loc[:, "Locus"].to_list()
EditedCDSlistnondup = remove_dup(EditedCDSlist)
    nonedit_dict = {}  # key is the CDS; value is [strand, number of editing sites]
count_T = 0 # counter for the non-editable on top strand
count_B = 0 # counter for the non-editable on bottom strand
for gene in TotalCDSlist:
if gene not in EditedCDSlistnondup:
nonedit_strand = dictCDSinfo[gene][1]
nonedit_dict[gene] = [nonedit_strand, 0]
if nonedit_strand == "+":
count_T +=1
else:
count_B +=1
return nonedit_dict, count_T, count_B
def countsCDSedits(countCDSdf_all,df_all,dictCDSinfo):
"""Returns a dataframe (with all CDSs, their strand, the total number of editing sites, the unique nonsense,
missense and nonsense (stop) edits), strand of the CDS, the number/count of prematurely stoppable CDSs in
top strand, the number/count of prematurely stoppable CDSs in bottom strand, the number/count of non-editable CDSs
on the top strand, the number/count of non-editable CDSs on the bottom strand. The function also prints the CDSs
that are not stoppable and the total count. This function uses both find_duplicates and getnoneditable."""
# Find which strand the CDSs are on
# Makes a new list with the order of the CDSs in the combined CDS dataframe (countCDSdf_all)
# Then you add the "strand_list" to the dataframe
strand_list = []
for i in countCDSdf_all.index:
# i = the CDS/locus
strand = dictCDSinfo[i][1]
strand_list.append(strand)
countCDSdf_all.insert(0, "Strand", strand_list, True)
# gets a dictionary of non-editable genes and the number based on top/bottom strand
noneditCDSdict, count_nonedit_T, count_nonedit_B = getnoneditable(df_all, dictCDSinfo)
noneditCDS_df = pd.DataFrame.from_dict(noneditCDSdict, orient='index', columns=['Strand', "Total Number Editing Sites"])
allCDS_df =pd.concat([countCDSdf_all, noneditCDS_df], sort=False)
#find the unique counts
countCDSunique_df = find_duplicates(df_all)
countCDS_df = | pd.concat([allCDS_df, countCDSunique_df], axis=1, sort=False) | pandas.concat |
from .metrics import accuracy
from .metrics import topk_acc
from .metrics import generalized_distance_matrix
from .metrics import generalized_distance_matrix_torch
from .chemspace import get_drug_batch
from typing import Optional, Sequence, Tuple, Union
import scipy.io as sio
import scipy.stats as st
from scipy import sparse
import numpy as np
import pandas as pd
import anndata as ad
import seaborn as sns
import networkx as nx
import community
import toolz as tz
import tqdm
import os
import collections
from sklearn import metrics
from sklearn.utils import sparsefuncs
from sklearn.neighbors import kneighbors_graph
from sklearn.mixture import GaussianMixture as GMM
from joblib import Parallel, delayed
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.data import Dataset, IterableDataset, DataLoader
import torch_geometric
from torch_geometric.data import Batch
from rdkit import Chem
from rdkit.Chem import AllChem, Draw
import matplotlib.pyplot as plt
# TO-DO: Refactor trainers to work with Data objects so that models are not
# hard-coded around their number of inputs (e.g. supervised {x, y},
# cond_generator {x, g}, etc.).
class Data:
"""
Abstract data class to wrap data in ML models.
"""
def __init__(
self,
x,
y=None,
g=None):
"""
Params
------
x (np.array | torch.tensor):
            Input data to the model.
        y (np.array | torch.tensor, optional):
            Supervised targets, if any.
        g (optional):
            Extra conditioning input (e.g. for a conditional generator), if any.
        """
self.x = x
self.y = y
self.g = g
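# Minimal usage sketch for the Data wrapper (shapes are placeholders):
#   batch = Data(x=torch.randn(8, 16), y=torch.zeros(8, dtype=torch.long))
#   batch.x.shape, batch.y.shape   # -> torch.Size([8, 16]), torch.Size([8])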
def train_supervised_gcn(
model:nn.Module,
data:torch_geometric.data.Data,
loss_fn,
optimizer,
multiclass = False,
n_out = 1
)->Tuple[float, float]:
"""
Single fwd-bwd pass on GraphConvNet model.
Returns loss and accuracy.
"""
y_true = torch.tensor(data.y, dtype = torch.long)
optimizer.zero_grad()
y_pred = model(data)
if multiclass:
loss = loss_fn(y_pred, y_true)
y_hat = y_pred.argmax(dim = 1)
acc = accuracy(y_hat, y_true)
else:
loss = loss_fn(
y_pred.float(),
y_true.reshape(-1, n_out).float()
)
acc = accuracy(y_pred, y_true)
loss.backward()
optimizer.step()
return loss, acc
def val_supervised_gcn(
model, data, loss_fn, multiclass = False, n_out = 1
)-> float:
y_pred = model(data)
#y_true = torch.from_numpy(np.array(data.y, dtype=np.int16)) #, device = device)
y_true = torch.tensor(data.y, dtype = torch.long)
if multiclass:
loss = loss_fn(y_pred, y_true)
y_hat = y_pred.argmax(dim = 1)
acc = accuracy(y_hat, y_true)
else:
        loss = loss_fn(
y_pred.float(),
y_true.reshape(-1, n_out).float()
)
acc = accuracy(y_pred, y_true)
return loss.mean(), acc
def supervised_trainer_gcn(
n_epochs:int,
train_loader,
val_loader,
model,
criterion,
optimizer,
multiclass= False,
n_classes = 1,
logs_per_epoch = 5,
model_dir:str = None,
model_name:str = None,
early_stopping_tol:float = 0.3,
force_cpu=False
)-> Tuple[list, np.ndarray, np.ndarray]:
"""
Wrapper function to train a GNN, returns train and val loss, and val accuracy.
Currently designed for classification problems.
Params
------
n_epochs (int)
Number of forward-backward passes through all the training data.
train_loader, val_loader
torch_geometric.data.Dataloaders of training and validation set.
The validation set is used for estimating model convergence.
model (nn.Module)
Supervised neural net model.
criterion (torch.nn.modules.loss object)
Loss function.
optimizer (torch.optim object)
Optimizer, e.g. Adam or RMSProp.
multiclass (bool, default = False)
Whether the model is a softmax classification model.
n_classes (int, default = 1)
        Output dimensionality of the model.
model_dir (str, default = None)
Path to store trained models.
If set to None it will not store the model's weights.
model_name (str, default = None)
Filename of the model to be stored. If set to None and `model_dir` is specified,
the model will be stored as `model.pt`.
early_stopping_tol (float, default = 0.1)
Tolerance to stop the training.
It is used as the fractional increase in the validation loss
in order to stop the training. I.e. in pseudocode:
Stop if val_loss[i] > (1+early_stopping_tol)*val_loss[i-1]
The higher the value the more tolerant to run for the number of epochs.
If the value is small the traning loop can be too sensitive to small
increases in the validation loss.
"""
batch_size = train_loader.batch_size
print_every = np.floor(train_loader.dataset.__len__() / batch_size / logs_per_epoch) # minibatches
train_loss_vector = [] # to store training loss
val_loss_vector = np.empty(shape = n_epochs)
val_acc_vector = np.empty(shape = n_epochs)
cuda = False if force_cpu else torch.cuda.is_available()
if cuda and not force_cpu:
device = try_gpu()
torch.cuda.set_device(device)
model = model.to(device)
for epoch in np.arange(n_epochs):
running_loss = 0
# TRAINING LOOP
for ix, data in tqdm.tqdm(enumerate(train_loader)):
#input_tensor = data.view(batch_size, -1).float()
if cuda:
#data.edge_attr = data.edge_attr.cuda()
data.edge_index = data.edge_index.cuda()
data.x = data.x.cuda()
data.y = torch.tensor(data.y, device = device)
data.ptr = data.ptr.cuda()
data.batch = data.batch.cuda()
train_loss, train_acc = train_supervised_gcn(
model,
data, # graph and label in data object
criterion,
optimizer,
multiclass=multiclass,
n_out =n_classes
)
running_loss += train_loss.item()
# Print loss
if ix % print_every == print_every -1 :
# Print average loss
print('[%d, %5d] loss: %.3f' %
(epoch + 1, ix+1, running_loss / print_every))
train_loss_vector.append(running_loss / print_every)
# Reinitialize loss
running_loss = 0.0
# VALIDATION LOOP
model.eval()
with torch.no_grad():
validation_loss = []
val_accuracy = []
for i, data in enumerate(tqdm.tqdm(val_loader)):
if cuda:
data.edge_attr = data.edge_attr.cuda()
data.edge_index = data.edge_index.cuda()
data.x = data.x.cuda()
data.y = torch.tensor(data.y, device = device)
data.ptr = data.ptr.cuda()
data.batch = data.batch.cuda()
val_loss, val_acc = val_supervised_gcn(
model, data, criterion, multiclass, n_classes
)
validation_loss.append(val_loss)
val_accuracy.append(val_acc)
mean_val_loss = torch.tensor(validation_loss).mean()
mean_accuracy = torch.tensor(val_accuracy).mean()
val_loss_vector[epoch] = mean_val_loss
val_acc_vector[epoch] = mean_accuracy
print('Val. loss %.3f'% mean_val_loss)
print('Val. acc %.3f'% (mean_accuracy*100))
# EARLY STOPPING LOOP
if epoch > 0:
if val_loss_vector[epoch] > (1+early_stopping_tol)*val_loss_vector[epoch-1]:
print('Finished by early stopping at epoch %d'%(epoch))
return train_loss_vector, val_loss_vector, val_acc_vector
# SAVE MODEL
if model_dir is not None:
if not os.path.exists(model_dir):
os.mkdir(model_dir)
if model_name is not None:
torch.save(
model.state_dict(),
os.path.join(model_dir, model_name + '_' + str(epoch) + '.pt')
)
else:
torch.save(
model.state_dict(),
os.path.join(model_dir, 'model' + '_' + str(epoch) + '.pt')
)
print('Finished training')
return train_loss_vector, val_loss_vector, val_acc_vector
def train_supervised(
model,
input_tensor,
y_true,
loss_fn,
optimizer,
multiclass =False,
n_out = 1,
):
"""
Wrapper function to make forward and backward pass with minibatch
using a supervised model (classification or regression).
Params
------
n_out (int, default = 1)
        Output dimensionality. Leave as 1 for multiclass classification,
        i.e. when the output is a probability distribution over classes (e.g. MNIST).
"""
# Zero out grads
model.zero_grad()
y_pred = model(input_tensor)
#Note that if it's a multiclass classification (i.e. the output is a
# probability distribution over classes) the loss_fn
# nn.NLLLoss(y_pred, y_true) uses as input y_pred.size = (n_batch, n_classes)
# and y_true.size = (n_batch), that's why it doesn't get reshaped.
if multiclass:
loss = loss_fn(y_pred, y_true)
y_hat = y_pred.argmax(dim = 1)
acc = accuracy(y_hat, y_true)
else: # Backprop error
loss = loss_fn(y_pred, y_true.view(-1, n_out).float())
try:
acc = accuracy(y_pred, y_true.view(-1, n_out).float())
except:
acc = None
loss.backward()
# Update weights
optimizer.step()
return loss, acc
def validation_supervised(model, input_tensor, y_true, loss_fn, multiclass =False, n_classes= 1):
"""
Returns average loss for an input batch of data with a supervised model.
If running on multiclass mode, it also returns the accuracy.
"""
y_pred = model(input_tensor.float())
if multiclass:
loss = loss_fn(y_pred, y_true)
y_hat = y_pred.argmax(dim = 1)
acc = accuracy(y_hat, y_true)
else:
loss = loss_fn(y_pred, y_true.view(-1, n_classes).float())
try:
            acc = accuracy(y_pred, y_true.view(-1, n_classes).float())
except:
acc = None
return loss.mean().item(), acc
def supervised_trainer(
n_epochs:int,
train_loader:DataLoader,
val_loader:DataLoader,
model:nn.Module,
criterion,
optimizer,
multiclass:bool = False,
n_classes:int = 1,
logs_per_epoch:int = 5,
train_fn:callable = train_supervised,
model_dir:str = None,
model_name:str = None,
early_stopping_tol:float = 0.2,
**kwargs
):
"""
Wrapper function to train a supervised model for n_epochs.
Currently designed for classification and regression.
Returns train loss, validation loss and accuracy.
Params
------
n_epochs (int)
Number of forward-backward passes through all the training data.
train_loader, val_loader
Torch dataloaders of training and validation set. The validation set
is used for estimating model convergence.
model (nn.Module)
Supervised neural net model.
criterion (torch.nn.modules.loss object)
Loss function.
optimizer (torch.optim object)
Optimizer, e.g. Adam or RMSProp.
multiclass (bool, default = False)
Whether the model is a softmax classification model.
n_classes (int, default = 1)
        Output dimensionality. Leave as 1 for multiclass classification,
        i.e. when the output is a probability distribution over classes (e.g. MNIST).
model_dir (str, default = None)
Path to store trained models. If set to None it will not store the model weights.
model_name (str, default = None)
Filename of the model to be stored. If set to None and `model_dir` is specified,
the model will be stored as `model.pt`
early_stopping_tol (float, default = 0.1)
Tolerance to stop the training.
It is used as the fractional increase in the validation loss
in order to stop the training. I.e. in pseudocode:
Stop if val_loss[i] > (1+early_stopping_tol)*val_loss[i-1]
The higher the value the more tolerant to run for the number of epochs.
If the value is small the traning loop can be too sensitive to small
increases in the validation loss.
**kwargs
All kwargs go to the train_fn and val_fn functions.
Returns
-------
train_loss_vector(array-like)
List with loss at every minibatch, of size (minibatch*n_epochs).
    val_loss_vector(array-like)
        Numpy array with validation loss for every epoch.
    val_acc_vector(array-like)
        Numpy array with validation accuracy for every epoch.
    """
batch_size = train_loader.batch_size
print_every = np.floor(train_loader.dataset.__len__() / batch_size / logs_per_epoch) # minibatches
train_loss_vector = [] # to store training loss
val_loss_vector = np.empty(shape = n_epochs)
val_acc_vector = np.empty(shape = n_epochs)
cuda = torch.cuda.is_available()
if cuda:
device = try_gpu()
torch.cuda.set_device(device)
model = model.to(device)
for epoch in np.arange(n_epochs):
running_loss = 0
# TRAINING LOOP
for ix, (data, y_true) in enumerate(tqdm.tqdm(train_loader)):
if len(data.shape)<4:
data = data.view(batch_size, -1).float()
if cuda:
data = data.cuda(device = device)
y_true = y_true.cuda(device = device)
train_loss, train_acc = train_fn(
model,
data,
y_true,
criterion,
optimizer,
multiclass=multiclass,
n_out =n_classes,
**kwargs
)
running_loss += train_loss.item()
# Print loss
if ix % print_every == print_every -1 :
# Print average loss
print('[%d, %6d] loss: %.3f' %
(epoch + 1, ix+1, running_loss / print_every))
train_loss_vector.append(running_loss / print_every)
# Reinitialize loss
running_loss = 0.0
# VALIDATION LOOP
with torch.no_grad():
model.eval()
validation_loss = []
validation_accuracy = []
for i, (data, y_true) in enumerate(tqdm.tqdm(val_loader)):
if len(data.shape)<4: # if not images
data = data.view(batch_size, -1).float()
if cuda:
data = data.cuda(device = device)
y_true = y_true.cuda(device = device)
val_loss, val_acc = validation_supervised(
model, data, y_true, criterion, multiclass, n_classes
)
validation_loss.append(val_loss)
validation_accuracy.append(val_acc)
mean_val_loss = torch.tensor(validation_loss).mean().item()
mean_val_acc = torch.tensor(validation_accuracy).mean().item()
val_loss_vector[epoch] = mean_val_loss
val_acc_vector[epoch] = mean_val_acc
print('Val. loss %.3f'% mean_val_loss)
print('Val. accuracy %.3f'% (mean_val_acc*100))
# EARLY STOPPING LOOP
if epoch > 0:
if val_loss_vector[epoch] > (1+early_stopping_tol)*val_loss_vector[epoch-1]:
print('Finished by early stopping at epoch %d'%(epoch))
                return train_loss_vector, val_loss_vector, val_acc_vector
# SAVE MODEL
if model_dir is not None:
if not os.path.exists(model_dir):
os.mkdir(model_dir)
if model_name is not None:
torch.save(
model.state_dict(),
os.path.join(model_dir, model_name + '_' + str(epoch) + '.pt')
)
else:
torch.save(
model.state_dict(),
os.path.join(model_dir, 'model' + '_' + str(epoch) + '.pt')
)
print('Finished training')
return train_loss_vector, val_loss_vector, val_acc_vector
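# Hypothetical call (all objects are placeholders for the caller's own data and model):
#   train_hist, val_hist, val_acc = supervised_trainer(
#       n_epochs=10, train_loader=train_dl, val_loader=val_dl, model=net,
#       criterion=nn.NLLLoss(), optimizer=torch.optim.Adam(net.parameters(), lr=1e-3),
#       multiclass=True, n_classes=10)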
def print_loss_in_loop(epoch, idx_batch, running_loss, print_every, message='loss'):
print_msg = '[%d, %5d] ' + message + ' : %.3f'
print(print_msg%\
        (epoch + 1, idx_batch+1, running_loss / print_every))
def supervised_model_predict(
model:nn.Module,
data_loader,
criterion,
n_points = None,
n_feats= None,
multiclass=False,
n_outputs =1,
score = True
):
"""
Analog to model.predict_proba() from sklearn. Returns a prediction vector given a torch dataloder
and model. It is designed for working with basic supervised models like binary or multilabel
classification, and regression.
Params
------
model (torch.nn.model)
Trained supervised model.
data_loader
n_points (int)
Number of instances (rows) in the dataset. If not provided, the function will
try to extract it from the dataloader.
n_feats (int)
Input dimensions for the model / number of columns in the dataset. If not provided,
the function will try to extract it from the dataloader.
n_outputs (int, default = 1)
Number of outputs of the model. Defaults to 1 dim output, for regression or
binary classification.
Returns
-------
y_pred (np.array)
Array with raw predictions from a forward pass of the model.
"""
    if n_points is None and n_feats is None:
try:
n_points, n_feats = data_loader.dataset.data.shape
except:
print('Need to supply number of datapoints and features in input data.')
batch_size = data_loader.batch_size
cuda = torch.cuda.is_available()
device = try_gpu()
model = model.to(device)
# Initialize predictions array
y_pred = torch.zeros(n_points, n_outputs)
if score:
cum_sum_loss = 0
cum_sum_acc = 0
with torch.no_grad():
for ix, (x, y) in tqdm.tqdm(enumerate(data_loader)):
if cuda:
x= x.cuda()
if cuda and score:
y =y.cuda()
# Reshape input for feeding to model
x = x.view(-1, n_feats)
outputs = model(x.float())
y_pred[ix * batch_size : ix * batch_size + batch_size, :] = outputs
if score:
if multiclass:
if cuda:
mean_loss = criterion(outputs, y).mean().cpu().detach().numpy()
else:
mean_loss = criterion(outputs, y).mean().detach().numpy()
acc = accuracy(y, outputs.argmax(axis = 1))#.item()
else:
if cuda:
mean_loss = criterion(outputs, y.view(-1, n_outputs).float()).mean().cpu().detach().numpy()
else:
mean_loss = criterion(outputs, y.view(-1, n_outputs).float()).mean().detach().numpy()
acc = accuracy(y.view(-1, n_outputs), outputs.argmax(axis = 1))#.mean().item()
cum_sum_loss+= mean_loss
cum_sum_acc +=acc
moving_avg_acc = cum_sum_acc / (ix+1)
moving_avg_loss = cum_sum_loss / (ix + 1)
if score:
print("Mean accuracy: %.2f" %moving_avg_acc)
print("Mean validation loss: %.2f"%moving_avg_loss)
return y_pred.detach().numpy()
def get_positive_negative_indices_batch(
y_true:torch.Tensor, index_dict:dict, cuda:bool = None
)->Tuple[np.array, np.array, np.array]:
"""
Returns indices for positive and negative anchors,
to use in metric learning using hinge triplet loss,
given labels (y_true) for multiclass classification.
Params
------
y_true(torch.Tensor)
Labels from sample codes.
cuda (bool, default = None)
Whether cuda is available for use.
Returns
-------
positive_anchor_ixs, negative_anchor_ixs, perm_labels
"""
max_index = max(index_dict.keys())
# Get cuda status
if cuda is None:
cuda = torch.cuda.is_available()
# Send labels to cpu
if cuda:
y_true = y_true.cpu()
# Make labels from torch.tensor -> numpy array
labels = y_true.numpy()
# Shuffle labels
perm_labels= np.random.permutation(labels)
# Check if any of shuffled labels didn't change
ix_eq = (labels == perm_labels)
# Get the indices where those unchanged labels reside
ix_to_flip = np.nonzero(ix_eq)[0]
# Enter loop if the permuted labels
# and the original labels coincide in an entry
if len(ix_to_flip) >= 1:
# If any of the labels to flip is the last code
# subtract as adding would result in error
label_flip_max_code = np.any(perm_labels[ix_to_flip] == max_index)
label_flip_min_code = np.any(perm_labels[ix_to_flip] == 0)
# Unlikely case where the batch contains both
# the first and last index
# this will cause the hinge loss to be the margin
if label_flip_max_code and label_flip_min_code:
print('At least one label in the positive and negative are the same')
pass
elif label_flip_max_code and not label_flip_min_code:
perm_labels[ix_to_flip] = perm_labels[ix_to_flip] - 1
# Fall back to add an index
else :
perm_labels[ix_to_flip] = perm_labels[ix_to_flip] + 1
# Check that all labels are different
#assert np.all(labels != perm_labels)
# Get anchor indices for samples
positive_anchor_ixs = [np.random.choice(index_dict[l], size = 1)[0] for l in labels]
negative_anchor_ixs = [np.random.choice(index_dict[l], size = 1)[0] for l in perm_labels]
return positive_anchor_ixs, negative_anchor_ixs, perm_labels
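# Toy illustration of the permutation trick above (index_dict values normally come from
# adata.obs): with labels [0, 2, 2], a shuffle such as [2, 2, 0] collides at position 1,
# so that entry is nudged to a neighbouring class before negative anchors are drawn
# from index_dict.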
class JointEmbeddingTrainer:
"""
Class for training the joint embedding model.
Notes
-----
* Assumes both adata and df_drugs have coinciding names in the column
`drug_name`. Also assumes that adata has a column called `sample_codes`,
that are the numerical encoding of each drug name, i.e. that there's a
mapping {'drug_1': 0, ..., 'drug_n': (n-1)}.
* `indices` are needed when using a graph dataloader
(no .data attribute in dataloader object).
"""
def __init__(
self,
model,
adata,
df_drugs,
batch_size,
train_loader,
val_loader,
#index_dict_train:dict,
#index_dict_test:dict,
#name_to_mol:dict,
#ix_to_name:dict,
lr:float = 1e-5,
n_epochs:int = 20,
metric_learning:bool = True,
contrastive_learning:bool = True,
p_norm_metric:int = 2,
margin:float = 3.,
model_name:str = None,
model_dir:str = None,
extra_head= True,
indices=None,
force_cpu = False
):
"""
Params
------
adata(ad.AnnData)
Base anndata, contains both train and validation sets.
df_drugs(pd.DataFrame)
Pandas df containing mols in train and val sets.
regressor_loss (torch.nn.loss, default=None)
A torch loss function, e.g. nn.MSELoss
"""
#device = try_gpu()
#self.device = device
self.model = model
self.batch_size = batch_size
self.adata = adata
self.train_loader, self.val_loader = train_loader, val_loader
self.cuda = False if force_cpu else torch.cuda.is_available()
self.device = torch.device('cpu') if force_cpu == True else try_gpu()
if self.cuda:
self.model = self.model.to(self.device)
self.n_epochs = n_epochs
self.hinge_loss = nn.TripletMarginLoss(margin=margin, p=p_norm_metric)
self.criterion = nn.NLLLoss()
self.ordering_labels = torch.arange(batch_size).to(self.device)
self.contrastive = contrastive_learning #bool
self.metric = metric_learning #bool
self.n_train_batches = len(train_loader.dataset) // batch_size
self.n_test_batches = len(val_loader.dataset) // batch_size
self.optimizer = torch.optim.Adam(self.model.parameters(), lr = lr)
        if not self.contrastive and not self.metric:
raise AssertionError(
'Either one or both of contrastive learning and metric learning have to be active.'
)
self.model_name, self.model_dir = model_name, model_dir
if indices is not None:
self.index_dict_train = indices["train"]
self.index_dict_test = indices["test"]
else:
# Groupby on train adata
gb_train = train_loader.dataset.data.obs.groupby('sample_code')
index_dict_train = {}
for ix, data in gb_train:
index_dict_train[ix] = data.index.values
gb_test = val_loader.dataset.data.obs.groupby('sample_code')
index_dict_test = {}
for ix, data in gb_test:
index_dict_test[ix] = data.index.values
self.index_dict_train = index_dict_train
self.index_dict_test = index_dict_test
self.name_to_mol = dict(df_drugs[['drug_name', 'mol']].values)
self.ix_to_name = dict(adata.obs[['sample_code', 'drug_name']].values)
self.extra_head = extra_head
#if self.extra_head:
# self.regressor_loss = nn.MSELoss()
def contrastive_learning_loop(self, mol_embedding, cell_embedding):
"""Returns contrastive learning loss and cross-retrieval accuracy for a minibatch."""
# Make batch of molecular graphs
cell_embedding_norm = cell_embedding / cell_embedding.norm(dim= -1, keepdim = True)
mol_embedding_norm = mol_embedding / mol_embedding.norm(dim= -1, keepdim = True)
# Extract learnt scalar
logit_scale = self.model.logit_scale.exp()
# Get cosine similarities
# returns tensor of shape (mols, cells)
logits = logit_scale * mol_embedding_norm @ cell_embedding_norm.t()
# Get classification predictions across axes
y_pred_mols = F.log_softmax(logits, dim = 1)
y_pred_cells = F.log_softmax(logits, dim = 0)
# Calculate accuracies
mol_acc = accuracy(y_pred_mols.argmax(axis =1), self.ordering_labels)
cell_acc = accuracy(y_pred_cells.argmax(axis = 0), self.ordering_labels)
acc = (cell_acc + mol_acc)/ 2
# Compute contrastive learning loss
loss_mols = self.criterion(y_pred_mols, self.ordering_labels)
loss_cells = self.criterion(y_pred_cells, self.ordering_labels)
cl_loss = (loss_mols + loss_cells)/2
return cl_loss, acc
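    # The two log-softmaxes above form a symmetric, CLIP-style contrastive objective:
    # molecules are classified against cells along one axis and cells against molecules
    # along the other, and the two NLL terms are averaged.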
def metric_learning_loop(self, y_true, cell_embedding, mol_embedding):
"""Returns average hinge loss from cells2mols and mols2cells for a minibatch."""
pos_cell_ixs, neg_cell_ixs, perm_y_labels = get_positive_negative_indices_batch(
y_true, self.index_dict_train, cuda = self.cuda
)
# Get positive and negative anchors for cells
positive_anchors_cells = torch.from_numpy(self.adata[pos_cell_ixs].X.A)
negative_anchors_cells = torch.from_numpy(self.adata[neg_cell_ixs].X.A)
if self.cuda:
positive_anchors_cells = positive_anchors_cells.cuda()
negative_anchors_cells = negative_anchors_cells.cuda()
# Get negative anchors for molecules
permuted_molecule_batch = Batch.from_data_list(
get_drug_batch(
torch.from_numpy(perm_y_labels),
self.name_to_mol,
self.ix_to_name,
cuda = self.cuda
)
)
# Compute embeddings
positive_cell_embeddings = self.model.encode_cell(positive_anchors_cells)
negative_cell_embeddings = self.model.encode_cell(negative_anchors_cells)
permuted_molecule_embeddings = self.model.encode_molecule(permuted_molecule_batch)
# Compute metric learning loss
# (anchor, positive, negative)
hinge_cells_anchor = self.hinge_loss(
cell_embedding, mol_embedding, permuted_molecule_embeddings
)
hinge_mols_anchor = self.hinge_loss(
mol_embedding, positive_cell_embeddings, negative_cell_embeddings
)
metric_learning_loss = (hinge_cells_anchor + hinge_mols_anchor)/2
return metric_learning_loss
def mol_regressor_loop(self, mol_embedding, y_regressor, lambda_reg=1):
out = self.model.extra_head(mol_embedding)
reg_loss = self.regressor_loss(out, y_regressor)
return lambda_reg*reg_loss
def train_step(self, input_tensor, y_true):
"A single training step for a minibatch."
self.model.zero_grad()
if self.cuda:
input_tensor = input_tensor.cuda()
y_true = y_true.cuda()
# Make batch of molecular graphs
molecule_batch = Batch.from_data_list(
get_drug_batch(
y_true,
self.name_to_mol,
self.ix_to_name,
cuda = self.cuda
)
)
# Compute cell and molecule embeddings
cell_embedding = self.model.encode_cell(input_tensor.view(self.batch_size, -1).float())
mol_embedding = self.model.encode_molecule(molecule_batch)
if self.contrastive:
cl_loss, train_acc = self.contrastive_learning_loop(mol_embedding, cell_embedding)
if not self.metric:
cl_loss.backward()
self.optimizer.step()
results_dict = {
'train_loss': {
'contrastive_loss': cl_loss.item(),
'metric_learning_loss': None
},
'train_acc': train_acc
}
return results_dict
if self.metric:
metric_learning_loss = self.metric_learning_loop(y_true, cell_embedding, mol_embedding)
if not self.contrastive:
metric_learning_loss.backward()
self.optimizer.step()
results_dict = {
                'train_loss': {'contrastive_loss': None, 'metric_learning_loss': metric_learning_loss.item()},
'train_acc': None
}
return results_dict
#if self.contrastive and self.metric:
# both contrastive and metric learning active
loss = cl_loss + metric_learning_loss
loss.backward()
self.optimizer.step()
results_dict = {
"train_loss": {
"contrastive_loss": cl_loss.item(),
"metric_learning_loss": metric_learning_loss.item(),
},
"train_acc": train_acc,
}
return results_dict
@torch.no_grad()
def val_step(self, input_tensor, y_true):
"""
"""
#self.model.eval()
if self.cuda:
input_tensor = input_tensor.cuda()
y_true = y_true.cuda()
# Make batch of molecular graphs
molecule_batch = Batch.from_data_list(
get_drug_batch(
y_true,
self.name_to_mol,
self.ix_to_name,
cuda = self.cuda
)
)
# Compute cell and molecule embeddings
cell_embedding = self.model.encode_cell(input_tensor.view(self.batch_size, -1).float())
mol_embedding = self.model.encode_molecule(molecule_batch)
if self.contrastive:
cl_loss, test_acc = self.contrastive_learning_loop(mol_embedding, cell_embedding)
if not self.metric:
results_dict = {
'test_loss': {'contrastive_loss': cl_loss.item(),'metric_learning_loss': None},
'test_acc': test_acc
}
return results_dict
if self.metric:
metric_learning_loss = self.metric_learning_loop(y_true, cell_embedding, mol_embedding)
if not self.contrastive:
results_dict = {
                    'test_loss': {'contrastive_loss': None, 'metric_learning_loss': metric_learning_loss.item()},
'test_acc': None
}
return results_dict
#if self.contrastive and self.metric:
# else: both contrastive and metric learning active
loss = cl_loss + metric_learning_loss
results_dict = {
"test_loss": {
"contrastive_loss": cl_loss.item(),
"metric_learning_loss": metric_learning_loss.item(),
},
"test_acc": test_acc,
}
return results_dict
def train(self)-> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Trains the joint embedding model for n_epochs.
Returns the train and validation loss and accuracy as dataframes.
"""
df_train_loss, df_train_acc = pd.DataFrame(), pd.DataFrame()
df_test_loss, df_test_acc = pd.DataFrame(), pd.DataFrame()
#df_train_logs, df_test_logs = pd.DataFrame(), pd.DataFrame()
for epoch in np.arange(self.n_epochs):
self.model.train()
# Loop through minibatches from training dataloader
for ix, (input_tensor, y_true) in tqdm.tqdm(enumerate(self.train_loader)):
# Train step
results_dict_train = self.train_step(input_tensor, y_true)
df_train_loss = df_train_loss.append(
results_dict_train['train_loss'], ignore_index = True
)
df_train_acc = df_train_acc.append(
{'train_acc': results_dict_train['train_acc']}, ignore_index = True
)
#df_train_loss['epoch'] = epoch + 1
#df_train_acc['epoch'] = epoch +1
mean_cl = df_train_loss.contrastive_loss.mean()
mean_ml = df_train_loss.metric_learning_loss.mean()
mean_acc = df_train_acc.train_acc.mean()
print('Epoch %d \n'%(epoch+1))
print('--------------------')
print('Train contrastive loss: %.3f '%(mean_cl if mean_cl is not np.nan else 0.0))
print('Train metric learning loss: %.3f '%(mean_ml if mean_ml is not np.nan else 0.0))
print('Train accuracy: %.3f'%(mean_acc*100 if mean_acc is not np.nan else 0.0))
print('\n')
# Loop through mb from validation dataloader
self.model.eval()
for ix, (input_tensor, y_true) in tqdm.tqdm(enumerate(self.val_loader)):
# Val step
results_dict_test = self.val_step(input_tensor, y_true)
df_test_loss = df_test_loss.append(results_dict_test['test_loss'], ignore_index = True)
df_test_acc = df_test_acc.append(
{'test_acc': results_dict_test['test_acc']}, ignore_index = True
)
#df_test_loss['epoch'] = epoch + 1
#df_test_acc['epoch'] = epoch + 1
mean_cl_ = df_test_loss.contrastive_loss.mean()
mean_ml_ = df_test_loss.metric_learning_loss.mean()
mean_acc_ = df_test_acc.test_acc.mean()
print('Val contrastive loss: %.3f '%(mean_cl_ if mean_cl_ is not np.nan else 0.0))
print('Val metric learning loss: %.3f '%(mean_ml_ if mean_ml_ is not np.nan else 0.0))
print('Validation accuracy: %.3f'%(mean_acc_*100 if mean_acc_ is not np.nan else 0.0))
print('\n')
# SAVE MODEL
if self.model_dir is not None:
if not os.path.exists(self.model_dir):
os.mkdir(self.model_dir)
if self.model_name is not None:
torch.save(
self.model.state_dict(),
os.path.join(self.model_dir, self.model_name + '_' + str(epoch +1) + '.pt')
)
else:
torch.save(
self.model.state_dict(),
os.path.join(self.model_dir, 'model' + '_' + str(epoch +1) + '.pt')
)
# Summarize results
df_train_logs = pd.concat([df_train_loss, df_train_acc], axis = 1)
df_test_logs = pd.concat([df_test_loss, df_test_acc], axis = 1)
epoch_indicator_train = np.concatenate(
[np.repeat(epoch, self.n_train_batches) for epoch in np.arange(1, self.n_epochs+1)]
)
epoch_indicator_test = np.concatenate(
[np.repeat(epoch, self.n_test_batches) for epoch in np.arange(1, self.n_epochs +1)]
)
df_train_logs['epoch'] = epoch_indicator_train
df_test_logs['epoch'] = epoch_indicator_test
# Set logs as attributes
self.train_logs = df_train_logs
self.test_logs = df_test_logs
df_train_agg = df_train_logs.groupby('epoch').mean().reset_index()
df_test_agg = df_test_logs.groupby('epoch').mean().reset_index()
self.best_model_ix = int(df_test_agg.test_acc.argmax())
return df_train_logs, df_test_logs #df_train_agg, df_test_agg
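# Hypothetical end-to-end usage (every object here is a placeholder supplied by the caller):
#   trainer = JointEmbeddingTrainer(model, adata, df_drugs, batch_size=64,
#                                   train_loader=train_dl, val_loader=val_dl, n_epochs=5)
#   train_logs, test_logs = trainer.train()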
class JointEmbeddingTrainerV3(JointEmbeddingTrainer):
"""
Class for training the joint embedding using an extra regressor to combine
information from the molecule embedding and a profile of binding energies.
Designed to work with an MLP cell encoder.
    TO-DO: Generalize and integrate all JointEmbeddingTrainer classes into a single
    general one.
"""
def __init__(
self,
model,
adata,
df_drugs,
batch_size,
train_loader,
val_loader,
lr:float = 1e-5,
n_epochs:int = 20,
metric_learning:bool = True,
contrastive_learning:bool = True,
p_norm_metric:int = 2,
margin:float = 3.,
model_name:str = None,
model_dir:str = None,
extra_head= True,
lambda_reg = 1,
regressor_loss=None,
indices=None,
force_cpu=False
):
super().__init__(
model,
adata,
df_drugs,
batch_size,
train_loader,
val_loader,
lr = lr,
n_epochs = n_epochs,
metric_learning = metric_learning,
contrastive_learning = contrastive_learning,
p_norm_metric = p_norm_metric,
margin = margin,
model_name = model_name,
model_dir = model_dir,
extra_head = extra_head,
indices = indices,
force_cpu = force_cpu
)
self.lambda_reg = lambda_reg
self.regressor_loss = regressor_loss
def train_step(self, input_tensor, y_true, y_regressor=None):
loss=0
results_dict={ "train_loss": {} }
self.model.zero_grad()
# Place tensors in GPU if possible
if self.cuda:
input_tensor = input_tensor.cuda()
y_true = y_true.cuda()
y_regressor = y_regressor.cuda()
# Make batch of molecular graphs
molecule_batch = Batch.from_data_list(
get_drug_batch(y_true,self.name_to_mol,self.ix_to_name,cuda = self.cuda)
)
# Compute cell and molecule embeddings
cell_embedding = self.model.encode_cell(input_tensor.view(self.batch_size, -1).float())
mol_embedding = self.model.encode_molecule(molecule_batch)
# This is the part that changes:
h = torch.cat([mol_embedding, y_regressor], dim = -1)
mol_embedding = self.model.extra_head(h)
# Run through all modes of the model
if self.contrastive:
cl_loss, train_acc = self.contrastive_learning_loop(mol_embedding, cell_embedding)
loss+=cl_loss
results_dict["train_loss"]["contrastive_loss"]=cl_loss.item()
results_dict["train_acc"]=train_acc
else:
results_dict["train_loss"]["constastive_loss"]=None
if self.metric:
metric_learning_loss = self.metric_learning_loop(
y_true, cell_embedding, mol_embedding
)
loss+=metric_learning_loss
results_dict["train_loss"]["metric_learning_loss"]=metric_learning_loss.item()
if "train_acc" not in results_dict.keys():
results_dict["train_acc"]=None
else:
results_dict["train_loss"]["metric_learning_loss"]=None
if "train_acc" not in results_dict.keys():
results_dict["train_acc"]=None
# We don't take into account regressor loss,
# if self.extra_head:
# reg_loss=self.mol_regressor_loop(
# mol_embedding, y_regressor, lambda_reg=self.lambda_reg
# )
#
# loss+=reg_loss
#
# results_dict["train_loss"]["regressor_loss"]=reg_loss.item()
# if "train_acc" not in results_dict.keys():
# results_dict["train_acc"]=None
# else:
# results_dict["train_loss"]["regressor_loss"]=None
# if "train_acc" not in results_dict.keys():
# results_dict["train_acc"]=None
#Backprop and update weights
loss.backward()
self.optimizer.step()
return results_dict
@torch.no_grad()
def val_step(self, input_tensor, y_true, y_regressor=None):
self.model.eval()
results_dict={"test_loss": {}} # init results dictionary
# Place tensors in GPU if possible
if self.cuda:
input_tensor = input_tensor.cuda()
y_true = y_true.cuda()
y_regressor = y_regressor.cuda()
# Make batch of molecular graphs
molecule_batch = Batch.from_data_list(
get_drug_batch(y_true,self.name_to_mol,self.ix_to_name,cuda = self.cuda)
)
cell_embedding = self.model.encode_cell(input_tensor.view(self.batch_size, -1).float())
mol_embedding = self.model.encode_molecule(molecule_batch)
# Here's the part that changes
h = torch.cat([mol_embedding, y_regressor], dim = -1)
mol_embedding = self.model.extra_head(h)
# Run through all modes of the model
if self.contrastive:
cl_loss, test_acc = self.contrastive_learning_loop(mol_embedding, cell_embedding)
results_dict["test_loss"]["contrastive_loss"]=cl_loss.item()
results_dict["test_acc"]=test_acc
else:
results_dict["test_loss"]["constastive_loss"]=None
if self.metric:
metric_learning_loss = self.metric_learning_loop(
y_true, cell_embedding, mol_embedding
)
results_dict["test_loss"]["metric_learning_loss"]=metric_learning_loss.item()
if "test_acc" not in results_dict.keys():
results_dict["test_acc"]=None
else:
results_dict["test_loss"]["metric_learning_loss"]=None
if "test_acc" not in results_dict.keys():
results_dict["test_acc"]=None
## No need to log regresor loss
# if self.extra_head:
# reg_loss=self.mol_regressor_loop(
# mol_embedding, y_regressor, lambda_reg =self.lambda_reg
# )
#
# results_dict["test_loss"]["regressor_loss"]=reg_loss.item()
# if "test_acc" not in results_dict.keys():
# results_dict["test_acc"]=None
# else:
# results_dict["regressor_loss"]=None
# if "test_acc" not in results_dict.keys():
# results_dict["test_acc"]=None
return results_dict
def train(self)-> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Trains the joint embedding model for n_epochs.
Returns
-------
train_logs (pd.dataframe)
Train loss and accuracy.
validation_logs (pd.dataframe)
Val loss and accuracy.
"""
df_train_loss, df_train_acc = pd.DataFrame(), pd.DataFrame()
df_test_loss, df_test_acc = pd.DataFrame(), pd.DataFrame()
#df_train_logs, df_test_logs = pd.DataFrame(), pd.DataFrame()
for epoch in np.arange(self.n_epochs):
self.model.train()
# Loop through minibatches from training dataloader
for ix, (input_tensor, y_true, y_regressor) in tqdm.tqdm(enumerate(self.train_loader)):
# Train step
results_dict_train = self.train_step(input_tensor, y_true, y_regressor)
df_train_loss = df_train_loss.append(
results_dict_train['train_loss'], ignore_index = True
)
df_train_acc = df_train_acc.append(
{'train_acc': results_dict_train['train_acc']}, ignore_index = True
)
mean_cl = df_train_loss.contrastive_loss.mean()
mean_ml = df_train_loss.metric_learning_loss.mean()
#mean_mse = df_train_loss.regressor_loss.mean()
mean_acc = df_train_acc.train_acc.mean()
print('Epoch %d'%(epoch+1))
print('--------------------')
print('Train contrastive loss: %.3f '%(mean_cl if not pd.isna(mean_cl) else 0.0))
print('Train metric learning loss: %.3f '%(mean_ml if not pd.isna(mean_ml) else 0.0))
#print('Train regression loss: %.3f '%(mean_mse if mean_mse is not np.nan else 0.0))
print('Train accuracy: %.3f'%(mean_acc*100 if not pd.isna(mean_acc) else 0.0))
print('\n')
# Loop through mb from validation dataloader
self.model.eval()
for ix, (input_tensor, y_true, y_regressor) in tqdm.tqdm(enumerate(self.val_loader)):
# Val step
results_dict_test = self.val_step(input_tensor, y_true, y_regressor)
df_test_loss = df_test_loss.append(results_dict_test['test_loss'], ignore_index = True)
df_test_acc = df_test_acc.append(
{'test_acc': results_dict_test['test_acc']}, ignore_index = True
)
mean_cl_ = df_test_loss.contrastive_loss.mean()
mean_ml_ = df_test_loss.metric_learning_loss.mean()
#mean_val_mse = df_test_loss.regressor_loss.mean()
mean_acc_ = df_test_acc.test_acc.mean()
print('Val contrastive loss: %.3f '%(mean_cl_ if not pd.isna(mean_cl_) else 0.0))
print('Val metric learning loss: %.3f '%(mean_ml_ if not pd.isna(mean_ml_) else 0.0))
#print('Val regression loss: %.3f '%(mean_val_mse if mean_val_mse is not np.nan else 0.0))
print('Validation accuracy: %.3f'%(mean_acc_*100 if not pd.isna(mean_acc_) else 0.0))
print('\n')
# SAVE MODEL
if self.model_dir is not None:
if not os.path.exists(self.model_dir):
os.mkdir(self.model_dir)
if self.model_name is not None:
torch.save(
self.model.state_dict(),
os.path.join(self.model_dir, self.model_name + '_' + str(epoch +1) + '.pt')
)
else:
torch.save(
self.model.state_dict(),
os.path.join(self.model_dir, 'model' + '_' + str(epoch +1) + '.pt')
)
# Summarize results
df_train_logs = pd.concat([df_train_loss, df_train_acc], axis = 1)
df_test_logs = pd.concat([df_test_loss, df_test_acc], axis = 1)
epoch_indicator_train = np.concatenate(
[np.repeat(epoch, self.n_train_batches) for epoch in np.arange(1, self.n_epochs+1)]
)
epoch_indicator_test = np.concatenate(
[np.repeat(epoch, self.n_test_batches) for epoch in np.arange(1, self.n_epochs +1)]
)
df_train_logs['epoch'] = epoch_indicator_train
df_test_logs['epoch'] = epoch_indicator_test
# Set logs as attributes
self.train_logs = df_train_logs
self.test_logs = df_test_logs
df_train_agg = df_train_logs.groupby('epoch').mean().reset_index()
df_test_agg = df_test_logs.groupby('epoch').mean().reset_index()
self.best_model_ix = int(df_test_agg.test_acc.argmax())
return df_train_logs, df_test_logs
class JointEmbeddingTrainerV2(JointEmbeddingTrainer):
"""
Class for training the joint embedding model using an auxiliary regressor task.
Notes
-----
Assumes both adata and df_drugs have coinciding names in the column
`drug_name`. Also assumes that adata has a column called `sample_codes`,
that are the numerical encoding of each drug name, i.e. that there's a
mapping {'drug_1': 0, ..., 'drug_n': (n-1)}.
"""
def __init__(
self,
model,
adata,
df_drugs,
batch_size,
train_loader,
val_loader,
lr:float = 1e-5,
n_epochs:int = 20,
metric_learning:bool = True,
contrastive_learning:bool = True,
p_norm_metric:int = 2,
margin:float = 3.,
model_name:str = None,
model_dir:str = None,
extra_head= True,
lambda_reg = 1,
regressor_loss=None,
indices=None,
force_cpu=False
):
super().__init__(
model,
adata,
df_drugs,
batch_size,
train_loader,
val_loader,
lr = lr,
n_epochs = n_epochs,
metric_learning = metric_learning,
contrastive_learning = contrastive_learning,
p_norm_metric = p_norm_metric,
margin = margin,
model_name = model_name,
model_dir = model_dir,
extra_head= extra_head,
indices=indices,
force_cpu=force_cpu
)
self.lambda_reg = lambda_reg
self.regressor_loss = regressor_loss
def train_step(self, input_tensor, y_true, y_regressor=None):
loss=0
results_dict={ "train_loss": {} }
self.model.zero_grad()
# Place tensors in GPU if possible
if self.cuda:
input_tensor = input_tensor.cuda()
y_true = y_true.cuda()
y_regressor = y_regressor.cuda()
# Make batch of molecular graphs
molecule_batch = Batch.from_data_list(
get_drug_batch(y_true,self.name_to_mol,self.ix_to_name,cuda = self.cuda)
)
# Compute cell and molecule embeddings
cell_embedding = self.model.encode_cell(input_tensor.view(self.batch_size, -1).float())
mol_embedding = self.model.encode_molecule(molecule_batch)
# Run through all modes of the model
if self.contrastive:
cl_loss, train_acc = self.contrastive_learning_loop(mol_embedding, cell_embedding)
loss+=cl_loss
results_dict["train_loss"]["contrastive_loss"]=cl_loss.item()
results_dict["train_acc"]=train_acc
else:
results_dict["train_loss"]["constastive_loss"]=None
if self.metric:
metric_learning_loss = self.metric_learning_loop(
y_true, cell_embedding, mol_embedding
)
loss+=metric_learning_loss
results_dict["train_loss"]["metric_learning_loss"]=metric_learning_loss.item()
if "train_acc" not in results_dict.keys():
results_dict["train_acc"]=None
else:
results_dict["train_loss"]["metric_learning_loss"]=None
if "train_acc" not in results_dict.keys():
results_dict["train_acc"]=None
if self.extra_head:
reg_loss=self.mol_regressor_loop(
mol_embedding, y_regressor, lambda_reg=self.lambda_reg
)
loss+=reg_loss
results_dict["train_loss"]["regressor_loss"]=reg_loss.item()
if "train_acc" not in results_dict.keys():
results_dict["train_acc"]=None
else:
results_dict["train_loss"]["regressor_loss"]=None
if "train_acc" not in results_dict.keys():
results_dict["train_acc"]=None
#Backprop and update weights
loss.backward()
self.optimizer.step()
return results_dict
@torch.no_grad()
def val_step(self, input_tensor, y_true, y_regressor=None):
self.model.eval()
results_dict={"test_loss": {}} # init results dictionary
# Place tensors in GPU if possible
if self.cuda:
input_tensor = input_tensor.cuda()
y_true = y_true.cuda()
y_regressor = y_regressor.cuda()
# Make batch of molecular graphs
molecule_batch = Batch.from_data_list(
get_drug_batch(y_true,self.name_to_mol,self.ix_to_name,cuda = self.cuda)
)
# Compute cell and molecule embeddings
cell_embedding = self.model.encode_cell(input_tensor.view(self.batch_size, -1).float())
mol_embedding = self.model.encode_molecule(molecule_batch)
# Run through all modes of the model
if self.contrastive:
cl_loss, test_acc = self.contrastive_learning_loop(mol_embedding, cell_embedding)
results_dict["test_loss"]["contrastive_loss"]=cl_loss.item()
results_dict["test_acc"]=test_acc
else:
results_dict["test_loss"]["constastive_loss"]=None
if self.metric:
metric_learning_loss = self.metric_learning_loop(
y_true, cell_embedding, mol_embedding
)
results_dict["test_loss"]["metric_learning_loss"]=metric_learning_loss.item()
if "test_acc" not in results_dict.keys():
results_dict["test_acc"]=None
else:
results_dict["test_loss"]["metric_learning_loss"]=None
if "test_acc" not in results_dict.keys():
results_dict["test_acc"]=None
if self.extra_head:
reg_loss=self.mol_regressor_loop(
mol_embedding, y_regressor, lambda_reg =self.lambda_reg
)
results_dict["test_loss"]["regressor_loss"]=reg_loss.item()
if "test_acc" not in results_dict.keys():
results_dict["test_acc"]=None
else:
results_dict["regressor_loss"]=None
if "test_acc" not in results_dict.keys():
results_dict["test_acc"]=None
#No backprop
#loss.backward()
#self.optimizer.step()
return results_dict
def train(self)-> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Trains the joint embedding model for n_epochs.
Returns
-------
train_logs (pd.dataframe)
Train loss and accuracy.
validation_logs (pd.dataframe)
Val loss and accuracy.
"""
df_train_loss, df_train_acc = pd.DataFrame(), pd.DataFrame()
df_test_loss, df_test_acc = pd.DataFrame(), pd.DataFrame()
#df_train_logs, df_test_logs = pd.DataFrame(), pd.DataFrame()
for epoch in np.arange(self.n_epochs):
self.model.train()
# Loop through minibatches from training dataloader
for ix, (input_tensor, y_true, y_regressor) in tqdm.tqdm(enumerate(self.train_loader)):
# Train step
results_dict_train = self.train_step(input_tensor, y_true, y_regressor)
df_train_loss = df_train_loss.append(
results_dict_train['train_loss'], ignore_index = True
)
df_train_acc = df_train_acc.append(
{'train_acc': results_dict_train['train_acc']}, ignore_index = True
)
mean_cl = df_train_loss.contrastive_loss.mean()
mean_ml = df_train_loss.metric_learning_loss.mean()
mean_mse = df_train_loss.regressor_loss.mean()
mean_acc = df_train_acc.train_acc.mean()
print('Epoch %d'%(epoch+1))
print('--------------------')
print('Train contrastive loss: %.3f '%(mean_cl if not pd.isna(mean_cl) else 0.0))
print('Train metric learning loss: %.3f '%(mean_ml if not pd.isna(mean_ml) else 0.0))
print('Train regression loss: %.3f '%(mean_mse if not pd.isna(mean_mse) else 0.0))
print('Train accuracy: %.3f'%(mean_acc*100 if not pd.isna(mean_acc) else 0.0))
print('\n')
# Loop through mb from validation dataloader
self.model.eval()
for ix, (input_tensor, y_true, y_regressor) in tqdm.tqdm(enumerate(self.val_loader)):
# Val step
results_dict_test = self.val_step(input_tensor, y_true, y_regressor)
df_test_loss = df_test_loss.append(results_dict_test['test_loss'], ignore_index = True)
df_test_acc = df_test_acc.append(
{'test_acc': results_dict_test['test_acc']}, ignore_index = True
)
mean_cl_ = df_test_loss.contrastive_loss.mean()
mean_ml_ = df_test_loss.metric_learning_loss.mean()
mean_val_mse = df_test_loss.regressor_loss.mean()
mean_acc_ = df_test_acc.test_acc.mean()
print('Val contrastive loss: %.3f '%(mean_cl_ if not pd.isna(mean_cl_) else 0.0))
print('Val metric learning loss: %.3f '%(mean_ml_ if not pd.isna(mean_ml_) else 0.0))
print('Val regression loss: %.3f '%(mean_val_mse if not pd.isna(mean_val_mse) else 0.0))
print('Validation accuracy: %.3f'%(mean_acc_*100 if not pd.isna(mean_acc_) else 0.0))
print('\n')
# SAVE MODEL
if self.model_dir is not None:
if not os.path.exists(self.model_dir):
os.mkdir(self.model_dir)
if self.model_name is not None:
torch.save(
self.model.state_dict(),
os.path.join(self.model_dir, self.model_name + '_' + str(epoch +1) + '.pt')
)
else:
torch.save(
self.model.state_dict(),
os.path.join(self.model_dir, 'model' + '_' + str(epoch +1) + '.pt')
)
# Summarize results
df_train_logs = pd.concat([df_train_loss, df_train_acc], axis = 1)
df_test_logs = pd.concat([df_test_loss, df_test_acc], axis = 1)
epoch_indicator_train = np.concatenate(
[np.repeat(epoch, self.n_train_batches) for epoch in np.arange(1, self.n_epochs+1)]
)
epoch_indicator_test = np.concatenate(
[np.repeat(epoch, self.n_test_batches) for epoch in np.arange(1, self.n_epochs +1)]
)
df_train_logs['epoch'] = epoch_indicator_train
df_test_logs['epoch'] = epoch_indicator_test
# Set logs as attributes
self.train_logs = df_train_logs
self.test_logs = df_test_logs
df_train_agg = df_train_logs.groupby('epoch').mean().reset_index()
df_test_agg = df_test_logs.groupby('epoch').mean().reset_index()
self.best_model_ix = int(df_test_agg.test_acc.argmax())
return df_train_logs, df_test_logs
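# --- Hedged usage sketch (added illustration, not part of the original API) ---
# Shows how a JointEmbeddingTrainerV2 could be wired up. The model, AnnData,
# drug dataframe and the two dataloaders are assumptions: they are built
# elsewhere in this project, and the loaders must yield
# (input_tensor, y_true, y_regressor) minibatches of a fixed size.
def _example_train_joint_embedding_v2(model, adata, df_drugs, train_loader, val_loader):
    trainer = JointEmbeddingTrainerV2(
        model, adata, df_drugs,
        batch_size=train_loader.batch_size,
        train_loader=train_loader,
        val_loader=val_loader,
        lr=1e-4,
        n_epochs=10,
        extra_head=True,     # enables the auxiliary regressor loss
        lambda_reg=1.0,
    )
    train_logs, val_logs = trainer.train()
    # Checkpoints are saved with a 1-indexed epoch suffix; best_model_ix is the 0-based argmax.
    return train_logs, val_logs, trainer.best_model_ix + 1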
class JointEmbeddingTrainerG3(JointEmbeddingTrainerV2):
"""
Trainer for GNN cell encoder using concat regressor.
"""
def __init__(
self,
model,
adata,
df_drugs,
batch_size,
train_loader,
val_loader,
lr:float = 1e-5,
n_epochs:int = 20,
metric_learning:bool = True,
contrastive_learning:bool = True,
p_norm_metric:int = 2,
margin:float = 3.,
model_name:str = None,
model_dir:str = None,
extra_head= True,
lambda_reg = 1,
regressor_loss=None,
indices=None,
force_cpu=False,
g_dims = None
):
super().__init__(
model,
adata,
df_drugs,
batch_size,
train_loader,
val_loader,
lr = lr,
n_epochs = n_epochs,
metric_learning = metric_learning,
contrastive_learning = contrastive_learning,
p_norm_metric = p_norm_metric,
margin = margin,
model_name = model_name,
model_dir = model_dir,
extra_head= extra_head,
lambda_reg = lambda_reg,
regressor_loss=regressor_loss,
indices=indices,
force_cpu=force_cpu
)
self.g_dims = g_dims
def train_step(self, data):
"""
A single training step for a minibatch using graph data
Params
------
data(torch_geometric.Data.data)
Data object containing cell transcriptome in 'x' attribute,
label in 'y', and extra columns in 'g'.
Returns
-------
results_dict (dict)
Dictionary with loss and accuracy logs.
"""
loss=0
results_dict={ "train_loss": {} }
self.model.zero_grad()
# Extract data for minimizing errors in var name change
y_true = torch.tensor(data.y, dtype = torch.long)
if self.extra_head:
y_regressor = data.g.reshape(-1, self.g_dims)
# Make batch of molecular graphs
molecule_batch = Batch.from_data_list(
get_drug_batch(y_true,self.name_to_mol,self.ix_to_name,cuda = self.cuda)
)
# Compute cell and molecule embeddings
cell_embedding = self.model.encode_cell(data) # this is the thing that changes in GNN
mol_embedding = self.model.encode_molecule(molecule_batch)
# ------ THIS CHANGES IN V3-------
h = torch.cat([mol_embedding, y_regressor], dim=-1)
mol_embedding = self.model.extra_head(h)
# -------------------------------------
# Run through all modes of the model
if self.contrastive:
cl_loss, train_acc = self.contrastive_learning_loop(mol_embedding, cell_embedding)
loss+=cl_loss
results_dict["train_loss"]["contrastive_loss"]=cl_loss.item()
results_dict["train_acc"]=train_acc
else:
results_dict["train_loss"]["constastive_loss"]=None
if self.metric:
metric_learning_loss = self.metric_learning_loop(
y_true, cell_embedding, mol_embedding
)
loss+=metric_learning_loss
results_dict["train_loss"]["metric_learning_loss"]=metric_learning_loss.item()
if "train_acc" not in results_dict.keys():
results_dict["train_acc"]=None
else:
results_dict["train_loss"]["metric_learning_loss"]=None
if "train_acc" not in results_dict.keys():
results_dict["train_acc"]=None
# if self.extra_head:
# reg_loss=self.mol_regressor_loop(
# mol_embedding, y_regressor, lambda_reg=self.lambda_reg
# )
# loss+=reg_loss
#
# results_dict["train_loss"]["regressor_loss"]=reg_loss.item()
# if "train_acc" not in results_dict.keys():
# results_dict["train_acc"]=None
# else:
# results_dict["train_loss"]["regressor_loss"]=None
# if "train_acc" not in results_dict.keys():
# results_dict["train_acc"]=None
#Backprop and update weights
loss.backward()
self.optimizer.step()
return results_dict
@torch.no_grad()
def val_step(self, data):
"""
A validation forward pass using graph data.
Params
------
data(torch_geometric.Data.data)
Data object containing graph node features in '.x' attribute,
label in '.y', and extra columns in '.g'.
Returns
-------
results_dict (dict)
Dictionary with loss and accuracy logs.
"""
self.model.eval()
results_dict={"test_loss": {}} # init results dictionary
# Extract data
y_true = torch.tensor(data.y, dtype = torch.long)
if self.extra_head:
y_regressor = data.g.reshape(-1, self.g_dims)
# Make batch of molecular graphs
molecule_batch = Batch.from_data_list(
get_drug_batch(y_true,self.name_to_mol,self.ix_to_name,cuda = self.cuda)
)
# Compute cell and molecule embeddings
cell_embedding = self.model.encode_cell(data) # this is the thing that changes in GNN
mol_embedding = self.model.encode_molecule(molecule_batch)
# --------THIS CHANGES IN V3 -----------
h = torch.cat([mol_embedding, y_regressor], dim = -1)
mol_embedding = self.model.extra_head(h)
# ---------------------------------------
# Run through all modes of the model
if self.contrastive:
cl_loss, test_acc = self.contrastive_learning_loop(mol_embedding, cell_embedding)
results_dict["test_loss"]["contrastive_loss"]=cl_loss.item()
results_dict["test_acc"]=test_acc
else:
results_dict["test_loss"]["constastive_loss"]=None
if self.metric:
metric_learning_loss = self.metric_learning_loop(
y_true, cell_embedding, mol_embedding
)
results_dict["test_loss"]["metric_learning_loss"]=metric_learning_loss.item()
if "test_acc" not in results_dict.keys():
results_dict["test_acc"]=None
else:
results_dict["test_loss"]["metric_learning_loss"]=None
if "test_acc" not in results_dict.keys():
results_dict["test_acc"]=None
# if self.extra_head:
# reg_loss=self.mol_regressor_loop(
# mol_embedding, y_regressor, lambda_reg =self.lambda_reg
# )
#
# results_dict["test_loss"]["regressor_loss"]=reg_loss.item()
# if "test_acc" not in results_dict.keys():
# results_dict["test_acc"]=None
# else:
# results_dict["regressor_loss"]=None
# if "test_acc" not in results_dict.keys():
# results_dict["test_acc"]=None
return results_dict
def train(self)-> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Trains the joint embedding model for n_epochs.
Returns the train and validation loss and accuracy as dataframes.
"""
df_train_loss, df_train_acc = pd.DataFrame(), pd.DataFrame()
df_test_loss, df_test_acc = pd.DataFrame(), pd.DataFrame()
#df_train_logs, df_test_logs = pd.DataFrame(), pd.DataFrame()
for epoch in np.arange(self.n_epochs):
self.model.train()
# Loop through minibatches from training dataloader
for ix, data in tqdm.tqdm(enumerate(self.train_loader)):
if self.cuda:
data.edge_index = data.edge_index.cuda()
data.x = data.x.cuda()
data.y = torch.tensor(data.y, device = device)
data.ptr = data.ptr.cuda()
data.batch = data.batch.cuda()
if self.extra_head:
data.g = data.g.cuda()
# Train step
results_dict_train = self.train_step(data)
df_train_loss = df_train_loss.append(
results_dict_train['train_loss'], ignore_index = True
)
df_train_acc = df_train_acc.append(
{'train_acc': results_dict_train['train_acc']}, ignore_index = True
)
mean_cl = df_train_loss.contrastive_loss.mean()
mean_ml = df_train_loss.metric_learning_loss.mean()
# Regressor loss is not logged in this trainer variant, so it is not averaged here.
# if self.extra_head:
# mean_mse = df_train_loss.regressor_loss.mean()
mean_acc = df_train_acc.train_acc.mean()
print('Epoch %d'%(epoch+1))
print('--------------------')
print('Train contrastive loss: %.3f '%(mean_cl if not pd.isna(mean_cl) else 0.0))
print('Train metric learning loss: %.3f '%(mean_ml if not pd.isna(mean_ml) else 0.0))
# Regression loss is not logged in this variant
# if self.extra_head:
# print('Train regression loss: %.3f '%(mean_mse if mean_mse is not np.nan else 0.0))
print('Train accuracy: %.3f'%(mean_acc*100 if not pd.isna(mean_acc) else 0.0))
print('\n')
# Loop through mb from validation dataloader
self.model.eval()
# no_grad declared in the val_step() func
for ix, data in tqdm.tqdm(enumerate(self.val_loader)):
if self.cuda:
data.edge_index = data.edge_index.cuda()
data.x = data.x.cuda()
data.y = torch.tensor(data.y, device = device)
data.ptr = data.ptr.cuda()
data.batch = data.batch.cuda()
if self.extra_head:
data.g = data.g.cuda()  # mirror the train loop: the regressor profile must be on the GPU too
# Val step
results_dict_test = self.val_step(data)
df_test_loss = df_test_loss.append(results_dict_test['test_loss'], ignore_index = True)
df_test_acc = df_test_acc.append(
{'test_acc': results_dict_test['test_acc']}, ignore_index = True
)
mean_cl_ = df_test_loss.contrastive_loss.mean()
mean_ml_ = df_test_loss.metric_learning_loss.mean()
# if self.extra_head:
# mean_val_mse = df_test_loss.regressor_loss.mean()
mean_acc_ = df_test_acc.test_acc.mean()
print('Val contrastive loss: %.3f '%(mean_cl_ if not pd.isna(mean_cl_) else 0.0))
print('Val metric learning loss: %.3f '%(mean_ml_ if not pd.isna(mean_ml_) else 0.0))
# if self.extra_head:
# print('Val regression loss: %.3f '%(mean_val_mse if mean_val_mse is not np.nan else 0.0))
print('Validation accuracy: %.3f'%(mean_acc_*100 if not pd.isna(mean_acc_) else 0.0))
print('\n')
# SAVE MODEL
if self.model_dir is not None:
if not os.path.exists(self.model_dir):
os.mkdir(self.model_dir)
if self.model_name is not None:
torch.save(
self.model.state_dict(),
os.path.join(self.model_dir, self.model_name + '_' + str(epoch +1) + '.pt')
)
else:
torch.save(
self.model.state_dict(),
os.path.join(self.model_dir, 'model' + '_' + str(epoch +1) + '.pt')
)
# Summarize results
df_train_logs = pd.concat([df_train_loss, df_train_acc], axis = 1)
df_test_logs = pd.concat([df_test_loss, df_test_acc], axis = 1)
epoch_indicator_train = np.concatenate(
[np.repeat(epoch, self.n_train_batches) for epoch in np.arange(1, self.n_epochs+1)]
)
epoch_indicator_test = np.concatenate(
[np.repeat(epoch, self.n_test_batches) for epoch in np.arange(1, self.n_epochs +1)]
)
df_train_logs['epoch'] = epoch_indicator_train
df_test_logs['epoch'] = epoch_indicator_test
# Set logs as attributes
self.train_logs = df_train_logs
self.test_logs = df_test_logs
df_train_agg = df_train_logs.groupby('epoch').mean().reset_index()
df_test_agg = df_test_logs.groupby('epoch').mean().reset_index()
self.best_model_ix = int(df_test_agg.test_acc.argmax())
return df_train_logs, df_test_logs
class JointEmbeddingTrainerG(JointEmbeddingTrainerV2):
"""
Trainer allowing for GNN in the cell encoder + extra head regressor.
"""
def __init__(
self,
model,
adata,
df_drugs,
batch_size,
train_loader,
val_loader,
lr:float = 1e-5,
n_epochs:int = 20,
metric_learning:bool = True,
contrastive_learning:bool = True,
p_norm_metric:int = 2,
margin:float = 3.,
model_name:str = None,
model_dir:str = None,
extra_head= True,
lambda_reg = 1,
regressor_loss=None,
indices=None,
force_cpu=False,
g_dims = None
):
super().__init__(
model,
adata,
df_drugs,
batch_size,
train_loader,
val_loader,
lr = lr,
n_epochs = n_epochs,
metric_learning = metric_learning,
contrastive_learning = contrastive_learning,
p_norm_metric = p_norm_metric,
margin = margin,
model_name = model_name,
model_dir = model_dir,
extra_head= extra_head,
lambda_reg = lambda_reg,
regressor_loss=regressor_loss,
indices=indices,
force_cpu=force_cpu
)
self.g_dims = g_dims
def train_step(self, data):
"""
A single training step for a minibatch using graph data
Params
------
data(torch_geometric.Data.data)
Data object containing cell transcriptome in 'x' attribute,
label in 'y', and extra columns in 'g'.
Returns
-------
results_dict (dict)
Dictionary with loss and accuracy logs.
"""
loss=0
results_dict={ "train_loss": {} }
self.model.zero_grad()
# Extract data for minimizing errors in var name change
y_true = torch.tensor(data.y, dtype = torch.long)
if self.extra_head:
y_regressor = data.g.reshape(-1, self.g_dims)
# Make batch of molecular graphs
molecule_batch = Batch.from_data_list(
get_drug_batch(y_true,self.name_to_mol,self.ix_to_name,cuda = self.cuda)
)
# Compute cell and molecule embeddings
cell_embedding = self.model.encode_cell(data) # this is the thing that changes in GNN
mol_embedding = self.model.encode_molecule(molecule_batch)
# Run through all modes of the model
if self.contrastive:
cl_loss, train_acc = self.contrastive_learning_loop(mol_embedding, cell_embedding)
loss+=cl_loss
results_dict["train_loss"]["contrastive_loss"]=cl_loss.item()
results_dict["train_acc"]=train_acc
else:
results_dict["train_loss"]["constastive_loss"]=None
if self.metric:
metric_learning_loss = self.metric_learning_loop(
y_true, cell_embedding, mol_embedding
)
loss+=metric_learning_loss
results_dict["train_loss"]["metric_learning_loss"]=metric_learning_loss.item()
if "train_acc" not in results_dict.keys():
results_dict["train_acc"]=None
else:
results_dict["train_loss"]["metric_learning_loss"]=None
if "train_acc" not in results_dict.keys():
results_dict["train_acc"]=None
if self.extra_head:
reg_loss=self.mol_regressor_loop(
mol_embedding, y_regressor, lambda_reg=self.lambda_reg
)
loss+=reg_loss
results_dict["train_loss"]["regressor_loss"]=reg_loss.item()
if "train_acc" not in results_dict.keys():
results_dict["train_acc"]=None
else:
results_dict["train_loss"]["regressor_loss"]=None
if "train_acc" not in results_dict.keys():
results_dict["train_acc"]=None
#Backprop and update weights
loss.backward()
self.optimizer.step()
return results_dict
@torch.no_grad()
def val_step(self, data):
"""
A validation forward pass using graph data.
Params
------
data(torch_geometric.Data.data)
Data object containing graph node features in '.x' attribute,
label in '.y', and extra columns in '.g'.
Returns
-------
results_dict (dict)
Dictionary with loss and accuracy logs.
"""
self.model.eval()
results_dict={"test_loss": {}} # init results dictionary
# Extract data
y_true = torch.tensor(data.y, dtype = torch.long)
if self.extra_head:
y_regressor = data.g.reshape(-1, self.g_dims)
# Make batch of molecular graphs
molecule_batch = Batch.from_data_list(
get_drug_batch(y_true,self.name_to_mol,self.ix_to_name,cuda = self.cuda)
)
# Compute cell and molecule embeddings
cell_embedding = self.model.encode_cell(data) # this is the thing that changes in GNN
mol_embedding = self.model.encode_molecule(molecule_batch)
# Run through all modes of the model
if self.contrastive:
cl_loss, test_acc = self.contrastive_learning_loop(mol_embedding, cell_embedding)
results_dict["test_loss"]["contrastive_loss"]=cl_loss.item()
results_dict["test_acc"]=test_acc
else:
results_dict["test_loss"]["constastive_loss"]=None
if self.metric:
metric_learning_loss = self.metric_learning_loop(
y_true, cell_embedding, mol_embedding
)
results_dict["test_loss"]["metric_learning_loss"]=metric_learning_loss.item()
if "test_acc" not in results_dict.keys():
results_dict["test_acc"]=None
else:
results_dict["test_loss"]["metric_learning_loss"]=None
if "test_acc" not in results_dict.keys():
results_dict["test_acc"]=None
if self.extra_head:
reg_loss=self.mol_regressor_loop(
mol_embedding, y_regressor, lambda_reg =self.lambda_reg
)
results_dict["test_loss"]["regressor_loss"]=reg_loss.item()
if "test_acc" not in results_dict.keys():
results_dict["test_acc"]=None
else:
results_dict["regressor_loss"]=None
if "test_acc" not in results_dict.keys():
results_dict["test_acc"]=None
#No backprop
#loss.backward()
#self.optimizer.step()
return results_dict
def train(self)-> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Trains the joint embedding model for n_epochs.
Returns the train and validation loss and accuracy as dataframes.
"""
df_train_loss, df_train_acc = pd.DataFrame(), pd.DataFrame()
df_test_loss, df_test_acc = pd.DataFrame(), pd.DataFrame()
#df_train_logs, df_test_logs = pd.DataFrame(), pd.DataFrame()
for epoch in np.arange(self.n_epochs):
self.model.train()
# Loop through minibatches from training dataloader
for ix, data in tqdm.tqdm(enumerate(self.train_loader)):
if self.cuda:
data.edge_index = data.edge_index.cuda()
data.x = data.x.cuda()
data.y = torch.tensor(data.y, device = device)
data.ptr = data.ptr.cuda()
data.batch = data.batch.cuda()
# Train step
results_dict_train = self.train_step(data)
df_train_loss = df_train_loss.append(
results_dict_train['train_loss'], ignore_index = True
)
df_train_acc = df_train_acc.append(
{'train_acc': results_dict_train['train_acc']}, ignore_index = True
)
mean_cl = df_train_loss.contrastive_loss.mean()
mean_ml = df_train_loss.metric_learning_loss.mean()
if self.extra_head:
mean_mse = df_train_loss.regressor_loss.mean()
mean_acc = df_train_acc.train_acc.mean()
print('Epoch %d'%(epoch+1))
print('--------------------')
print('Train contrastive loss: %.3f '%(mean_cl if not pd.isna(mean_cl) else 0.0))
print('Train metric learning loss: %.3f '%(mean_ml if not pd.isna(mean_ml) else 0.0))
if self.extra_head:
print('Train regression loss: %.3f '%(mean_mse if not pd.isna(mean_mse) else 0.0))
print('Train accuracy: %.3f'%(mean_acc*100 if not pd.isna(mean_acc) else 0.0))
print('\n')
# Loop through mb from validation dataloader
self.model.eval()
# no_grad declared in the val_step() func
for ix, data in tqdm.tqdm(enumerate(self.val_loader)):
if self.cuda:
data.edge_index = data.edge_index.cuda()
data.x = data.x.cuda()
data.y = torch.tensor(data.y, device = device)
data.ptr = data.ptr.cuda()
data.batch = data.batch.cuda()
# Val step
results_dict_test = self.val_step(data)
df_test_loss = df_test_loss.append(results_dict_test['test_loss'], ignore_index = True)
df_test_acc = df_test_acc.append(
{'test_acc': results_dict_test['test_acc']}, ignore_index = True
)
mean_cl_ = df_test_loss.contrastive_loss.mean()
mean_ml_ = df_test_loss.metric_learning_loss.mean()
if self.extra_head:
mean_val_mse = df_test_loss.regressor_loss.mean()
mean_acc_ = df_test_acc.test_acc.mean()
print('Val contrastive loss: %.3f '%(mean_cl_ if not pd.isna(mean_cl_) else 0.0))
print('Val metric learning loss: %.3f '%(mean_ml_ if not pd.isna(mean_ml_) else 0.0))
if self.extra_head:
print('Val regression loss: %.3f '%(mean_val_mse if not pd.isna(mean_val_mse) else 0.0))
print('Validation accuracy: %.3f'%(mean_acc_*100 if not pd.isna(mean_acc_) else 0.0))
print('\n')
# SAVE MODEL
if self.model_dir is not None:
if not os.path.exists(self.model_dir):
os.mkdir(self.model_dir)
if self.model_name is not None:
torch.save(
self.model.state_dict(),
os.path.join(self.model_dir, self.model_name + '_' + str(epoch +1) + '.pt')
)
else:
torch.save(
self.model.state_dict(),
os.path.join(self.model_dir, 'model' + '_' + str(epoch +1) + '.pt')
)
# Summarize results
df_train_logs = pd.concat([df_train_loss, df_train_acc], axis = 1)
df_test_logs = pd.concat([df_test_loss, df_test_acc], axis = 1)
epoch_indicator_train = np.concatenate(
[np.repeat(epoch, self.n_train_batches) for epoch in np.arange(1, self.n_epochs+1)]
)
epoch_indicator_test = np.concatenate(
[np.repeat(epoch, self.n_test_batches) for epoch in np.arange(1, self.n_epochs +1)]
)
df_train_logs['epoch'] = epoch_indicator_train
df_test_logs['epoch'] = epoch_indicator_test
# Set logs as attributes
self.train_logs = df_train_logs
self.test_logs = df_test_logs
df_train_agg = df_train_logs.groupby('epoch').mean().reset_index()
df_test_agg = df_test_logs.groupby('epoch').mean().reset_index()
self.best_model_ix = int(df_test_agg.test_acc.argmax())
return df_train_logs, df_test_logs
def train_vae(
model:nn.Module,
input_tensor,
optimizer)->torch.tensor:
"""
Forward-backward pass of a VAE model.
"""
model.zero_grad()
reconstructed, mu, log_var = model(input_tensor)
loss = model.loss(reconstructed, input_tensor, mu, log_var)
# Backprop error
loss.backward()
# Update weights
optimizer.step()
return loss
def validate_vae(
model:nn.Module,
input_tensor,
optimizer
)->torch.tensor:
reconstructed, mu, log_var = model(input_tensor)
loss = model.loss(reconstructed, input_tensor, mu, log_var)
return loss.mean()
def vae_trainer(
n_epochs:int,
train_loader,
val_loader,
model:nn.Module,
optimizer,
conditional_gen = False,
logs_per_epoch = 5):
"""
Wrapper function to train a VAE model for n_epochs.
Params
------
n_epochs(int)
Number of epochs to run the model.
train_loader()
Dataloader for training set.
val_loader()
Dataloader for validation set.
model(nn.Module)
VAE model.
Returns
-------
train_loss_vector
val_loss_vector
"""
batch_size = train_loader.batch_size
print_every = max(1, int(len(train_loader.dataset) / batch_size / logs_per_epoch))
train_loss_vector = []
val_loss_vector = np.empty(shape = n_epochs)
cuda = torch.cuda.is_available()
if cuda:
device = try_gpu()
torch.cuda.set_device(device)
model = model.to(device)
for epoch in np.arange(n_epochs):
running_loss = 0.0
# TRAINING LOOP
for ix, data in enumerate(tqdm.tqdm(train_loader)):
# Reshape minibatch
input_tensor = data.view(batch_size, -1).float()
if cuda:
input_tensor = input_tensor.cuda(device = device)
train_loss = train_vae(model, input_tensor, optimizer)
running_loss +=train_loss.item()
# Print loss
if ix % print_every == print_every -1 : # ix starts at 0
print('[%d, %5d] VAE loss : %.3f' %
(epoch + 1, ix +1, running_loss / print_every)
)
train_loss_vector.append(running_loss / print_every)
# Restart loss
running_loss = 0.0
# VALIDATION LOOP
validation_loss = []  # accumulate losses across all validation minibatches
for ix, data in enumerate(tqdm.tqdm(val_loader)):
# Reshape minibatch
input_tensor = data.view(batch_size, -1).float()
if cuda:
input_tensor = input_tensor.cuda(device = device)
val_loss = validate_vae(model, input_tensor, optimizer)
validation_loss.append(val_loss)
mean_val_loss = torch.stack(validation_loss).mean()
val_loss_vector[epoch] = mean_val_loss
print('Val. loss %.3f'% mean_val_loss)
print('Finished training.')
return train_loss_vector, val_loss_vector
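# --- Hedged usage sketch (added illustration, not part of the original API) ---
# Minimal driver for `vae_trainer`. The VAE model and the two dataloaders are
# assumptions: they are defined elsewhere in this project, and the loaders are
# expected to yield flat (or flattenable) count tensors of a fixed batch size.
def _example_run_vae_training(vae_model, train_loader, val_loader, lr=1e-4, n_epochs=5):
    optimizer = torch.optim.Adam(vae_model.parameters(), lr=lr)
    train_losses, val_losses = vae_trainer(
        n_epochs=n_epochs,
        train_loader=train_loader,
        val_loader=val_loader,
        model=vae_model,
        optimizer=optimizer,
        logs_per_epoch=5,
    )
    return train_losses, val_losses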
def try_gpu(i=0):
"""
Return gpu(i) if exists, otherwise return cpu().
Extracted from https://github.com/d2l-ai/d2l-en/blob/master/d2l/torch.py
"""
if torch.cuda.device_count() >= i + 1:
return torch.device(f'cuda:{i}')
return torch.device('cpu')
def initialize_network_weights(
net:nn.Module, method = 'kaiming', seed = 4
)-> nn.Module:
"""
Initialize fully connected and convolutional layers' weights
using the Kaiming (He) or Xavier method.
This method is recommended for ReLU / SELU based activations.
"""
torch.manual_seed(seed)
if method == 'kaiming':
for module in net.modules():
if isinstance(module, (nn.Linear, nn.Conv2d)):
nn.init.kaiming_uniform_(module.weight)
try:
nn.init.uniform_(module.bias)
except:
pass
elif isinstance(module, (nn.GRU, nn.LSTM)):
for name, param in module.named_parameters():
if 'bias' in name :
nn.init.uniform_(param)
elif 'weight' in name:
nn.init.kaiming_uniform_(param)
else:
pass
else:
pass
elif method == 'xavier':
for module in net.modules():
if isinstance(module, (nn.Linear, nn.Conv2d)):
nn.init.xavier_uniform_(module.weight)
try:
nn.init.uniform_(module.bias)
except:
pass
elif isinstance(module, (nn.GRU, nn.LSTM)):
for name, param in module.named_parameters():
if 'bias' in name :
nn.init.uniform_(param)
elif 'weight' in name:
nn.init.xavier_uniform_(param)
else:
pass
else:
pass
elif method == 'xavier_normal':
for module in net.modules():
if isinstance(module, (nn.Linear, nn.Conv2d)):
nn.init.xavier_normal_(module.weight)
try:
nn.init.uniform_(module.bias)
except:
pass
elif isinstance(module, (nn.GRU, nn.LSTM)):
for name, param in module.named_parameters():
if 'bias' in name :
nn.init.uniform_(param)
elif 'weight' in name:
nn.init.xavier_normal_(param)
else:
pass
else:
pass
else:
raise ValueError('Method not found. Valid options are `kaiming`, `xavier`, or `xavier_normal`.')
return net
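# --- Hedged usage sketch (added illustration, not part of the original API) ---
# Re-initializes the linear layers of a small throwaway MLP in place; the
# architecture below is an arbitrary example, not one used by the pipeline.
def _example_initialize_mlp(in_dim=100, hidden_dim=32, out_dim=8, method='kaiming'):
    mlp = nn.Sequential(
        nn.Linear(in_dim, hidden_dim),
        nn.ReLU(),
        nn.Linear(hidden_dim, out_dim),
    )
    return initialize_network_weights(mlp, method=method, seed=4)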
class adata_torch_dataset(Dataset):
"Convert an adata to a torch.Dataset"
def __init__(
self, data= None, transform = None, supervised = False,
target_col = None, g_cols = None, multilabel = False):
"""
Base class for a single cell dataset in .h5ad, i.e. AnnData format
This object enables building models in pytorch.
It currently supports unsupervised (matrix factorization / autoencoder)
and general supervised (classification/regression) models.
Note: the ToTensor() transform can end up normalizing count matrices.
See more on: https://pytorch.org/docs/0.2.0/_modules/torchvision/transforms.html#ToTensor
Params
------
data (ad.AnnData)
AnnDataset containing the count matrix in the data.X object.
transform (torchvision.transforms, default= None)
A torchvision.transforms-type transformation, e.g. ToTensor()
supervised (bool, default = False)
Indicator variable for supervised models.
target_col (string/array-like, default = None)
If running a supervised model, target_col should be a column
or set of columns in the adata.obs dataframe.
When running a binary or multiclass classifier, the labels
should be in a single column in a int64 format.
Even when running a multiclass classifier, do not pass the columns
one-hot encoded; any one-hot encoding is handled inside the
classifier model. The reason is that nn.CrossEntropyLoss() and
nn.NLLLoss() take the true labels as integers (e.g. 1, 2, 3),
not in one-hot encoded form (e.g. [1, 0, 0], [0, 1, 0], [0, 0, 1]).
When running a multilabel classifier (multiple categorical columns,
e.g ´cell_type´ and `behavior`), specify the columns as a **list**.
In this case, we will use the nn.BCELoss() using the one-hot encoded
labels. This is akin to a multi-output classification.
g_cols(list, default = None)
List of columns in an auxiliary variable for conditional generation.
multilabel (bool, default = False)
Indicator variable to specify a multilabel classifier dataset.
Returns
-------
data_point(torch.tensor)
A single datapoint (row) of the dataset in torch.tensor format.
target(torch.tensor)
If running supervised model, the "y" or target label to be predicted.
"""
self.data = data # This is the h5ad / AnnData
self.supervised = supervised
self.target_col = target_col
self.transform = transform
from scipy import sparse
# Indicator of data being in sparse matrix format.
self.sparse = sparse.isspmatrix(data.X)
self.multilabel = multilabel
self.g_cols = g_cols
if self.multilabel:
from sklearn.preprocessing import OneHotEncoder
# Initialize one hot encoder
enc = OneHotEncoder(sparse = False)
self.one_hot_encoder = enc
n_categories = len(self.target_col)
# Extract target data
y_data = self.data.obs[self.target_col].values.astype(str).reshape(-1, n_categories)
# Build one hot encoder
self.one_hot_encoder.fit(y_data)
# Get one-hot matrix and save as attribute
self.multilabel_codes = self.one_hot_encoder.transform(y_data)
def __len__(self):
return self.data.n_obs
def __getitem__(self, ix):
if type(ix) == torch.Tensor:
ix = ix.tolist()
# Get a single row of dataset and convert to numpy array if needed
if self.sparse:
data_point = self.data[ix, :].X.A.astype(np.float64)
else:
data_point = self.data[ix, :].X.astype(np.float64)
# if self.conv:
# image = image.reshape(1, self.res, self.res)
if self.transform is not None:
data_point = self.transform(data_point)
# Get all columns for multilabel classification codes
if self.supervised and self.multilabel:
target = self.multilabel_codes[ix, :]
#target = self.transform(target)
return data_point, target
# Softmax-classification plus conditional generator
elif self.supervised and self.g_cols is not None:
target = self.data.obs.iloc[ix][self.target_col]
# Extract vector of for conditional generation
g_vars = self.data.obs.iloc[ix][self.g_cols].values.astype(np.float32)
return data_point, target, torch.from_numpy(g_vars)#.view(1,1,-1)
# Get categorical labels for multiclass or binary classification
# or single column for regression (haven't implemented multioutput reg.)
elif self.supervised:
target = self.data.obs.iloc[ix][self.target_col]
#target = self.transform(target)
return data_point, target
# Fallback to unsupervised case.
else:
return data_point
def codes_to_cat_labels(self, one_hot_labels):
"""
Returns categorical classes from labels in one-hot format.
Params
------
one_hot_labels (array-like)
Labels of (a potentially new or predicted) dataset
in one-hot-encoded format.
Returns
-------
cat_labels(array-like, or list of array-like)
Categorical labels of the one-hot encoded input.
"""
cat_labels = self.one_hot_encoder.inverse_transform(one_hot_labels)
return cat_labels
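# --- Hedged usage sketch (added illustration, not part of the original API) ---
# Wraps an AnnData in the dataset class above and builds a torch DataLoader.
# The `sample_codes` label column is an assumption that mirrors the convention
# used by the trainer classes earlier in this module.
def _example_adata_dataloader(adata, batch_size=128, target_col='sample_codes'):
    from torch.utils.data import DataLoader
    dataset = adata_torch_dataset(data=adata, supervised=True, target_col=target_col)
    # drop_last keeps minibatch shapes constant, as the trainers above expect
    return DataLoader(dataset, batch_size=batch_size, shuffle=True, drop_last=True)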
# Curried to allow keyword-style partial application inside a toolz pipeline
@tz.curry
def get_count_stats(
adata,
mt_prefix = None,
ribo_prefix = None
)-> ad.AnnData:
"""
Returns an AnnData with extra columns in its `obs` dataframe:
the number of counts per cell `n_counts` (and its log10, `log_counts`),
and the number of expressed genes per cell `n_genes`.
Additionally, it can compute the fraction of mitochondrial and ribosomal
counts if gene-name prefixes are provided.
TODO: Add filtering functionality
Params
------
adata (ad.AnnData)
Input dataset in AnnData format. It should contain a count matrix
(cells x genes) as the `.X` object in the AnnData.
mt_prefix (str, default = None)
Prefix to match mitochondrial genes.
For human the prefix is `MT-` and for mouse it is `mt-`.
ribo_prefix(default=None)
For human the prefixes are ('RPS', 'RPL').
Returns
-------
adata (ad.AnnData)
AnnData with columns in the `.obs` dataframe corresponding to
count stats.
"""
if not sparse.isspmatrix_csr(adata.X):
adata.X = sparse.csr_matrix(adata.X)
# Number of transcripts per cell
adata.obs['n_counts'] = np.asarray(adata.X.sum(axis = 1))
adata.obs['log_counts'] = np.log10(adata.obs.n_counts)
# Number of genes with more than one count
adata.obs['n_genes'] = (adata.X > 0).sum(axis = 1)
# Get mitochondrial and ribosomal genes
if mt_prefix is not None:
# Use string methods from pandas to make bool array
mito_genes = adata.var.gene_name.str.startswith(mt_prefix)
if mito_genes.sum()> 1:
# Compute the fraction of mitochondrial genes
adata.obs["frac_mito"] = adata[:, mito_genes].X.A.sum(axis =1) / adata.obs.n_counts
if ribo_prefix is not None:
if isinstance(ribo_prefix, (list, tuple)):
# Initialize bool array
ribo_genes = np.zeros(adata.n_vars, dtype = bool)
# Loop through each prefix and flip to True
# where we get a match.
for prefix in ribo_prefix:
ribo_genes_tmp = adata.var.gene_name.str.startswith(prefix)
ribo_genes +=ribo_genes_tmp
if ribo_genes.sum()> 1:
adata.obs["frac_ribo"] = adata[:, ribo_genes].X.A.sum(axis =1) / adata.obs.n_counts
return adata
# Curried to be able to add arguments inside a toolz pipeline
@tz.curry
def lognorm_cells(
adata_,
scaling_factor = 1e4,
log = True)-> ad.AnnData:
"""
Cell count normalization as in scanpy.pp.normalize_total.
Expects count matrix in sparse.csr_matrix format.
Each gene's expression value in a given cell is given by :
g_i = \mathrm{ln} ( \frac{g_i \times \beta }{\sum g_i} + 1 )
where β is the scaling factor.
Params
------
adata_ (ad.AnnData):
Count matrix with cell and gene annotations.
scaling_factor(float, default = 1e4)
Factor to scale gene counts to represent the counts in
the cell. If scaling_factor =1e6, the values will
represent counts per million.
log (bool, default = True)
Optional argument to allow for returning the scaled cells
without normalizing.
Returns
-------
adata (ad.AnnData):
Anndata with normalized and log transformed count matrix.
"""
# Make a copy because normalization is done in-place
adata = adata_.copy()
if not sparse.isspmatrix_csr(adata.X):
adata.X = sparse.csr_matrix(adata.X)
# Get total counts per cell from `obs` df
if 'n_counts' in adata.obs.columns:
counts = adata.obs.n_counts.values
else:
counts = adata.X.sum(axis = 1).flatten()
# Convert to numpy matrix to array to be able to flatten
scaled_counts = np.array(counts).flatten() / scaling_factor
# Efficient normalization in-place for sparse matrix
sparsefuncs.inplace_csr_row_scale(adata.X, 1/scaled_counts)
# Call the log1p() method on the csr_matrix
if log:
adata.X = adata.X.log1p()
return adata
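# --- Hedged sanity-check sketch (added illustration, not part of the original API) ---
# After `lognorm_cells`, inverting the log1p with expm1 should recover values
# that sum (approximately) to the scaling factor for every cell, since each
# cell is rescaled to `scaling_factor` total counts before the transform.
def _example_check_lognorm(adata_raw, scaling_factor=1e4):
    adata_norm = lognorm_cells(adata_raw, scaling_factor=scaling_factor, log=True)
    first_cell = np.expm1(adata_norm.X[0].A.ravel())
    return np.isclose(first_cell.sum(), scaling_factor, rtol=1e-3)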
# Curry to enable adding arguments in a tz.pipe()
@tz.curry
def cv_filter(
adata,
min_mean = 0.025,
min_cv= 1,
return_highly_variable = False)-> ad.AnnData:
"""
Performs the Coefficient of Variation filtering according
to the Poisson / Binomial counting statistics. The model assumes
the coefficient of variation per gene is given by :
\mathrm{log} (CV) \approx - \frac{1}{2}\mathrm{log} (\mu) + \epsilon
The values will be computed assuming a normalized and
log-scaled count matrix.
Params
------
min_mean (float, default = 0.025)
Lower bound cutoff for the mean of the gene feature.
min_cv (float, default = 1)
Lower bound for the coefficient of variation of the
gene feature.
return_highly_variable (bool, default = False)
Whether to return an AnnData restricted to the highly
variable genes only.
Note: even when running with `return_highly_variable=False`
the function only keeps genes with nonzero mean and
nonzero variance, i.e. genes with zero mean or zero variance are discarded.
Returns
-------
adata_filt (ad.AnnData)
AnnData with coefficient of variation stats in the `var`
dataframe.
"""
# Calculate mean and variance across cells
mean, var = sparsefuncs.mean_variance_axis(adata.X, axis = 0)
# Check if there are nonzero values for the mean or variance
ix_nonzero = list(set(np.nonzero(mean)[0]).intersection(set(np.nonzero(var)[0])))
if len(ix_nonzero) > 0:
# Use numpy-like filtering to select only genes with nonzero entries
adata = adata[:, ix_nonzero].copy()
# Recompute mean and variance of genes across cells
mean, var = sparsefuncs.mean_variance_axis(adata.X, axis = 0)
# Get nonzero mean indices
nz = np.nonzero(mean)
# Check that there are only nonzero mean values
assert adata.n_vars == nz[0].shape[0]
else:
print ('Only zero mean or variance values for the genes in the count matrix.')
return None
std_dev = np.sqrt(var)
# Element-wise coefficient of variation
cv = std_dev / mean
log_cv = np.log(cv)
log_mean = np.log(mean)
df_gene_stats = pd.DataFrame(
np.vstack([mean, log_mean, var, cv, log_cv]).T,
columns=["mean", "log_mean", "var", "cv", "log_cv"],
index = adata.var.index
)
new_adata_var = pd.concat(
[adata.var, df_gene_stats],
axis = 1
)
adata.var = new_adata_var
slope, intercept, r, pval, stderr = st.linregress(log_mean, log_cv)
poisson_prediction_cv = slope*log_mean + intercept
# Binary array of highly variable genes
gene_sel = log_cv > poisson_prediction_cv
adata.var['highly_variable'] = gene_sel.astype(int)
if min_mean is not None and min_cv is not None:
adata_filt = adata[:,((adata.var.highly_variable == True)&\
(adata.var['mean'] > min_mean)&\
(adata.var['cv'] > min_cv))].copy()
else:
adata_filt = adata[:, adata.var.highly_variable == True].copy()
if return_highly_variable:
return adata_filt
else:
return adata
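# --- Hedged usage sketch (added illustration, not part of the original API) ---
# The three curried preprocessing steps above are designed to be chained in a
# toolz pipeline. The gene-name prefixes below assume human annotations.
def _example_preprocess_pipeline(adata_raw):
    return tz.pipe(
        adata_raw,
        get_count_stats(mt_prefix='MT-', ribo_prefix=('RPS', 'RPL')),
        lognorm_cells(scaling_factor=1e4, log=True),
        cv_filter(min_mean=0.025, min_cv=1, return_highly_variable=True),
    )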
@tz.curry
def sample_to_name(sample_id, eliminate_parens = True, eliminate_hcl = False):
"""
Returns processed version of sample id.
Note:The best way to match is to try to match annotations in lowercase.
"""
if 'ethylisothiourea sulfate' in sample_id:
return 'Methylisothiourea sulfate'
# Eliminate "_CD3" overhang
s = sample_id.split('_CD3')[0]
# Trim spaces
s = s.strip()
if eliminate_parens:
s = s.split('(')[0]
# Trim spaces
s = s.strip()
# Remove HCl overhang
if eliminate_hcl:
s = s.split(' HCl')[0]
s = s.strip()
return s
def get_ix_nondup(labels):
"""
Returns a binary array given a set of categorical labels.
Used for filtering out duplicated labels in a minibatch when using
the n-way cross entropy ranking loss (online ranking).
Example
-------
x = np.random.randint(0, 10, 10)
x
>>> array([4, 8, 8, 7, 9, 8, 8, 3, 8, 9])
maskr = get_ix_nondup(x)
maskr
>>> array([ True, True, False, True, True, False, False, True, False,
False])
x[maskr]
>>> array([4, 8, 7, 9, 3])
"""
if isinstance(labels, torch.Tensor):
is_tensor = True
dev = labels.device
if labels.device.type == 'cuda':
labels = labels.cpu()
if labels.requires_grad:
labels = labels.detach()
labels = labels.numpy()
else:
is_tensor =False
# Gen binary array of non-duplicated labels
mask = ~pd.Series(labels).duplicated().values
# Check that no duplicated values remain
assert len(np.nonzero(pd.Series(labels[mask]).duplicated().values)[0]) == 0
if is_tensor:
mask = (torch.from_numpy(mask)).to(dev)
return mask
def get_acc_df_cell2mol(df_cells, name_to_target, name_to_class, k=1):
"""
Returns a top-k accuracy dataframe per sample.
Params
------
df_cells (pd.DataFrame)
Cell dataframe (from adata or df_embedding) that contains the top-k accuracy.
Returns
-------
accuracy_df (pd.DataFrame)
"""
pred_df = (
df_cells.groupby(["drug_name", "top" + str(k) + "_accuracy"]).size().unstack().fillna(0)
)
pred_arr = pred_df.values / pred_df.values.sum(axis=1).reshape(-1, 1) * 100
perc_pred_df = pd.DataFrame(pred_arr, index=pred_df.index, columns=pred_df.columns)
accuracy_df = (
perc_pred_df.sort_values(by=0, ascending=True)[1].to_frame().reset_index()
)
accuracy_df.rename(columns = {1:'top@'+ str(k) + '_accuracy'}, inplace = True)
accuracy_df['target'] = accuracy_df['drug_name'].map(name_to_target)
accuracy_df['drug_class'] = accuracy_df['drug_name'].map(name_to_class)
accuracy_df['sample_class'] = accuracy_df['drug_name'] + '_' + accuracy_df['drug_class'].str.lower()
return accuracy_df
def freedman_diaconis_rule(arr):
"""
Calculates the number of bins for a histogram using the Freedman-Diaconis Rule.
Modified from https://github.com/justinbois/bebi103/blob/master/bebi103/viz.py
"""
h = 2* (np.percentile(arr, q=75) - np.percentile(arr, q = 25))/ np.cbrt(len(arr))
if h == 0.0:
n_bins = 3
else:
n_bins = int(np.ceil((arr.max() - arr.min()) / h))
return n_bins
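# --- Hedged usage sketch (added illustration, not part of the original API) ---
# With ~1000 draws from a standard normal, the Freedman-Diaconis rule above
# typically suggests on the order of a few dozen bins (the exact count
# depends on the random draw).
def _example_fd_bins(n_samples=1000, seed=0):
    rng = np.random.default_rng(seed)
    return freedman_diaconis_rule(rng.normal(size=n_samples))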
def l1_norm(arr1, arr2):
'''
Compute the L1-norm between two histograms.
It uses the Freedman-Diaconis criterion to determine the number of bins.
It will be positive if the mean(arr2) > mean(arr1) following the convention
from PopAlign.
Modified from https://github.com/thomsonlab/popalign/blob/master/popalign/popalign.py
Parameters
----------
arr1 (array-like)
Distribution of gene for population 1.
arr2 (array-like)
Distribution of gene for population 2.
Returns
-------
l1_score(float)
L1 norm between normalized histograms of gene distributions.
Example
-------
import numpy as np
from sc_utils import sc
x = np.random.normal(loc = 0, size = 100)
y = np.random.normal(loc = 3, size = 100)
sc.l1_norm(x, y)
>>>1.46
'''
if len(arr1) == len(arr2):
nbins = freedman_diaconis_rule(arr1)
else:
nbins_1 = freedman_diaconis_rule(arr1)
nbins_2 = freedman_diaconis_rule(arr2)
nbins = int((nbins_1 + nbins_2)/2)
max1, max2 = np.max(arr1), np.max(arr2) # get max values from the two subpopulations
max_ = max(max1,max2) # get max value to define histogram range
if max_ == 0:
return 0
else:
b1, be1 = np.histogram(arr1, bins=nbins, range=(0,max_)) # compute histogram bars
b2, be2 = np.histogram(arr2, bins=nbins, range=(0,max_)) # compute histogram bars
b1 = b1/len(arr1) # scale bin values
b2 = b2/len(arr2) # scale bin values
if arr1.mean()>=arr2.mean(): # sign l1-norm value based on mean difference
l1_score = -np.linalg.norm(b1-b2, ord=1)
return l1_score
else:
l1_score = np.linalg.norm(b1-b2, ord=1)
return l1_score
def ecdf(x) -> Tuple[np.ndarray, np.ndarray]:
'''
Returns ECDF of a 1-D array.
Params
------
x(array or list)
Input array, distribution of a random variable.
Returns
-------
x_sorted : sorted x array.
ecdf : array containing the ECDF of x.
'''
n = len (x)
x_sorted = np.sort(x)
ecdf = np.linspace(0, 1, len(x_sorted))
return x_sorted, ecdf
def get_stats(distro_x, distro_y):
"""
Returns statistics from testing whether `distro_x` takes larger values than `distro_y`.
Returns
-------
ks, pval_ks, l1_score
"""
# For a given value of the data, ECDF of sample 1 takes values less than sample 2
ks, pval_ks = st.ks_2samp(distro_x, distro_y, alternative="less")
# Positive if mean(distro_x) > mean(distro_y)
l1_score = l1_norm(distro_y, distro_x)
return ks, pval_ks, l1_score
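# --- Hedged usage sketch (added illustration, not part of the original API) ---
# Compares two synthetic, non-negative "expression-like" distributions with
# `get_stats`. A positive l1_score (and a small KS p-value) indicates that
# `distro_x` tends to take larger values than `distro_y`.
def _example_distribution_shift(seed=0):
    rng = np.random.default_rng(seed)
    distro_y = rng.gamma(shape=2.0, scale=1.0, size=500)   # baseline population
    distro_x = rng.gamma(shape=2.0, scale=1.5, size=500)   # shifted population
    ks, pval_ks, l1_score = get_stats(distro_x, distro_y)
    return ks, pval_ks, l1_score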
# def get_ix_drug(drugbank, drug_name, verbose = False)->np.ndarray:
# """Returns index of molecule in drugbank."""
# try:
# ix_ = drugbank[drugbank['drug_name'] ==drug_name].index.values[0]
#
# except :
# ix_ = drugbank[drugbank['drug_name'].str.contains(drug_name)].index.values[0]
# if verbose:
# print('Getting drugbank index for :%s'%drugbank.iloc[ix_]['drug_name'] )
# return ix_
#
# def get_ix_cells(adata, drug_name, verbose = False)->np.ndarray:
# """Returns index of cells perturbed by `drug_name` in adata"""
# try:
# ix_cells = adata[adata.obs['drug_name']==drug_name].obs.index.values
# except:
# ix_cells = adata[adata.obs['drug_name'].str.contains(drug_name)].obs.index.values
# if verbose :
# print('Getting adata cell indices for :%s'%adata[ix_cells[0]].obs['drug_name'].values[0] )
# return ix_cells
#
#
# def get_cosine_distribution_drug(drugbank, adata, query_drug_name, perturb_drug_name, cosine_arr, verbose = False):
# """
# Returns the cosine similarity distribution for the cells perturbed with
# `perturb_drug_name` (indexed in adata), and a molecule `query_drug_name` (indexed in drugbank).
# If `query_drug_name` and `perturb_drug_name` are the same, it returns the
# cosine similarity of the given molecule against the cells perturbed by it.
#
# Note: Expects cosine_arr to be of shape (n_mols, n_cells)
#
# Params
# ------
# query_drug_name (str)
# Name of the drug to query against.
#
# perturb_drug_name (str)
# Name of the drug that perturbed the cells to retrieve.
#
# Returns
# -------
# cosine_similarity_distribution
#
# Note:Expects cosine_arr to be shape (mols, cells)
# """
# ix_drug = get_ix_drug(drugbank, query_drug_name, verbose)
# ix_cells = get_ix_cells(adata, perturb_drug_name, verbose)
# cosine_similarity_distribution = cosine_arr[ix_drug, ix_cells]
#
# return cosine_similarity_distribution
#
#
# def get_similarity_drug_one_vs_all(
# drugbank,
# adata,
# drug_name,
# cosine_arr,
# verbose = False
# ):
# """
# Returns the cosine similarity distribution of a molecule with cells perturbed by it,
# and the cos. sim. dist. of the molecule with cells coming from other samples.
#
# Expects cosine_arr to be of shape (n_mols, n_cells)
# """
# n_mols, n_cells = cosine_arr.shape
# ix_drug, ix_cells = get_ix_drug(drugbank, drug_name), get_ix_cells(adata, drug_name)
#
# # Get cosine similarity distribution of a drug with itself
# cosine_cells_drug = cosine_arr[ix_drug, ix_cells]
#
# # Get the indices of all perturbed with other molecules but `drug_name`
# other_cells_ix = np.array(list(set(np.arange(n_cells)) - set(ix_cells)))
# cosine_others = cosine_arr[ix_drug, other_cells_ix]
# return cosine_cells_drug, cosine_others
#
#
# def get_cosine_distribution_df(
# drug_name,
# drugbank,
# adata,
# cosine_arr,
# drugbank_to_selleck,
# n_top = 10000,
# cols_viz = ['sample_class', 'target', 'drug_class', 'pn', 'drug_name'],
# filter_by = 'sample_class',
# n_cells_filter = 5,
# anti = False,
# return_acc_only = False
# )->pd.DataFrame:
# """
# Returns an annotated dataframe of the cells with highest cosine similarity to
# a query molecule `drug_name`.
#
# Notes: Assumes an adata and drugbank (dataframe) exist and that their indices
# have been reset.
#
# Params
# ------
# drug_name (str)
# n_top (int, default= 10000,)
# Number of cells with highest similarity to retrieve.
#
# cols_viz (list, default= ['sample_class', 'target', 'drug_class', 'pn'], )
# Which columns to use for visualization. Cols have to be in adata.
#
# filter_by (str, default= 'sample_class')
# Column to filter noise cells.
#
# n_cells_filter (int, default = 5,)
# Lower bound threshold above to which filter noise cells, i.e.
# if a sample has less than `n_cells_filter` in the top cells,
# that sample won't be in the final visualization.
#
# anti (bool = False)
# Whether to reverse order, get cells with lowest cosine similarity.
#
# Returns
# -------
# df_viz
# """
# ix_ = get_ix_drug(drugbank, drug_name, verbose = False)
#
# name_of_drug = drugbank.iloc[ix_]['drug_name']
# name_of_drug = drugbank_to_selleck[name_of_drug]
# print('Returning predictions for %s'%name_of_drug)
#
# # Reverse order : get cells with lowest cosine sim
# if anti:
# ix_top_cells = np.argsort(cosine_arr[ix_])[:n_top]
# else:
# ix_top_cells = np.argsort(cosine_arr[ix_])[::-1][:n_top]
#
# # Make a dataframe containing the cosine similarities and cols_viz
# df_viz = adata[ix_top_cells].obs[cols_viz]
#
# #try:
# sample_val_counts = df_viz.drug_name.value_counts()
# if name_of_drug in sample_val_counts.index.values:
# n_correct = sample_val_counts[name_of_drug]
#
# acc = n_correct / sample_val_counts.sum() * 100
# print('Accuracy: %.3f'%acc)
# if return_acc_only:
# return acc
# else:
# pass
# else:
# print('Accuracy: 0')
# if return_acc_only:
# return 0
# #except:
# # pass
#
# df_viz['cosine_similarity'] = cosine_arr[ix_][ix_top_cells]
# val_counts = df_viz[filter_by].value_counts()
# samples_in = val_counts[val_counts > n_cells_filter].index.values
#
# return df_viz[df_viz[filter_by].isin(samples_in)]
#
class EvaluateCrossRetrieval:
"""
    Base class to evaluate cross-modality retrieval in a joint embedding
    of cells and molecules.
It is designed for evaluation in a test set, comprised of a tuple
(test molecules, test cells). Nevertheless, one can pass the full datasets
(i.e. train+val+test) and still leverage the functionalities.
    TO-DO: currently designed with a cosine vs. distance mode switch. Another design
    that could make the code easier to read is to define cosine dist = 1 - cos_theta
"""
def __init__(
self,
df_drugs,
adata,
model,
model_type = 'nn',
dataset = 'thomsonlab',
drugs_col_name = 'name',
precomputed_mol_embeddings = False,
embedding_dim = 64
):
"""
Params
------
df_drugs (pd.DataFrame)
Annotated version of the drugs in the test set.
It must ideally have the following in its columns:
['drug_class', 'target', 'drug_name']
        adata (anndata.AnnData)
            Test AnnData containing the count matrix as .X and the projected cells in its
            `.obs` dataframe.
model (nn.Module)
Joint embedding model. to-do: extend functionality for CCA or other models.
model_type(str, default = 'nn')
Running neural net or CCA model.
dataset(str, default = 'thomsonlab')
            Sets the formatting options for a specific dataset.
        drugs_col_name (str, default = 'name')
            Column in `df_drugs` that holds the drug names, if different from 'drug_name'.
Notes
-----
Assumptions:
- the `df_drugs` and the `adata` have the same annotation for the drug
names.
- the cell data has the same columns (input features) for which both the
cell encoder and the joint embedding model were trained on.
Attributes
----------
name_to_target(dict): drug_name (key) -> drug target (value)
        name_to_class(dict): drug_name (key) -> drug class (value)
sample_counts(dict): Number of cells for each perturbation / drug / sample.
drug_name -> # of cells perturbed by drug
        sample_counts_idx (dict): drug_index -> # of cells perturbed by drug
ix_samples_cell (np.array): Perturbation's index of each cell.
"""
self.cuda = torch.cuda.is_available()
self.embedding_dim = embedding_dim
# Format column names
if 'drug_name' not in adata.obs.columns:
if dataset == 'thomsonlab':
adata.obs['drug_name'] = adata.obs['sample_id'].apply(
lambda x: sample_to_name(str(x), eliminate_parens = True, eliminate_hcl = False)
).str.lower()
elif dataset == 'sciplex':
adata.obs['drug_name'] = adata.obs['product_name'].apply(
lambda x: sample_to_name(str(x), eliminate_parens = True, eliminate_hcl = False)
).str.lower()
if 'drug_name' not in df_drugs:
df_drugs['drug_name'] = df_drugs[drugs_col_name].apply(
lambda x: sample_to_name(str(x), eliminate_parens = True, eliminate_hcl = False)
).str.lower()
df_drugs_test = df_drugs[df_drugs.drug_name.isin(adata.obs.drug_name.unique())]
if dataset == 'sciplex':
df_drugs_test.drop_duplicates(subset = ['drug_name'], inplace = True)
# Check drugs in both datasets coincide
#assert len(set(adata.obs.drug_name.unique()) - set(df_drugs_test.drug_name.unique())) == 0, 'Drugs in both datasets do not coincide'
# We will use numpy-indexing so reset them
adata.obs.reset_index(drop = True, inplace = True)
df_drugs_test.reset_index(drop = True, inplace = True)
# Assign an index to each drug.
codes, unique_drugs = np.arange(len(df_drugs_test)), df_drugs_test.drug_name.values #pd.factorize(df_drugs_test['drug_name'])
# Make sure we only have unique drugs
assert len(unique_drugs) == df_drugs_test.drug_name.unique().shape[0]
df_drugs_test['sample_code'] = codes
self.drugbank = df_drugs_test
self.adata = adata
self.model = model
self.ix_to_name = dict(zip(codes, unique_drugs))
#dict(df_drugs_test[['sample_code', 'drug_name']].values)
self.name_to_ix = dict(zip(unique_drugs, codes))
#{val:key for key,val in self.ix_to_name.items()}
self.sample_counts = adata.obs.drug_name.value_counts()
self.sample_counts_idx = {
self.name_to_ix[sample]: self.sample_counts[sample] \
for sample in self.sample_counts.keys()
}
self.test_drugs = self.drugbank.drug_name.values
# Make drug target and drug class annotation dictionaries
if dataset == 'sciplex':
self.name_to_target = dict(adata.obs[['drug_name', 'target']].values)
self.adata.obs['target'] = self.adata.obs.drug_name.apply(
lambda x: self.name_to_target[x] if x in self.name_to_target.keys() else 'undefined'
)
else:# thomsonlab
self.drugbank.rename(columns = {'Target': 'target'}, inplace = True)
self.name_to_target = dict(df_drugs_test[['drug_name', 'target']].values)
self.name_to_class = dict(df_drugs_test[['drug_name','drug_class']].values)
self.adata.obs['drug_class'] = self.adata.obs.drug_name.apply(
lambda x: self.name_to_class[x] if x in self.name_to_class.keys() else 'undefined'
)
self.adata.obs['target'] = self.adata.obs.drug_name.apply(
lambda x: self.name_to_target[x] if x in self.name_to_target.keys() else 'undefined'
)
        self.drugbank['name_class'] = self.drugbank['drug_name'] + '_' + self.drugbank['drug_class']
self.test_drugs_ixs = [self.name_to_ix[drug] for drug in self.test_drugs]
# For each cell, get its perturbation's index
self.ix_samples_cell = np.array(
[self.name_to_ix[drug] for drug in self.adata.obs['drug_name'].values]
)
self.drugbank['n_cells'] = self.drugbank.drug_name.map(self.sample_counts)
#self.adata = self.adata.copy()
# Assign some colormaps for plotting
self.colormaps = {
'drug_class': 'Blues_r',
'within_class_acc': 'Blues_r',
'Target': 'Oranges_r', 'target': 'Oranges_r',
'pn': 'Greens_r',
'pathway': 'Purples_r'
}
if precomputed_mol_embeddings:
self.mol_embedding = self.drugbank[['dim_' + str(i) for i in range(1,embedding_dim +1)]].values
self.precomputed_mol_embeddings = precomputed_mol_embeddings
else:
self.precomputed_mol_embeddings = None
def eval_pipeline(
self,
plot = False,
mode = 'cosine',
n_cores = 2
):
"""
Runs all evaluation metrics.
"""
print('Computing mol2cell & cell2mol accuracy from %s matrix.'%mode)
if self.precomputed_mol_embeddings is None:
self.project_molecules()
if mode == 'cosine':
self.compute_cosine_arr(return_ = False)
elif mode == 'l2':
self.compute_dist_matrix()
else:
raise NameError('Mode to be one of [`cosine`, `l2`]. Input : %s'%mode)
# Saves mol2cell accuracies in self.m2c_acc and in self.drugbank
self.eval_mol2cell_accuracy(mode= mode, return_ = False)
        print('Finished computing mol2cell accuracies.')
# Saves results in self.df_c2m for top5 accuracy
self.eval_cell2mol_accuracy(mode = mode)
self.get_acc_df_cell2mol()
print('Finished computing cell2mol accuracies.')
# Run KS tests
print('Running KS test...')
self.run_ks_one_vs_all(n_cores,mode=mode)
print('Finished KS test.')
# Run mol2cell above mean
self.eval_m2c_above_mean_all(mode=mode)
# Aggregate metrics
self.eval_summary()
print('Finished pipeline.')
# Plot results !
if plot:
pass
def eval_summary(self, ks_pval_thresh = 1e-8, above_mean_thresh = 95, return_= False):
"Assumes eval_pipeline() has been executed to calculate all metrics."
# Average top5 cell2mol accuracy
cell2mol_top5_avg = np.mean(self.c2m_acc_df.top5_accuracy)
# Average mol2cell accuracy
mol2cell_avg = np.mean(self.m2c_acc)
        # Percentage of molecules whose own cells show a significantly
        # learnt relationship (KS test of own cells vs all other cells)
percentage_ks_low = len(
self.drugbank[self.drugbank.ks_pval < ks_pval_thresh]
) / len(self.test_drugs) * 100
# Percentage of cells above the mean of mol2cell distribution
percentage_above_mean = len(
self.drugbank[self.drugbank.acc_above_mean > above_mean_thresh]
) / len(self.test_drugs) * 100
metrics_dict = {
'cell2mol_top5_acc': cell2mol_top5_avg,
'mol2cell_acc': mol2cell_avg,
'perc_ks_significant': percentage_ks_low,
'perc_above_mean': percentage_above_mean
}
self.summary_stats = metrics_dict
if return_:
return metrics_dict
def get_ix_drug(self, drug_name):
return self.name_to_ix.get(drug_name, 'None')
def get_ix_cells(self, drug_name, verbose = False):
try:
ix_cells = self.adata[self.adata.obs['drug_name']==drug_name].obs.index.values
except:
ix_cells = self.adata[self.adata.obs['drug_name'].str.contains(drug_name)].obs.index.values
if verbose :
            print('Getting adata cell indices for :%s'%self.adata[ix_cells[0]].obs['drug_name'].values[0] )
return ix_cells.astype(int)
def compute_rdkit_mol_from_smiles(self):
self.drugbank['mol'] = self.drugbank.SMILES.apply(
Chem.MolFromSmiles
)
self.name_to_mol = dict(self.drugbank[['drug_name', 'mol']].values)
@torch.no_grad()
def project_molecules(self, _return = False):
"""
Computes molecule embeddings in self.drugbank df.
"""
#Get Rdkit mols in place
self.compute_rdkit_mol_from_smiles()
labels_tensor = torch.arange(len(self.drugbank))
drugs_tensor = get_drug_batch(
labels_tensor,
self.name_to_mol,
self.ix_to_name,
cuda = self.cuda
)
self.model.eval()
mol_embedding = self.model.molecule_encoder.project(
Batch.from_data_list(drugs_tensor)
)
if self.cuda: # bring to CPU
mol_embedding=mol_embedding.cpu().numpy()
else:
mol_embedding = mol_embedding.numpy()
self.mol_embedding = mol_embedding
if _return:
return mol_embedding
# refactoring
# def compute_cosine_arr(self, return_ = False, project_mols = False, n_dims = 64):
# """
# Computes cosine array. It stores an output array
# """
# if project_mols:
# mol_embedding = self.project_molecules()
# else:
# mol_embedding = self.drugbank[['dim_' + str(i) for i in range(1,n_dims +1)]].values
#
# cell_embedding = self.adata.obs[['dim_' + str(i) for i in range(1, n_dims+1)]].values
#
# #self.mol_embedding = mol_embedding
# mol_embedding = self.mol_embedding
#
# self.cell_embedding = cell_embedding
#
# # Normalize to make row vectors
# mol_embedding_norm = mol_embedding / np.linalg.norm(mol_embedding, axis = 1).reshape(-1,1)
# cell_embedding_norm = cell_embedding / np.linalg.norm(cell_embedding, axis = 1).reshape(-1,1)
#
# # Compute cosine similarity, shape (molecules, cells)
# cosine_arr = np.matmul(mol_embedding_norm, cell_embedding_norm.T)
#
# #print('Shape of cosine similarity array: {0}'.format(cosine_arr.shape))
# self.cosine_arr = cosine_arr
#
# if return_:
# return cosine_arr
def compute_cosine_arr(self, return_ = False):
"""
Computes cosine array. It stores an output array
"""
# Extracts the molecule embeddings if already in the object
try :
mol_embedding = self.mol_embedding
except AttributeError:
print('Projecting molecules using model.')
mol_embedding = self.project_molecules()
cell_embedding = self.adata.obs[['dim_' + str(i) for i in range(1, self.embedding_dim+1)]].values
#self.mol_embedding = mol_embedding
mol_embedding = self.mol_embedding
self.cell_embedding = cell_embedding
# Normalize to make row vectors
mol_embedding_norm = mol_embedding / np.linalg.norm(mol_embedding, axis = 1).reshape(-1,1)
cell_embedding_norm = cell_embedding / np.linalg.norm(cell_embedding, axis = 1).reshape(-1,1)
# Compute cosine similarity, shape (molecules, cells)
cosine_arr = np.matmul(mol_embedding_norm, cell_embedding_norm.T)
#print('Shape of cosine similarity array: {0}'.format(cosine_arr.shape))
self.cosine_arr = cosine_arr
if return_:
return cosine_arr
def compute_dist_matrix(self, run_with_torch = False, return_=False):
"""
Computes the euclidean distances between cells and molecules,
and saves it as an attribute.
It assumes compute_cosine_arr() has already been run.
"""
try :
mol_embedding = self.mol_embedding
        except AttributeError:
print('Projecting molecules using model.')
mol_embedding = self.project_molecules()
try:
self.cell_embedding = self.adata.obs[['dim_' + str(i) for i in range(1, self.embedding_dim+1)]].values
except:
raise ValueError('Could not retrieve cell embeddings from adata, check adata or n_dims arg.')
if run_with_torch:
self.D = generalized_distance_matrix_torch(
torch.from_numpy(mol_embedding),
torch.from_numpy(self.cell_embedding)
)
else:
self.D = generalized_distance_matrix(
mol_embedding, self.cell_embedding
)
if return_:
return self.D
def get_top_ixs(self, data_type = 'mols', mode = 'cosine', top_k = 15):
"Returns the top indices from a cosine similarity or L2 distance matrix."
axis = 1 if data_type == 'mols' else 0
#print(axis)
largest = True if mode == 'cosine' else False
#largest = mode == 'cosine'
if data_type == 'mols':
top_k = self.sample_counts.max()
if mode == 'cosine':
top_ixs = (
torch.from_numpy(self.cosine_arr)
.topk(k=top_k, largest=largest, dim=axis)
.indices.numpy()
)
elif mode == 'l2': # distance matrix
top_ixs = (
torch.from_numpy(self.D)
.topk(k=top_k, largest=largest, dim=axis)
.indices.numpy()
)
else:
raise NameError('Mode %s is not implemented. Choose one of [`cosine`, `l2`.]'%mode)
return top_ixs
def get_similarity_drug_one_vs_all(self, drug_name, mode = 'cosine')->Tuple[np.ndarray, np.ndarray]:
"""
Returns the cosine similarity distributions of a drug with cells perturbed by it,
and all other cells coming from other samples.
"""
ix_drug, ix_cells = self.get_ix_drug(drug_name), self.get_ix_cells(drug_name)
if mode == 'cosine':
n_mols, n_cells = self.cosine_arr.shape
            # Get the indices of all cells perturbed by molecules other than `drug_name`
other_cells_ix = np.array(list(set(np.arange(n_cells)) - set(ix_cells)))
#similarity_matrix = self.cosine_arr
# Get cosine similarity/ l2 distance distribution of a drug with itself
similarities_cells_drug = self.cosine_arr[ix_drug, ix_cells]
similarities_others = self.cosine_arr[ix_drug, other_cells_ix]
elif mode == 'l2':
n_mols, n_cells = self.D.shape
            # Get the indices of all cells perturbed by molecules other than `drug_name`
other_cells_ix = np.array(list(set(np.arange(n_cells)) - set(ix_cells)))
#similarity_matrix = self.D
similarities_cells_drug = self.D[ix_drug, ix_cells]
similarities_others = self.D[ix_drug, other_cells_ix]
else:
raise NameError('Mode %s not implemented.'%mode)
# Get cosine similarity/ l2 distance distribution of a drug with itself
# similarities_cells_drug = similarity_matrix[ix_drug, ix_cells]
#
# # Get the indices of all perturbed with other molecules but `drug_name`'s
# other_cells_ix = np.array(list(set(np.arange(n_cells)) - set(ix_cells)))
# similarities_others = similarity_matrix[ix_drug, other_cells_ix]
return similarities_cells_drug, similarities_others
def eval_mol2cell_accuracy(self, mode= 'cosine', return_ = False, k = 15):
top_ixs_mols = self.get_top_ixs(data_type = 'mols', mode = mode, top_k = k)
# Initialize molecule accuracies list
accs = []
for i, drug_ix in enumerate(self.test_drugs_ixs):
# Get the drug indices for each of the top cells given molecule query
top_ix_mol = self.ix_samples_cell[top_ixs_mols[i]]
# Get only the top n indices, for n the number of cells sampled in experiment
top_ix_mol_normalized = top_ix_mol[: int(self.sample_counts_idx[drug_ix])]
# Acc : fraction of correct cells
acc = np.sum(top_ix_mol_normalized == drug_ix) / (self.sample_counts_idx[drug_ix]) * 100
accs.append(acc)
self.m2c_acc = accs
self.drugbank['m2c_accuracy'] = accs
if return_:
return accs
def eval_cell2mol_accuracy(self, mode = 'cosine', k = 15):
top_ixs_cells = self.get_top_ixs(data_type = 'cells', mode = mode, top_k=k).T
acc_indicator = np.zeros((self.adata.n_obs, 5))
if isinstance(self.test_drugs_ixs, list):
self.test_drugs_ixs = np.array(self.test_drugs_ixs)
for i, sample_ix in tqdm.tqdm(enumerate(self.ix_samples_cell)):
# Get top 1, top3, top5, 10, and 15 accuracy
acc_indicator[i, 0] = 1 if sample_ix == self.test_drugs_ixs[top_ixs_cells[i, 0]] else 0
acc_indicator[i, 1] = 1 if sample_ix in self.test_drugs_ixs[top_ixs_cells[i, :3]] else 0
acc_indicator[i, 2] = 1 if sample_ix in self.test_drugs_ixs[top_ixs_cells[i, :5]] else 0
acc_indicator[i, 3] = 1 if sample_ix in self.test_drugs_ixs[top_ixs_cells[i, :10]] else 0
acc_indicator[i, 4] = 1 if sample_ix in self.test_drugs_ixs[top_ixs_cells[i, :15]] else 0
self.c2m_global_acc = acc_indicator.sum(axis = 0)/ self.adata.n_obs *100
ks = [1, 3, 5, 10, 15]
df_acc = pd.DataFrame(acc_indicator, columns = ['top' + str(i) + '_accuracy' for i in ks])
self.adata.obs = pd.concat([self.adata.obs, df_acc.set_index(self.adata.obs.index)], axis = 1)
def eval_m2c_mean(self, drug_name, centrality_measure = 'mean', mode = 'cosine'):
"""
        Computes the fraction of cells whose cosine similarity w.r.t. their own molecule
        is higher than the mean of the distribution across all cells.
"""
drug_ix = self.get_ix_drug(drug_name)
# Get cosine distribution for drug and all others
sim_distro_drug, sim_distro_others = self.get_similarity_drug_one_vs_all(
drug_name, mode=mode
)
if centrality_measure=='mean':
central_measure = np.concatenate([sim_distro_drug, sim_distro_others]).mean()
elif centrality_measure=='median':
central_measure = np.median(np.concatenate([sim_distro_drug, sim_distro_others]))
else:
            raise NotImplementedError('%s is not implemented'%centrality_measure)
if mode == 'cosine':
n_significant = (sim_distro_drug > central_measure).sum()
# Distance of correct molecule lower than mean of whole distribution
elif mode == 'l2':
n_significant = (sim_distro_drug < central_measure).sum()
percent_significant = n_significant / len(sim_distro_drug) * 100
return percent_significant
def eval_m2c_above_mean_all(self, centrality_measure = 'mean', mode = 'cosine',
n_cores = 4, return_ = False):
"Evaluate above-mean accuracy for all drugs."
acc_arr = Parallel(n_jobs = n_cores)(
delayed(self.eval_m2c_mean)(drug, centrality_measure, mode)
for drug in tqdm.tqdm(
self.test_drugs, position = 0, leave = True
)
)
self.drugbank['acc_above_mean'] = acc_arr
if return_:
return acc_arr
def run_ks_test(self, drug_name, mode = 'cosine'):
"Returns statistics of running one vs all test for a given drug."
own, others = self.get_similarity_drug_one_vs_all(drug_name, mode= mode)
if mode == 'cosine':
# Test for true distro of cosine sim having higher values
ks, pval_ks, l1_score = get_stats(own, others)
elif mode == 'l2':
# Test for true distro of l2 distances having lower values
ks, pval_ks, l1_score = get_stats(others, own)
return ks, pval_ks, l1_score
def run_ks_one_vs_all(
self,
n_cores = 4,
mode = 'cosine',
stat_metric = 'ks_pval',
thresh_stat = 1e-4,
return_ = False
):
"""
Returns results from testing the mol2cell cosine similarity distributions of a drug
with cells perturbed by it, and other cells.
Params
------
n_cores (int, default = 4)
Number of processors to use for the parallellization.
stat_metric (str, default = 'ks_pval')
cols (list)
Notes
-----
        The rationale is that the cosine similarity between a given drug and the cells perturbed by it
        should be higher than to all other cells if the model has learnt a meaningful relationship.
        Runs in parallel using joblib.
"""
results = Parallel(n_jobs = n_cores)(
delayed(self.run_ks_test)(drug,mode) for drug in tqdm.tqdm(
self.test_drugs, position = 0, leave = True
)
)
self.df_stat_tests = pd.DataFrame(
results, columns = ['ks_score', 'ks_pval', 'l1_score']
)
self.drugbank = pd.concat([self.drugbank, self.df_stat_tests], axis = 1)
if "pval" in stat_metric:
top_drug_df = self.drugbank[self.drugbank[stat_metric] < thresh_stat].sort_values(
by=stat_metric, ascending=True
)
# use score
elif "score" in stat_metric:
            top_drug_df = self.drugbank[self.drugbank[stat_metric] > thresh_stat].sort_values(
by=stat_metric, ascending=False
)
else:
raise AssertionError('metric should be either pvalue or score.')
self.top_drugs_ks = top_drug_df
if return_:
return top_drug_df
def get_cosine_distribution_df(
self,
drug_name,
n_top = None,
cols_viz = ['drug_name', 'target', 'drug_class'],
return_acc_only = False,
filter_by = 'drug_name',
n_cells_filter = 10,
anti = False
):
"""
Returns a dataframe of the cells closest to a molecule, grouped by sample.
Params
------
filter_by (str, default = 'drug_name')
Column to filter out spurious high similarity.
"""
if n_top is None:
n_top = self.adata[self.adata.obs.drug_name == drug_name].n_obs
ix_ = self.get_ix_drug(drug_name)
#name_of_drug = self.name_to_ix[ix_] #drugbank.iloc[ix_]['drug_name']
#name_of_drug = drugbank_to_selleck[name_of_drug]
#print('Returning predictions for %s'%name_of_drug)
# Reverse order : get cells with lowest cosine sim
if anti:
ix_top_cells = np.argsort(self.cosine_arr[ix_])[:n_top]
else:
ix_top_cells = np.argsort(self.cosine_arr[ix_])[::-1][:n_top]
# Make a dataframe containing the cosine similarities and cols_viz
df_viz = self.adata[ix_top_cells].obs[cols_viz]
df_viz['drug_name'] = df_viz['drug_name'].astype(str)
#try:
sample_val_counts = df_viz.drug_name.value_counts()
if drug_name in sample_val_counts.index.values:
n_correct = sample_val_counts[drug_name]
acc = n_correct / sample_val_counts.sum() * 100
print('Accuracy: %.3f'%acc)
if return_acc_only:
return acc
else:
pass
else:
print('Accuracy: 0')
if return_acc_only:
return 0
#except:
# pass
df_viz['cosine_similarity'] = self.cosine_arr[ix_][ix_top_cells]
val_counts = df_viz[filter_by].value_counts()
samples_in = val_counts[val_counts > n_cells_filter].index.values
return df_viz[df_viz[filter_by].isin(samples_in)]
    def plot_boxplot_m2c(self, plot = 'm2c_accuracy', cat = 'drug_class', filt_by = 0):
#plot = "accuracy"
#by = "drug_class"
fig = plt.figure(figsize=(3, 4))
sns.boxplot(
data=self.drugbank[self.drugbank[plot] > filt_by].sort_values(
by=[plot, cat], ascending=False
),
x=plot,
y=cat,
color="lightgrey", # alpha = 0.4
)
sns.stripplot(
data=self.drugbank[self.drugbank[plot] > filt_by].sort_values(
by=[plot, cat], ascending=False
),
x=plot,
y=cat,
palette=self.colormaps[cat],
)
return fig
def plot_ks(self, drug_name, export = None, mode = 'cosine', path_figs= None, model_name= ''):
"To run after executing `run_ks_one_vs_all`"
data = self.drugbank[self.drugbank.drug_name == drug_name]
#drug_ = data['drug_name']
#print(drug_)
drug = drug_name.split()[0]
#print(drug)
#acc = data['accuracy']
#within_class_acc = data['within_class_acc']
ks, pval, l1_score, acc,perc_above_mean = data[['ks_score', 'ks_pval', 'l1_score', 'm2c_accuracy', 'acc_above_mean']].squeeze()
try:
pval = np.log10(pval)
except:
pval = 0
own, others = self.get_similarity_drug_one_vs_all(drug_name, mode = mode)
sorted_drug, ecdf_drug = ecdf(own)
sorted_other, ecdf_other = ecdf(others)
#plt.figure(figsize = (3.5, 1.7))
plt.plot(sorted_drug, ecdf_drug, label = drug + ' cells', color = 'dodgerblue')
plt.plot(sorted_other, ecdf_other, label = 'cells from other samples', color = 'lightgrey')
plt.legend(
#title = 'KS: %.2f, pval: %.3f, l1: %.2f'%(ks, pval, l1_score),
bbox_to_anchor = (1.04, 0), loc = 'lower left'
)
plt.title('One-vs-rest test KS: %.2f \n KS pval: 1x10^ %.1f, l1: %.2f \n acc mol2cell: %.1f, perc above mean: %.1f'%(
ks, pval, l1_score, acc, perc_above_mean
),)
plt.xlabel(r'%s to %s mol.'%(mode,drug))
plt.ylabel('ECDF')
if export:
plt.savefig(
os.path.join(path_figs, drug + '_ks_test_%s.png'%model_name), bbox_inches = 'tight', dpi = 230
);
def plot_ks_bokeh(self):
raise NotImplementedError
def plot_ks_bokeh_catplot(self):
raise NotImplementedError
# df_cos_viz = evaluator.get_cosine_distribution_df(
# 'cerdulatinib',n_top = adata.n_obs, n_cells_filter = 0
# )
# df_cos_viz['is_drug'] = df_cos_viz.drug_name.apply(
# lambda x: 'cerdulatinib' if x == 'cerdulatinib' else 'other'
# )
# pal = np.array(["dodgerblue", "lightgrey"])
# drug_name = 'cerdulatinib'
# ix_sort = np.argsort([drug_name, 'other'])
# ix_sort
# show(
# bokeh_catplot.ecdf(
# data=df_cos_viz.sort_values(by = 'is_drug'),
# val="cosine_similarity",
# cats="is_drug",
# marker_kwargs={"alpha": 0.3},
# tooltips=[
# ("drug_name", "@drug_name"),
# ("drug_class", "@drug_class"),
# ("cosine_similarity", "@cosine_similarity"),
# ],
# palette=list(pal[ix_sort]),
# )
# )
def plot_ks_all(self, export = True, path_figs= '../figs', model_name = ''):
"""
Plots ECDFs of cosine similarity distributions of correct drug vs all others.
Considers only top drugs. Assumes `run_ks()` has been called already.
"""
# Assert if self.top_drug_ks exists.
        for drug in self.test_drugs:
            self.plot_ks(drug, export=export, path_figs=path_figs, model_name=model_name)
def get_acc_df_cell2mol(self, k=5, return_ = True):
"""
Returns a top-k accuracy dataframe per sample.
Params
------
df_cells (pd.DataFrame)
Cell dataframe (from adata or df_embedding) that contains the top-k accuracy.
"""
df_cells = self.adata.obs
#name_to_target = None, name_to_class= None,
pred_df = (
df_cells.groupby(["drug_name", "top" + str(k) + "_accuracy"]).size().unstack().fillna(0)
)
pred_arr = pred_df.values / pred_df.values.sum(axis=1).reshape(-1, 1) * 100
perc_pred_df = pd.DataFrame(pred_arr, index=pred_df.index, columns=pred_df.columns)
accuracy_df = (
perc_pred_df.sort_values(by=0, ascending=True)[1].to_frame().reset_index()
)
accuracy_df.rename(columns = {1:'top'+ str(k) + '_accuracy'}, inplace = True)
try:
if self.name_to_target is not None:
                accuracy_df['target'] = accuracy_df['drug_name'].map(self.name_to_target)
#accuracy_df['name_target'] = accuracy_df['drug_name'] + '_' + accuracy_df['target'].str.lower()
except:
pass
try:
if self.name_to_class is not None:
                accuracy_df['drug_class'] = accuracy_df['drug_name'].map(self.name_to_class)
accuracy_df['sample_class'] = accuracy_df['drug_name'] + '_' + accuracy_df['drug_class'].str.lower()
except:
pass
self.c2m_acc_df = accuracy_df
if return_:
return accuracy_df
def plot_report(self, drug_name, cols_viz = ['drug_name', 'drug_class']):
plt.figure(figsize = (3, 2))
self.plot_ks(drug_name)
n_cells = self.adata[self.adata.obs.drug_name == drug_name].n_obs
print('Number of cells : %d for drug %s'%(n_cells, drug_name))
df_cos_viz = self.get_cosine_distribution_df(
drug_name, cols_viz = cols_viz ,n_top= n_cells
)
df_cos_viz['name_class'] = df_cos_viz['drug_name'] + '_' + df_cos_viz['drug_class']
plt.figure(figsize = (4, 2))
sns.boxplot(
data = df_cos_viz.sort_values(
by = 'cosine_similarity', ascending = False
),
x = 'cosine_similarity',
y = 'name_class',
palette = 'Blues_r'
)
df_mol_viz = self.drugbank[self.drugbank.drug_name.isin(df_cos_viz.drug_name.unique())]
im = Draw.MolsToGridImage(
df_mol_viz.mol.to_list(), legends = df_mol_viz.drug_name.to_list(), molsPerRow=4
)
return im
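# Illustrative usage sketch for the class above. `adata_test`, `df_drugs_test` and
# `joint_model` are placeholders for project objects built elsewhere: an AnnData with
# projected cells (dim_1..dim_64) and drug_name/target annotations in .obs, a drug
# table with SMILES/drug_class columns, and a trained joint-embedding model.
def _demo_cross_retrieval_eval(adata_test, df_drugs_test, joint_model):
    evaluator = EvaluateCrossRetrieval(
        df_drugs=df_drugs_test,
        adata=adata_test,
        model=joint_model,
        dataset='sciplex',
        embedding_dim=64,
    )
    evaluator.eval_pipeline(mode='cosine', n_cores=2)
    print(evaluator.summary_stats)
    return evaluator.drugbank[['drug_name', 'm2c_accuracy', 'ks_pval', 'acc_above_mean']]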
def _ensure_sparse_csr_matrix(x):
"""
Returns a scipy.csr_matrix given a numpy array or sparse matrix.
"""
if sparse.issparse(x):
if sparse.isspmatrix_csr(x):
return x
else:
return x.tocsr()
else:
if isinstance(x, np.ndarray):
return sparse.csr_matrix(x)
else:
raise ValueError('Adj mat should be sparse matrix or numpy array')
def csr_to_tensor(csr_mat):
"""Returns a torch.sparse array from a scipy.csr_matrix."""
coo = csr_mat.tocoo()
values = coo.data
indices = np.vstack((coo.row, coo.col))
i = torch.LongTensor(indices)
v = torch.FloatTensor(values)
shape = coo.shape
torch_tensor = torch.sparse.FloatTensor(i, v, torch.Size(shape))#.to_dense()
return torch_tensor
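# Minimal sketch: converts a small dense adjacency matrix to a torch sparse tensor
# through the scipy CSR path used by `CellGraph` below. The 3-node chain is made up.
def _demo_csr_to_tensor():
    adj = np.array([[0., 1., 0.],
                    [1., 0., 1.],
                    [0., 1., 0.]])
    adj_csr = _ensure_sparse_csr_matrix(adj)
    sparse_t = csr_to_tensor(adj_csr)
    print(sparse_t.to_dense())
    return sparse_t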
class CellGraph:
def __init__(
self,
gene_reg_net:nx.Graph,
adj_mat:sparse.csr_matrix,
gene_names:list,
supervised:bool = False,
force_cpu = False
):
self.supervised = supervised
self.cuda = False if force_cpu else torch.cuda.is_available()
self.device = torch.device('cpu') if force_cpu else try_gpu()
# Adjacency matrix
self.A = _ensure_sparse_csr_matrix(adj_mat)
# Edge_indices
edge_indices = csr_to_tensor(self.A).coalesce().indices()
self.edge_indices = edge_indices
def get_cell_graph(self, x, y=None, g = None):
"""
Returns a torch_geometric.data.Data
given a vector of counts "x" and a label "y".
Params
------
x (torch.Tensor)
Counts for each gene.
y (int, default = None)
Label for supervised models.
g (torch.Tensor)
"g_cols", extra columns for conditional generators.
"""
cell_graph = torch_geometric.data.Data(
x = torch.from_numpy(x).to(self.device),
y = y,
g = g,
edge_index = self.edge_indices.to(self.device)
)
return cell_graph
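# Illustrative sketch: wraps a toy 5-gene ring graph into `CellGraph` and builds one
# torch_geometric Data object from a random counts vector. The graph and counts are
# placeholders for a real gene-regulatory network and expression profile.
def _demo_cell_graph():
    n_genes = 5
    gene_names = ['g%d' % i for i in range(n_genes)]
    toy_net = nx.cycle_graph(n_genes)
    adj = nx.adjacency_matrix(toy_net)
    builder = CellGraph(toy_net, adj, gene_names, force_cpu=True)
    counts = np.random.rand(n_genes, 1).astype(np.float32)
    cell_graph = builder.get_cell_graph(counts)
    print(cell_graph)
    return cell_graph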
def get_dims_linear(weight_mat_layers, weight_dict):
"""
Returns a list of dimensions of layers of an mlp in decreasing order.
"""
dims = []
for ix, layer in enumerate(weight_mat_layers):
dim_out, dim_in = weight_dict[layer].shape
if ix == 0:
dims.extend([dim_in, dim_out])
else:
dims.append(dim_out)
return dims
def get_dims_conv(weight_mat_layers, weight_dict):
"""
Returns a list of dimensions of layers of an GraphConvNet in decreasing order.
"""
dims = []
for ix, layer in enumerate(weight_mat_layers):
dim_in, dim_out = weight_dict[layer].shape
if ix == 0:
dims.extend([dim_in, dim_out])
else:
dims.append(dim_out)
return dims
def infer_dims_from_state_dict(
weight_dict,#:collections.OrderedDict,
model_type = 'mlp'
)->list:
"""
Returns a list of dimensions for an mlp.
Params
------
    model_type (str, default = 'mlp')
        One of 'mlp' or 'gnn'.
"""
layer_names = list(weight_dict.keys())
if model_type == 'mlp':
weight_mat_layers = [layer for layer in layer_names if 'linear.weight' in layer]
dims = get_dims_linear(weight_mat_layers, weight_dict)
return dims
elif model_type == 'gnn':
conv_weight_layers = [
layer for layer in layer_names if 'graph' in layer and 'weight' in layer
]
dims_conv = get_dims_conv(conv_weight_layers, weight_dict)
linear_layers = [
layer for layer in layer_names if 'linear.weight' in layer
]
dims_lin = get_dims_linear(linear_layers, weight_dict)
return dims_conv, dims_lin
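# Minimal sketch: the '*.linear.weight' naming convention assumed by
# `infer_dims_from_state_dict` is mimicked with a hand-built state dict, so the
# layer names and shapes below are purely illustrative.
def _demo_infer_dims():
    from collections import OrderedDict
    fake_state_dict = OrderedDict([
        ('encoder.0.linear.weight', torch.randn(128, 978)),
        ('encoder.1.linear.weight', torch.randn(64, 128)),
    ])
    dims = infer_dims_from_state_dict(fake_state_dict, model_type='mlp')
    print(dims)  # expected: [978, 128, 64]
    return dims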
def get_top_genes(top_ixs, return_scores = False):
"""
Params
------
top_ixs (list of torch.tensor)
Indices for the top genes in every (SAG) pooling layer.
Notes
-----
top_ixs is a nested list, so all lists refer to the first one.
"""
assert len(top_ixs)>=2, "You already have top indices !"
# Use 0th index as second is attention weights
top_ixs_np = [x[0].numpy() for x in top_ixs]
n_layers = len(top_ixs)
    # Compose the kept indices from the last pooling layer back to the first,
    # so that the result indexes into the original gene space
    tmp_ixs = top_ixs_np[n_layers - 1]
    for i in np.arange(n_layers - 2, 0, -1):
        tmp_ixs = top_ixs_np[i][tmp_ixs]
    top_gene_ixs = top_ixs_np[0][tmp_ixs]
if return_scores:
att_wts = top_ixs[n_layers-1][1].detach().numpy()
#top_att_wts = att_wts[tmp_ixs]
return top_gene_ixs, att_wts
return top_gene_ixs
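# Illustrative sketch of the nested index structure `get_top_genes` expects: each
# pooling layer contributes (kept_indices, attention_scores), and layer-1 positions
# index into the genes kept by layer 0. The numbers are made up.
def _demo_get_top_genes():
    layer0 = (torch.tensor([5, 2, 9, 0]), torch.tensor([0.9, 0.8, 0.7, 0.6]))
    layer1 = (torch.tensor([0, 2]), torch.tensor([0.95, 0.85]))
    top_gene_ixs = get_top_genes([layer0, layer1])
    print(top_gene_ixs)  # expected: [5 9]
    return top_gene_ixs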
def make_knn_graph_eps(data, epsilon = 1, return_adjacency_only = False):
"""
Returns an ϵ-neighborhood graph in NetworkX format. The graph is
constructed by connecting points whose distance is smaller than ϵ.
Params
------
data (np.ndarray)
Dataset to make ϵ-graph from.
epsilon(float, default = 1)
Minimum distance to deem a connection between two points.
    return_adjacency_only (bool, default = False)
Only return the adjacency matrix of the graph, instead of
the NetworkX graph.
Returns
-------
G (nx.Graph)
Epsilon graph in nx fmt.
A (optional)
Adjacency matrix.
Notes
-----
By default uses euclidean distance but could be generalized to use any
other distance metric.
"""
# Get distance matrix
#D = generalized_distance_matrix(data, data)
from sklearn import metrics
D = metrics.pairwise_distances(data)
# Keep only distances below epsilon
mask = D <= epsilon
D_thresh = D*mask
# Make weighted adjacency matrix,
# weight is prop to inverse of distance
# and safe divide by zero
A = np.divide(1, D_thresh, out = np.zeros_like(D_thresh), where=D_thresh!=0)
if return_adjacency_only:
return A
G = nx.from_numpy_matrix(A)
return G
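# Minimal sketch: builds an epsilon-neighborhood graph over a few random 2-D points.
# epsilon=0.5 is an arbitrary illustrative threshold.
def _demo_epsilon_graph():
    rng = np.random.default_rng(3)
    points = rng.random((20, 2))
    g_eps = make_knn_graph_eps(points, epsilon=0.5)
    print(g_eps.number_of_nodes(), g_eps.number_of_edges())
    return g_eps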
def get_louvain_clus_epsilon_graph(data, eps = 1, _plot = False, res = 1):
"""
Returns a dictionary containing the clusters for a knn graph G.
Params
------
data (np.ndarray)
Data to construct the kNN graph from.
eps (float)
Minimum distance to make an edge between two points.
res (float, default = 1)
Resolution for Louvain algorithm.
Returns
-------
clus (dict)
Dictionary of cluster membership for each datapoint.
Keys are the standard indices of the numpy array.
"""
G = make_knn_graph_eps(data, eps=eps)
    clus = community.best_partition(G, resolution = res)
if _plot:
# Visualize graph - TO-DO: color by clusters
plt.figure(figsize = (3,3))
nx.draw(G, with_labels = True, node_size = 3, node_color = 'lightblue')
return clus
def choose_clus_laplacian_epsilon_graph(
data, eps = 1, tol = 1e-6, return_eigvecs = False
):
A = make_knn_graph_eps(data, epsilon = eps, return_adjacency_only = True)
D = np.diag(A.sum(axis = 0))
L = D - A
eigvals, eigvecs = np.linalg.eig(L)
n_clus = np.sum(eigvals < tol)
if return_eigvecs:
return n_clus, eigvecs
return n_clus
def choose_clus_laplacian_knn(
data, k_neighbors = 10, tol = 1e-6, return_all = False
):
"""
Returns the optimal number of clusters to use using spectral clustering.
It uses a kNN graph as an approximation of the data manifold.
By definition, the number of disjoint vertex sets will be equal to the
number of zero eigenvalues, i.e. the dimension of the eigenspace corresponding
to the zero eigenvalue of the graph Laplacian.
Params
------
data (np.ndarray)
Data to construct the kNN graph from.
k_neighbors (int, default = 10)
Number of neighbors to build the kNN graph.
tol (float, default = 1e-6)
Upper bound to deem an eigenvalue as a zero eigenvalue.
I.e. any eigval λ < tol, will be set to zero.
Returns
-------
n_clus (int)
Number of eigenvalues below tol.
A, eigvecs (optional)
Adjacency matrix and eigenvectors of graph laplacian.
"""
A = kneighbors_graph(
data, k_neighbors, mode = 'connectivity', p = 2, include_self = True,
n_jobs = -1
).toarray()
D = np.diag(A.sum(axis = 1))
L = D - A
eigvals, eigvecs = np.linalg.eig(L)
n_clus = np.sum(eigvals < tol)
if return_all:
return n_clus, eigvecs,
return n_clus
def get_knn_graph_louvain(data, k = 4, verbose =True):
"""
Returns a knn graph in nx format and louvain cluster for each datapoint.
Params
------
k(int)
Number of k neighbors to build the graph with.
"""
if verbose:
print('Starting kNN graph')
A = kneighbors_graph(
data, k, mode='connectivity', p = 2, include_self=True
)
if verbose:
print('Finished kNN graph.')
n_edges = A.data.size
print('The data had %d edges using k= %d'%(n_edges, k))
G = nx.from_scipy_sparse_matrix(A)
if verbose:
print('Starting Louvain clustering algorithm.')
clus = community.best_partition(G)
    n_clus = max(clus.values()) + 1  # cluster labels are zero-indexed
    if verbose:
        print('Found %d clusters with k = %d \n'%(n_clus, k))
        print('Finished Louvain.')
    clus_labels = list(clus.values())
return G, clus_labels
def get_bayesian_information_criterion(data, max_clusters, min_clusters = 2):
"""
Returns the bayesian information criterion for a number of Gaussian Mixture models.
This is aimed to choose the number of clusters for a given dataset.
The number of clusters that minimizes the Bayesian information criterion, maximizes
the likelihood of the model best explaining the dataset.
Params
--------
max_clusters(int)
Maximum number of clusters to run against.
data (array-like or pd.DataFrame)
Dataset (n_samples, n_variables) to be clustered
Returns
--------
bic(list)
Bayesian information criterion score for each model.
"""
# Initialize array for the number of clusters
n_components = np.arange(min_clusters, max_clusters)
# Run a GMM model for each of the number of components
models = [GMM(n, covariance_type='full', random_state=0).fit(data)
for n in n_components]
# Extract the Schwarz (bayesian) information criterion for each model
bic = [m.bic(data) for m in models]
return bic, models
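# Illustrative sketch: scores GMMs with 2..7 components on two synthetic blobs and
# picks the component count minimising the BIC. Assumes the GMM alias used above
# (sklearn's GaussianMixture) is imported at module level.
def _demo_bic_model_selection():
    rng = np.random.default_rng(7)
    blob_a = rng.normal(loc=0.0, scale=0.5, size=(200, 2))
    blob_b = rng.normal(loc=5.0, scale=0.5, size=(200, 2))
    data = np.vstack([blob_a, blob_b])
    bic, models = get_bayesian_information_criterion(data, max_clusters=8, min_clusters=2)
    best_n = int(np.argmin(bic)) + 2  # offset by min_clusters
    print('BIC-selected number of clusters: %d' % best_n)
    return best_n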
def run_gmm(data, n_clus = 5):
"""
Returns the results from a Gaussian Mixture model with n_clus.
"""
seed = 47
clus_object = GMM(n_components = n_clus, verbose = True, random_state = seed)
clus_object.fit(data)
labels = clus_object.predict(data)
bic = clus_object.bic(data)
return labels, clus_object, bic
def load_nsaid_names():
classification = {
"salicylates": ["aspirin", "diflunisal", "salsalate"],
"propionic acid derivatives": [ #2-arylpropionic acid scaffold
"ibuprofen",
"dexibuprofen",
"naproxen",
"fenoprofen",
"ketoprofen",
"dexketoprofen",
"flurbiprofen",
"oxaprozin",
"loxoprofen",
"pelubiprofen",
"zaltoprofen",
"pranoprofen",
"suprofen"
],
"acetic acid derivatives":[
"indomethacin",
"tolmetin",
"sulindac",
"etodolac",
"ketorolac",
"diclofenac",
"aceclofenac",
"bromfenac",
"nabumetone",
],
"oxicams": # enolic acid derivatives
[
"piroxicam",
"meloxicam",
"tenoxicam",
"droxicam",
"lornoxicam",
"isoxicam",
"phenylbutazone" # bute
],
"fenamates": #anthranilic acid derivatives,
# anthranlic acid is an nitrogen isostere of salycilate
[
"mefenamic acid",
"meclofenamic acid",
"flufenamic acid",
"tolfenamic acid"
],
"selective cox-2 inhibitors": # coxibs
# have lower risk of gastro bleeding
[
"celecoxib",
"rofecoxib",
"valdecoxib", # withdrawn from market
"parecoxib", # FDA withdrawn
"lumiracoxib",
"etoricoxib", # not FDA approved
"firoxocib" # used in dogs /horses
],
"sulfonanilides":
["nimesulide"],
"others":["clonixin", "licofelone", "harpagide"]
    }
    return classification
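# Minimal sketch: flattens the classification above into a drug -> NSAID-class lookup,
# e.g. to annotate a `drug_class` column.
def _demo_nsaid_lookup():
    classification = load_nsaid_names()
    name_to_nsaid_class = {
        drug: nsaid_class
        for nsaid_class, drugs in classification.items()
        for drug in drugs
    }
    print(name_to_nsaid_class['ibuprofen'])  # 'propionic acid derivatives'
    return name_to_nsaid_class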
class MO_trainer:
"""
Multiomics trainer.
"""
def __init__(
self,
model,
atac_adata,
split_dict,
batch_size,
train_loader,
val_loader,
rna_adata=None,
lr=3e-6,
n_epochs=20,
model_name=None,
model_dir=None
):
self.model = model
self.batch_size = batch_size
self.atac_adata = atac_adata
        self.rna_adata = rna_adata
self.train_loader, self.val_loader = train_loader, val_loader
self.n_epochs = n_epochs
self.criterion = nn.NLLLoss()
self.n_train_batches = len(train_loader.dataset) // batch_size
self.n_test_batches = len(val_loader.dataset) // batch_size
self.cuda = torch.cuda.is_available()
self.device= try_gpu()
self.ordering_labels=torch.arange(batch_size).to(self.device)
# if self.cuda:
# if self.model.logit_scale.device() != self.device:
# self.model = self.model.to(self.device)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr = lr)
self.model_name, self.model_dir = model_name, model_dir
def train_step(
self,
cell_batch,
ix_labels
):
results_dict={
"train_loss": {}
}
self.model.zero_grad()
atac_batch=torch.from_numpy(
self.atac_adata[self.atac_adata.obs.barcodes.isin(ix_labels)].X.A
)
if self.cuda:
cell_batch = cell_batch.cuda()
atac_batch = atac_batch.cuda()
logits = self.model(atac_batch.float(),cell_batch.float())
y_atac=F.log_softmax(logits, dim = 1)
y_rna=F.log_softmax(logits, dim = 0)
# Compute error and average
loss_atac = self.criterion(y_atac, self.ordering_labels)
loss_rna = self.criterion(y_rna, self.ordering_labels)
loss = (loss_atac + loss_rna)/2
atac_acc = accuracy(y_atac.argmax(axis =1), self.ordering_labels)
rna_acc = accuracy(y_rna.argmax(axis = 0), self.ordering_labels)
acc = (atac_acc + rna_acc)/2
# Backprop and update weights
loss.backward()
self.optimizer.step()
results_dict["train_loss"]["contrastive_loss"] = loss.item()
results_dict["train_acc"] = acc
return results_dict
@torch.no_grad()
def val_step(self, cell_batch, ix_labels):
results_dict = {"test_loss": {}}
atac_batch=torch.from_numpy(
self.atac_adata[self.atac_adata.obs.barcodes.isin(ix_labels)].X.A
)
if self.cuda:
cell_batch = cell_batch.cuda()
atac_batch = atac_batch.cuda()
logits = self.model(atac_batch.float(),cell_batch.float())
y_atac=F.log_softmax(logits, dim = 1)
y_rna=F.log_softmax(logits, dim = 0)
# Compute error and average
loss_atac = self.criterion(y_atac, self.ordering_labels)
loss_rna = self.criterion(y_rna, self.ordering_labels)
loss = (loss_atac + loss_rna)/2
atac_acc = accuracy(y_atac.argmax(axis =1), self.ordering_labels)
rna_acc = accuracy(y_rna.argmax(axis = 0), self.ordering_labels)
acc = (atac_acc + rna_acc)/2
results_dict["test_loss"]["contrastive_loss"] = loss.item()
results_dict["test_acc"] = acc
return results_dict
def train(self)-> pd.DataFrame:
df_train_loss, df_train_acc = pd.DataFrame(), pd.DataFrame()
df_test_loss, df_test_acc = pd.DataFrame(), | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# pylint: disable=E0611
# pylint: disable=E0401
"""Gathers data from dynamodb database and plots it to a Folium Map for display.
This is the main component of the visualization tool. It first gathers data on
all of the segment stored in the dynamodb, then constructs a Folium map which
contains both the route segments and census tract-level socioeconomic data taken
from the American Community Survey. The map is saved as an html file to open
in a web browser.
"""
import os
import json
import boto3
import branca.colormap as cm
import folium
from folium.plugins import FloatImage
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from transit_vis.src import config as cfg
def connect_to_dynamo_table(table_name):
"""Connects to the dynamodb table specified using details from config.py.
Uses the AWS login information stored in config.py to attempt a connection
to dynamodb using the boto3 library, then creates a connection to the
specified table.
Args:
table_name: The name of the table on the dynamodb resource to connect.
Returns:
A boto3 Table object pointing to the dynamodb table specified.
"""
dynamodb = boto3.resource(
'dynamodb',
region_name=cfg.REGION,
aws_access_key_id=cfg.ACCESS_ID,
aws_secret_access_key=cfg.ACCESS_KEY)
table = dynamodb.Table(table_name)
return table
def dump_table(dynamodb_table):
"""Downloads the contents of a dynamodb table and returns them as a list.
Iterates through the contents of a dynamodb scan() call, which returns a
LastEvaluatedKey until there are no results left in the scan. Appends each
chunk of data returned by scan to an array for further use.
Args:
dynamodb_table: A boto3 Table object from which all data will be read
into memory and returned.
Returns:
A list of items downloaded from the dynamodb table. In this case, each
item is a bus route as generated in initialize_db.py.
"""
result = []
response = dynamodb_table.scan()
result.extend(response['Items'])
while 'LastEvaluatedKey' in response.keys():
response = dynamodb_table.scan(ExclusiveStartKey=response['LastEvaluatedKey'])
result.extend(response['Items'])
return result
def table_to_lookup(table):
"""Converts the contents of a dynamodb table to a dictionary for reference.
Uses dump_table to download the contents of a specified table, then creates
a route lookup dictionary where each key is (route id, express code) and
contains elements for avg_speed, and historic_speeds.
Args:
table: A boto3 Table object from which all data will be read
into memory and returned.
Returns:
        A dictionary with (route id, local express code) keys, and values holding
        the average speed (num) and historic speeds (list) for each route.
"""
# Put the data in a dictionary to reference when adding speeds to geojson
items = dump_table(table)
route_lookup = {}
for item in items:
if 'avg_speed_m_s' in item.keys():
route_id = int(item['route_id'])
local_express_code = item['local_express_code']
hist_speeds = [float(i) for i in item['historic_speeds']]
route_lookup[(route_id, local_express_code)] = {
'avg_speed_m_s': float(item['avg_speed_m_s']),
'historic_speeds': hist_speeds
}
return route_lookup
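# Illustrative sketch (no AWS call): a hand-built lookup with the same shape as the
# output of table_to_lookup. The route ids and speeds are made up; a dictionary like
# this can be fed to write_speeds_to_map_segments below for offline testing.
def _demo_fake_speed_lookup():
    return {
        (100001, 'L'): {'avg_speed_m_s': 7.2, 'historic_speeds': [6.8, 7.0, 7.2]},
        (100001, 'E'): {'avg_speed_m_s': 9.5, 'historic_speeds': [9.1, 9.5]},
    }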
def write_census_data_to_csv(s0801_path, s1902_path, tract_shapes_path):
"""Writes the data downloaded directly from ACS to TIGER shapefiles.
Reads in data from .csv format as downloaded from the American Community
Survey (ACS) website, then filters to variables of interest and saves. In
this case the two tables are s0801 and s1902, which contain basic
socioeconomic and commute-related variables. Data was downloaded at the
census tract level for the state of Washington. The data is saved in .csv
format to be used in the Folium map.
Args:
s0801_path: A string path to the location of the raw s0801 data, not
including file type ending (.csv).
s1902_path: A string path to the location of the raw s1902 data, not
including file type ending (.csv).
tract_shapes_path: A string path to the geojson TIGER shapefile as
downloaded from the ACS, containing polygon data for census tracts
in the state of Washington.
Returns:
1 after writing the combined datasets to a *_tmp file in the same folder
as the TIGER shapefiles.
"""
# Read in the two tables that were taken from the ACS data portal website
s0801_df = pd.read_csv(f"{s0801_path}.csv")
s1902_df = pd.read_csv(f"{s1902_path}.csv")
# Filter each table to only variables of interest, make more descriptive
commuters_df = s0801_df[[
'GEO_ID', 'NAME',
'S0801_C01_001E', 'S0801_C01_009E']]
commuters_df.columns = [
'GEO_ID', 'NAME',
'total_workers', 'workers_using_transit']
commuters_df = commuters_df.loc[1:len(commuters_df), :]
commuters_df['GEO_ID'] = commuters_df['GEO_ID'].str[-11:]
# Repeat for s1902
households_df = s1902_df[[
'GEO_ID', 'NAME', 'S1902_C01_001E',
'S1902_C03_001E', 'S1902_C02_008E',
'S1902_C02_020E', 'S1902_C02_021E']]
households_df.columns = [
'GEO_ID', 'NAME', 'total_households',
'mean_income', 'percent_w_assistance',
'percent_white', 'percent_black_or_african_american']
households_df = households_df.loc[1:len(households_df), :]
households_df['GEO_ID'] = households_df['GEO_ID'].str[-11:]
# Combine datasets on their census tract ID and write to .csv file
final_df = pd.merge(commuters_df, households_df, on='GEO_ID').drop(columns=['NAME_x', 'NAME_y'])
final_df.to_csv(f"{tract_shapes_path}_tmp.csv", index=False)
return 1
def write_speeds_to_map_segments(speed_lookup, segment_path):
"""Creates a _tmp geojson file with speed data downloaded from dynamodb.
Loads the segments generated from initialize_db.py and adds speeds to them
based on the specified dictionary. Writes a new *_tmp geojson file that will
be loaded by the Folium map and color coded based on segment average speed.
Args:
speed_lookup: A Dictionary object with (route id, local_express_code)
keys and average speed data to be plotted by Folium.
segment_path: A string path to the geojson file generated by
initialize_db.py that contains route coordinate data.
Returns:
A list containing the average speed of each segment that was
successfully paired to a route (and will be plotted on the map).
"""
    if not isinstance(speed_lookup, dict):
        raise TypeError('Speed lookup must be a dictionary')
# Read route geojson, add property for avg speed, keep track of all speeds
speeds = np.ones(0)
with open(f"{segment_path}.geojson", 'r') as shapefile:
kcm_routes = json.load(shapefile)
# Check if each geojson feature has a speed in the database
for feature in kcm_routes['features']:
route_id = feature['properties']['ROUTE_ID']
local_express_code = feature['properties']['LOCAL_EXPR']
if (route_id, local_express_code) in speed_lookup.keys():
speed = speed_lookup[(route_id, local_express_code)]['avg_speed_m_s']
feature['properties']['AVG_SPEED_M_S'] = speed
feature['properties']['HISTORIC_SPEEDS'] = \
speed_lookup[(route_id, local_express_code)]['historic_speeds']
speeds = np.append(speeds, speed)
else:
feature['properties']['AVG_SPEED_M_S'] = 0
feature['properties']['HISTORIC_SPEEDS'] = [0]
# Plot and save the distribution of speeds to be plotted with Folium
plt.figure(figsize=(4, 2.5))
plt.style.use('seaborn')
plt.hist(speeds[np.nonzero(speeds)], bins=15)
plt.xlim((0, 30))
plt.title('Network Speeds')
plt.xlabel('Average Speed (m/s)')
plt.ylabel('Count of Routes')
plt.savefig(f"{segment_path}_histogram.png", bbox_inches='tight')
# Write the downloaded speeds to temp file to be plotted with Folium
with open(f"{segment_path}_w_speeds_tmp.geojson", 'w+') as new_shapefile:
json.dump(kcm_routes, new_shapefile)
return speeds
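# Illustrative sketch: turns the speeds returned above into the linear colormap
# expected by generate_folium_map. The endpoint colours are arbitrary choices.
def _demo_speed_colormap(speeds):
    nonzero = speeds[np.nonzero(speeds)]
    colormap = cm.LinearColormap(
        colors=['red', 'yellow', 'green'],
        vmin=float(nonzero.min()),
        vmax=float(nonzero.max()),
        caption='Average segment speed (m/s)')
    return colormap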
def generate_folium_map(segment_file, census_file, colormap):
"""Draws together speed/socioeconomic data to create a Folium map.
Loads segments with speed data, combined census data, and the colormap
generated from the list of speeds to be plotted. Plots all data sources on
a new Folium Map object centered on Seattle, and returns the map.
Args:
segment_file: A string path to the geojson file generated by
write_speeds_to_map_segments that should contain geometry as well
as speed data.
census_file: A string path to the geojson file generated by
write_census_data_to_csv that should contain the combined s0801 and
s1902 tables.
colormap: A Colormap object that describes what speeds should be mapped
to what colors.
Returns:
A Folium Map object containing the most up-to-date speed data from the
dynamodb.
"""
# Read in route shapefile and give it styles
kcm_routes = folium.GeoJson(
name='King Country Metro Speed Data',
data=f"{segment_file}_w_speeds_tmp.geojson",
style_function=lambda feature: {
'color': 'gray' if feature['properties']['AVG_SPEED_M_S'] == 0 \
else colormap(feature['properties']['AVG_SPEED_M_S']),
'weight': 1 if feature['properties']['AVG_SPEED_M_S'] == 0 \
else 3},
highlight_function=lambda feature: {
'fillColor': '#ffaf00', 'color': 'blue', 'weight': 6},
tooltip=folium.features.GeoJsonTooltip(
fields=['ROUTE_NUM', 'AVG_SPEED_M_S',
'ROUTE_ID', 'LOCAL_EXPR', 'HISTORIC_SPEEDS'],
aliases=['Route Number', 'Most Recent Speed (m/s)',
'Route ID', 'Local (L) or Express (E)', 'Previous Speeds']))
# Read in the census data/shapefile and create a choropleth based on income
seattle_tracts_df = | pd.read_csv(f"{census_file}_tmp.csv") | pandas.read_csv |
import argparse
import pandas as pd
import itertools
import os
from ast import literal_eval
from helper_functions import comorbidity_indicator
# Computes basic stats for a data segment; takes in a Pandas DataFrame with
# the required fields (sum_costs and no_comorbidities).
def segment_stats(data):
row_dict = {}
row_dict['avg_cost'] = data.sum_costs.mean()
row_dict['std_cost'] = data.sum_costs.std()
row_dict['num_person_years'] = data.shape[0]
row_dict['avg_num_com'] = data.no_comorbidities.mean()
row_dict['std_num_com'] = data.no_comorbidities.std()
return row_dict
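# Minimal sketch: runs segment_stats on a tiny made-up cohort with the two required
# columns (sum_costs and no_comorbidities).
def _demo_segment_stats():
    toy = pd.DataFrame({
        'sum_costs': [1200.0, 430.5, 980.0],
        'no_comorbidities': [2, 0, 1],
    })
    stats = segment_stats(toy)
    print(stats)
    return stats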
def main(args):
df_merged = pd.read_csv(args.input_file)
df_merged.classes = df_merged.classes.apply(lambda x: literal_eval(x))
df_merged.agg_indices = \
df_merged.agg_indices.fillna('None').apply(lambda x: literal_eval(x))
df_merged.sex.fillna('Not provided')
pop_N = df_merged.shape[0] # grab total number of person-years
# IMPORT DESCRIPTION INDEX
desc_dict = pd.read_excel(args.disease_dict).long.to_dict()
# We will be grouping by pairs of comorbid diseases.
disease_dyads = [i for i in itertools.combinations(range(len(desc_dict)), 2)]
# Ancillary function to convert pairs of disease indices to pairs of
# disease descriptions.
def map_dyad(d_dyad):
return [desc_dict[d_dyad[0]], desc_dict[d_dyad[1]]]
bins = [0,18,35,50,65,150]
age_labels = ['0-18','18-35','35-50','50-65','65+']
df_merged['age_bin'] = pd.cut(df_merged.age, bins=bins, labels=age_labels)
sexes = ['F','M']
# convert comorbidity list to boolean columns
## comorbidity_indicator()
for i in range(len(desc_dict)):
df_merged[desc_dict[i]] = comorbidity_indicator(df_merged,[str(i)])
# disease stats
diseases_df = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Now adds smoothing to the series. Smoothing parameters are added to our model's
hyperparameters.
"""
# =============================================================================
# IMPORTS
# =============================================================================
import pandas as pd
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt
import time
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import InputLayer, LSTM, GRU, Dense, Dropout, LayerNormalization, Bidirectional #BatchNormalization - NO
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
from scipy.ndimage import gaussian_filter1d
# =============================================================================
# CLASSES & FUNCTIONS
# =============================================================================
class BuildModel():
"""
Build a model. Arguments allow one to customise the hyper parameters
ATTRIBUTES :-
length - number of steps in time sequence to feed the rnn
layers_num - number of rnn layers in model (capped at 3)
layers_type - select "LSTM" or "GRU"
units - number of units in rnn layers
num_step_preds - number of steps/days in time to predict
dropout - dropout % to be applied to rnn units
    g_filt - gaussian filter sigma for smoothing. Default: 0.1 (light smoothing)
batch_size - number of samples to feed model at a time.
patience - how many epochs to wait before stopping model after finding good score.
model_name - file name of model we save. must end in ".h5" eg 'temp_model.h5'
"""
def __init__(self, model_name, length=10, layers_num=1, layers_type='LSTM',\
units=50, dropout=0.0, g_filt=00.1, num_step_preds=1,\
epochs=8, batch_size=1, patience=5):
#assertions for input
assert 0 < layers_num < 4, "1 <= layers_num <= 3"
assert layers_type in ['LSTM', 'GRU'], "layers_type is LSTM or GRU"
assert 0 <= dropout < 1, "dropout must be float < 1"
assert model_name[-3:] == '.h5', "End model_name with '.h5'"
#initialise
self.model_name = model_name
self.length = length
self.layers_num = layers_num
self.layers_type = layers_type
self.units = units
self.num_step_preds = num_step_preds
self.dropout = dropout
self.g_filt = g_filt
self.epochs = epochs
self.batch_size = batch_size
self.n_features = 1
#callbacks
self.callbacks =[EarlyStopping(monitor='val_loss', patience=patience),\
ModelCheckpoint(self.model_name, monitor='val_loss',\
save_best_only=True)]
#BUILD MODEL
##inputs
self.model = Sequential()
self.model.add(InputLayer(input_shape=(self.length, self.n_features)))
##add extra layers as required (or not if layers_num = 1)
for i in range(layers_num - 1):
self.model.add(eval('{}(units={}, dropout={}, return_sequences=True)'\
.format(self.layers_type, self.units, self.dropout)))
##closing rnn layer (do not return squences)
self.model.add(eval('{}(units={}, dropout={})'\
.format(self.layers_type, self.units, self.dropout)))
##Dense output
self.model.add(Dense(units=self.num_step_preds))
#compile model
self.model.compile(optimizer='adam', loss='mse', metrics=['mae'])
def setupData(self, series, val_days=450):
"""
splits data, scales data, creates generators for the model
"""
        assert val_days > self.length, "val_days must exceed length"
#split data into train and validation
self.train = series.iloc[:-val_days]
self.validation = series.iloc[-val_days:]
#Apply smoothing filters
self.train_smooth = \
gaussian_filter1d(self.train, self.g_filt)\
.reshape(-1,1)
self.validation_smooth = \
gaussian_filter1d(self.validation, self.g_filt)\
.reshape(-1,1)
#create time series generators
self.generator = \
TimeseriesGenerator(data=self.train_smooth,\
targets=self.train_smooth,\
length=self.length,\
batch_size=self.batch_size)
self.val_generator = \
TimeseriesGenerator(data=self.validation_smooth,\
targets=self.validation_smooth,\
length=self.length,\
batch_size=self.batch_size)
def fitModel(self):
"""
Fits the model on your generators for training and validation sets.
EarlyStopping call back ends training if val_loss doesnt improve.
Record epoch metrics in a DataFrame.
"""
self.model.fit(self.generator, validation_data=self.val_generator,\
epochs=self.epochs, callbacks=self.callbacks)
self.history = | pd.DataFrame(self.model.history.history) | pandas.DataFrame |
import copy
import itertools
import os
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.preprocessing import PowerTransformer
from scipy.stats import yeojohnson
from tqdm import tqdm
import tensorflow as tf
import warnings
warnings.simplefilter("ignore")
n_wavelengths = 55
n_timesteps = 300
class read_Ariel_dataset():
def __init__(self, noisy_path_train, noisy_path_test, params_path, start_read):
"""
For reading Ariel Dataset.
:param noisy_path_train: (str) The *relative path's parent directory* from the current
working directory to all noisy training files. For local files start with "./", for
colab files alternatively start with "/content/" (and "./" works fine).
        :param noisy_path_test: (str) The *relative path's parent directory* from the current
working directory to all noisy test files. For local files start with "./", for
colab files alternatively start with "/content/" (and "./" works fine).
:param params_path: (str) The *relative path's parent directory* from the current
working directory to all params files. For local files start with "./", for
colab files alternatively start with "/content/" (and "./" works fine).
:param start_read: (int) How many data points to replace at the beginning of the
file. Used for preprocessing of files by replacing values before start_read
with 1.0 to minimize impact of the drop valley.
"""
super().__init__()
self.noisy_path = noisy_path_train
self.noisy_path_test = noisy_path_test
self.params_path = params_path
self.start_read = start_read
# list all files in path(s).
self.noisy_list= os.listdir(self.noisy_path)
self.noisy_list_test = os.listdir(self.noisy_path_test)
self.params_list = os.listdir(self.params_path)
# Grouped by AAAA:
self.group_noisy_list = self._group_list(self.noisy_list)
self.group_noisy_list_test = self._group_list(self.noisy_list_test)
self.group_params_list = self._group_list(self.params_list)
def _group_list_return(self):
"""
Only used for unit test purposes.
Return self.group_noisy_list and assert it is true.
"""
return self.group_noisy_list
def _choose_train_or_test(self, folder="noisy_train", batch_size=1):
"""Private function to choose train or test.
        :param batch_size (int): The batch size to take. Not implemented yet.
"""
if folder == "noisy_train":
path = self.noisy_path
files = self.noisy_list
elif folder == "noisy_test":
path = self.noisy_path_test
files = self.noisy_list_test
else:
raise FileNotFoundError("Not in the list (noisy_train, noisy_test). "
"Please input the choices in the list stated and try again.")
return path, files
def _len_noisy_list(self):
return len(self.noisy_list)
def unoptimized_read_noisy(self, folder="noisy_train", **kwargs):
"""
        Read noisy files greedily, concatenating them column-wise (axis=1).
        Each file contributes one column per time step, so reading 3 files of
        shape 55x300 gives a single 55x900 frame.
:param folder (str): Which folder to do baseline transition. Choices:
"noisy_train" (default), "noisy_test".
"""
path, files = self._choose_train_or_test(folder=folder, **kwargs)
predefined = pd.DataFrame()
for item in files:
# Concatenate filename and their parent folder.
relative_file_path = path + "/" + item
# Renaming the columns
names = [item[-14:-4] + f"_{i}" for i in range(n_timesteps)]
curr = pd.read_csv(relative_file_path, delimiter="\t", skiprows=6, header=None)
curr.rename(columns={x: y for x, y in zip(curr.columns, names)}, inplace=True)
# Concatenating the pandas.
predefined = pd.concat([predefined, curr], axis=1)
return predefined
def unoptimized_read_params(self):
"""
Read params files greedily, stacking them on the first axis.
"""
predefined = pd.DataFrame()
for item in self.params_list:
# Relative file path:
relative_file_path = self.params_path + "/" + item
names = [item[-14:-4]] # Have to be a list to take effect
curr = pd.read_csv(relative_file_path, delimiter="\t", skiprows=2, header=None).T
curr.rename(columns = {x: y for x, y in zip(curr.columns, names)}, inplace=True)
predefined = pd.concat([predefined, curr], axis=1)
return predefined
def _group_list(self, mylist):
"""
        Group list together. Here the function is specific to grouping files that share
        the same AAAA prefix into a sublist, so memory and dataframe I/O are not overloaded.
"""
return [list(v) for i, v in itertools.groupby(mylist, lambda x: x[:4])]
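    # Illustrative example for _group_list (hypothetical filenames):
    # ['0001_01.txt', '0001_02.txt', '0002_01.txt'] becomes
    # [['0001_01.txt', '0001_02.txt'], ['0002_01.txt']] because the key is x[:4].
    # Note that itertools.groupby only merges *consecutive* items, so this relies on the
    # directory listing being ordered by the AAAA prefix.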
def read_noisy_extra_param(self, folder="train", saveto="./feature_store/noisy_train"):
"""
Read the extra 6 stellar and planet parameters in noisy files.
:param folder (str): "train" or "test" choice. Default "train" for noisy train set.
:param saveto (str): The directory to save to. Will make the directory if not
already exists.
"""
header = ["star_temp", "star_logg", "star_rad", "star_mass", "star_k_mag", "period"]
predefined = pd.DataFrame()
if saveto[-1] != "/":
saveto += "/"
try:
os.makedirs(saveto)
except OSError as e:
pass
if folder == "train":
path = self.noisy_path
mylist = self.group_noisy_list
elif folder == "test":
path = self.noisy_path_test
mylist = self.group_noisy_list_test
else:
raise ValueError("Invalid 'folder' entry. Please choose between 'train' or 'test'.")
# To ensure small enough, read them into groups of csv first.
for grouped_item in tqdm(mylist):
for item in grouped_item:
temp_storage_float = []
relative_file_path = path + "/" + item
with open(relative_file_path, "r") as f:
temp_storage_str = list(itertools.islice(f, 6))
# Preprocess for numbers only
for string in temp_storage_str:
# Separate the digits and the non-digits.
new_str = ["".join(x) for _, x in itertools.groupby(string, key=str.isdigit)]
# Only new_str[0] is the one we want to omit.
                    # We want to join back into a single string because "." was previously
                    # classified as a non-digit.
new_str = "".join(new_str[1:])
# Convert to float.
temp_storage_float.append(float(new_str))
# Convert to pandas DataFrame.
temp_storage_float = pd.DataFrame(temp_storage_float)
# Define file name
names = [item[-14:-4]]
# Change the column name
temp_storage_float.rename(columns =
{x: y for x, y in zip(temp_storage_float.columns, names)},
inplace=True
)
# Change the row names for predefined (optional for readability)
temp_storage_float.rename(index = {x: y for x, y in zip(range(6), header)},
inplace=True)
predefined = pd.concat([predefined, temp_storage_float], axis=1)
predefined.to_csv(saveto + item[:4] + ".csv")
# Reset predefined
predefined = pd.DataFrame()
# Then concatenate the csv files.
saved_list = os.listdir(saveto)
predefined = pd.DataFrame()
for item in saved_list:
relative_file_path = saveto + item
name = [item[:-4]] # ignore the .csv at the end.
temp_df = pd.read_csv(relative_file_path, index_col=0)
predefined = pd.concat([predefined, temp_df], axis=1)
return predefined
def read_params_extra_param(self, saveto="./feature_store/params_train"):
"""
Read the extra 2 intermediate target params in the params files.
"""
header = ["sma", "incl"]
predefined = pd.DataFrame()
if saveto[-1] != "/":
saveto += "/"
try:
os.makedirs(saveto)
except OSError as e:
pass
mylist = self.group_params_list # Since we only have one folder, so hardcoded here.
for grouped_item in tqdm(mylist):
for item in grouped_item:
temp_storage_float = []
relative_file_path = self.params_path + "/" + item
with open(relative_file_path, "r") as f:
temp_storage_str = list(itertools.islice(f, 2))
# Preprocess for numbers only
for string in temp_storage_str:
# Separate the digits and the non-digits.
new_str = ["".join(x) for _, x in itertools.groupby(string, key=str.isdigit)]
# Only new_str[0] is the one we want to omit.
                    # We want to join back into a single string because "." was previously
                    # classified as a non-digit.
new_str = "".join(new_str[1:])
# Convert to float.
temp_storage_float.append(float(new_str))
# Convert to pandas DataFrame.
temp_storage_float = pd.DataFrame(temp_storage_float)
# Define file name
names = [item[-14:-4]]
# Change the column name
temp_storage_float.rename(columns =
{x: y for x, y in zip(temp_storage_float.columns, names)},
inplace=True
)
# Change the row names for predefined (optional for readability)
temp_storage_float.rename(index = {x: y for x, y in zip(range(6), header)},
inplace=True)
predefined = pd.concat([predefined, temp_storage_float], axis=1)
predefined.to_csv(saveto + item[:4] + ".csv")
# Reset predefined
predefined = pd.DataFrame()
# Then concatenate the csv files.
saved_list = os.listdir(saveto)
predefined = pd.DataFrame()
print(saved_list)
for item in saved_list:
relative_file_path = saveto + item
name = [item[:-4]] # ignore the .csv at the end.
temp_df = pd.read_csv(relative_file_path, index_col=0)
predefined = pd.concat([predefined, temp_df], axis=1)
return predefined
def data_augmentation_baseline(self, folder="noisy_train", extra_transform=None, **kwargs):
"""
Data augmentation: What is being done to the data by the Baseline
model done by the organizer.
:param folder (str): Which folder to do baseline transition. Choices:
"noisy_train" (default), "noisy_test".
:param extra_transform (str): Are there any other transformation you would like
to make before going into final transform? Note: only restricted support.
Choose from "log", "sqrt" and "square".
"""
# Read file
df = self.unoptimized_read_noisy(folder=folder, **kwargs)
path, files = self._choose_train_or_test(folder=folder, **kwargs)
# Transformation 1: First 30 points of each light curve are replaced
# by 1 to reduce the impact from the ramps.
# Get all files according to how column names are defined.
label_names = [x[-14:-4] for x in files]
for label_name in label_names:
for i in range(self.start_read):
for j in range(n_wavelengths):
df[str(label_name) + "_" + str(i)][j] = 1
# Extra transformation outside of what is being done in baseline.
# Tests yet to be implemented.
for i in range(n_wavelengths):
if extra_transform == "log":
df.iloc[i] = np.log(df.iloc[i])
elif extra_transform == "sqrt":
df.iloc[i] = np.sqrt(df.iloc[i])
elif extra_transform == "square":
df.iloc[i] = np.square(df.iloc[i])
# Transformation 2: -1 to all data points in the file.
df = df - 1
        # Transformation 3: Values rescaled by dividing by 0.04 to bring the standard
        # deviation closer to unity.
df /= 0.04
return df
def read_noisy_vstacked(self, from_baseline=True, dataframe=None, **kwargs):
"""
        Read files vstacked on each other instead of concatenated along the columns.
        For example, with a timestep of 300 and 3 files, a single wavelength that would
        otherwise have shape (1, 900) is returned as (3, 300) instead.
        This way we aggregate each single wavelength onto one block and continue vstacking
        downwards, keeping the row length (300 columns) constant.
:param from_baseline (bool): get data from data_augmentation_baseline
directly or insert data yourself? Default to True.
:param dataframe (pandas.DataFrame): the data to be passed in. Only to be used
if from_baseline = False, otherwise default to None.
"""
if from_baseline == True:
df = self.unoptimized_read_noisy(**kwargs)
else:
df = dataframe
new_df = pd.DataFrame()
for key, value in df.iterrows():
start_count_sectors = 0
end_count_sectors = n_timesteps
# To iterate for every 300 timesteps since this is from a single file.
while end_count_sectors <= len(value):
data = np.array(value[start_count_sectors: end_count_sectors])
new_df = new_df.append(pd.DataFrame(data).T, ignore_index = True)
start_count_sectors = end_count_sectors
end_count_sectors += n_timesteps
return new_df
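    # Example of the reshaping above: 3 noisy files give a (55, 900) frame from
    # unoptimized_read_noisy, which this method rearranges into (165, 300): one row per
    # (file, wavelength) pair with 300 time steps per row.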
def yeo_johnson_transform(self, from_baseline=True, dataframe=None, original_shape=True, **kwargs):
"""
The Yeo-Johnson Transform: https://www.stat.umn.edu/arc/yjpower.pdf
To "normalize" a non-normal distribution (i.e. transform from non-Gaussian
to Gaussian distribution), for a mix of positive and negative numbers,
(or strictly positive or strictly negative).
:param from_baseline (bool): get data from data_augmentation_baseline
directly or insert data yourself? Default to True.
:param dataframe (pandas.DataFrame): the data to be passed in. Only to be used
if from_baseline = False, otherwise default to None.
:param original_shape (bool): Whether to concatenate back to original shape of (x, 55).
If not True, it will choose a shape of (300, y) instead for easy reading.
Defaults to True.
"""
if from_baseline == True:
df = self.data_augmentation_baseline(**kwargs)
else:
df = dataframe
# pt = PowerTransformer(method="yeo-johnson")
try:
new_df = | pd.DataFrame() | pandas.DataFrame |
import pylab as plt; import numpy as np; import pandas as pd
import math; import json; from numpy.random import random, normal, uniform, randint
from scipy.interpolate import interp1d; from astropy_healpix import HEALPix;
from astropy.coordinates import ICRS, SkyCoord; from astropy import units as u;
from timeit import default_timer as timer
start = timer()
N = 1000 ##Change to alter the number of loops the code runs for
placement = np.zeros(N)
placement2 = np.zeros(N)
placement3 = np.zeros(N)
placement4 = np.zeros(N)
placement5 = np.zeros(N)
placement6 = np.zeros(N)
placement7 = np.zeros(N)
placement8 = np.zeros(N)
placement9 = np.zeros(N)
placement10 = np.zeros(N)
placement11 = np.zeros(N)
placement12 = np.zeros(N)
placement13 = np.zeros(N)
placement14 = np.zeros(N)
placement15 = np.zeros(N)
placement16 = np.zeros(N)
placement17 = np.zeros(N)
placement18 = np.zeros(N)
placement19 = np.zeros(N)
placement20 = np.zeros(N)
placement21 = np.zeros(N)
placement22 = np.zeros(N)
placement23 = np.zeros(N)
percentages = np.zeros(N)
percentages2 = np.zeros(N)
percentages3 = np.zeros(N)
percentages4 = np.zeros(N)
percentages5 = np.zeros(N)
percentages6 = np.zeros(N)
percentages7 = np.zeros(N)
percentages8 = np.zeros(N)
percentages9 = np.zeros(N)
percentages10 = np.zeros(N)
percentages11 = np.zeros(N)
percentages12 = np.zeros(N)
percentages13 = np.zeros(N)
percentages14 = np.zeros(N)
percentages15 = np.zeros(N)
percentages16 = np.zeros(N)
percentages17 = np.zeros(N)
percentages18 = np.zeros(N)
percentages19 = np.zeros(N)
percentages20 = np.zeros(N)
percentages21 = np.zeros(N)
percentages22 = np.zeros(N)
percentages23 = np.zeros(N)
no_se_func = []
ras_dex = np.zeros(shape = (N, 2))
test_case = np.zeros(shape = (N, 2))
def Ang_Dist(ra1, ra2, dec1, dec2):## Calculates the angular distance between apparent position and galaxy
ra1 *= (np.pi/180); ra2 *= (np.pi/180)
dec1 *= (np.pi/180); dec2 *= (np.pi/180)
return (180/np.pi) * np.arccos(np.sin(dec1) * np.sin(dec2) + np.cos(dec1) * np.cos(dec2) * np.cos(ra1 - ra2))
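#Example (illustrative) for Ang_Dist above: two points on the equator separated by 90
#degrees of RA, e.g. Ang_Dist(0.0, 90.0, 0.0, 0.0), give an angular distance of 90.0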
#################################################################
"""
def rank(theta, sigma, d_lum, luminosity, luminosity_probability): ## Normal
## Implements a ranking statistic defined in report
return np.exp(-(theta**2/(2 * (sigma)**2))) * (1/d_lum * luminosity)[:, 0] * luminosity_probability #* Colour_factor
def rank2(theta, sigma, d_lum, luminosity, luminosity_probability): ## Luminosity
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum * luminosity**2)[:, 0] * luminosity_probability
def rank3(theta, sigma, d_lum, luminosity, luminosity_probability): ## Luminosity Distance
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum**2 * luminosity)[:, 0] * luminosity_probability
def rank4(theta, sigma, d_lum, luminosity, luminosity_probability): ## Lum_prob
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum * luminosity)[:, 0] * luminosity_probability**2
def rank5(theta, sigma, d_lum, luminosity, luminosity_probability): ## Lum_prob, Lum
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum * luminosity**2)[:, 0] * luminosity_probability**2
def rank6(theta, sigma, d_lum, luminosity, luminosity_probability): ## D_Lum, Lum_prob
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum**2 * luminosity)[:, 0] * luminosity_probability**2
def rank7(theta, sigma, d_lum, luminosity, luminosity_probability): ## D_lum, Lum
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum**2 * luminosity**2)[:, 0] * luminosity_probability
def rank8(theta, sigma, d_lum, luminosity, luminosity_probability): ## All
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum**2 * luminosity**2)[:, 0] * luminosity_probability**2
def rank9(theta, sigma, d_lum, luminosity, luminosity_probability): ## Angular Distance
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum * luminosity)[:, 0] * luminosity_probability
def rank10(theta, sigma, d_lum, luminosity, luminosity_probability): ## Ang_Dist, D_Lum
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum**2 * luminosity)[:, 0] * luminosity_probability
def rank11(theta, sigma, d_lum, luminosity, luminosity_probability): ## Ang_Dist, Lum
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum * luminosity**2)[:, 0] * luminosity_probability
def rank12(theta, sigma, d_lum, luminosity, luminosity_probability): ## Ang_Dist, Lum_Prob
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum * luminosity)[:, 0] * luminosity_probability**2
def rank13(theta, sigma, d_lum, luminosity, luminosity_probability): ## All except Ang_Dist
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum**2 * luminosity**2)[:, 0] * luminosity_probability**2
def rank14(theta, sigma, d_lum, luminosity, luminosity_probability): ## All except Lum
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum**2 * luminosity)[:, 0] * luminosity_probability**2
def rank15(theta, sigma, d_lum, luminosity, luminosity_probability): ## All except d_lum
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum * luminosity**2)[:, 0] * luminosity_probability**2
def rank16(theta, sigma, d_lum, luminosity, luminosity_probability): ## All except Lum_prob
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum**2 * luminosity**2)[:, 0] * luminosity_probability
def rank17(theta, sigma, d_lum, luminosity, luminosity_probability): ## No angular Distance
return np.exp(0 * -(theta**2/(2 *(sigma)**2))) * (1/d_lum * luminosity)[:, 0] * luminosity_probability
def rank18(theta, sigma, d_lum, luminosity, luminosity_probability): ## No Luminosity Distance
return np.exp(-(theta**2/(2 * (sigma)**2))) * (1/d_lum**0 * luminosity)[:, 0] * luminosity_probability
def rank19(theta, sigma, d_lum, luminosity, luminosity_probability): ## No Luminosity
return np.exp(-(theta**2/(2 * (sigma)**2))) * (1/d_lum * luminosity**0)[:, 0] * luminosity_probability**2
def rank20(theta, sigma, d_lum, luminosity, luminosity_probability): ## No Luminosity Probability
return np.exp(-(theta**2/(2 * (sigma)**2))) * (1/d_lum * luminosity)[:, 0] * luminosity_probability**0
def rank21(theta, sigma, d_lum, luminosity, luminosity_probability): ## Optimise 1
return np.exp(-(2 * theta**2/((sigma)**2))) * (1/d_lum**8 * luminosity)[:, 0] * luminosity_probability**2
def rank22(theta, sigma, d_lum, luminosity, luminosity_probability): ## Optimise 2
return np.exp(-((theta**2) * (sigma**2))/(2)) * (1/d_lum**8 * luminosity)[:, 0] * luminosity_probability**2
def rank23(theta, sigma, d_lum, luminosity, luminosity_probability): ## Optimise 2
return np.exp(-((theta**2)**100/(2 * (sigma)**2))) * (1/d_lum**8 * luminosity)[:, 0] * luminosity_probability**2
"""
#################################################################
#Daves old functions before I fixed them
def rank(theta, sigma, d_lum, luminosity, luminosity_probability): ## Normal
## Implements a ranking statistic defined in report
return np.exp(-(theta**2/(2 * (sigma)**2))) * (1/d_lum * luminosity)[:, 0] * luminosity_probability #* Colour_factor
def rank2(theta, sigma, d_lum, luminosity, luminosity_probability): ## Luminosity
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum * luminosity**2)[:, 0] * luminosity_probability
def rank3(theta, sigma, d_lum, luminosity, luminosity_probability): ## Luminosity Distance
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum**2 * luminosity)[:, 0] * luminosity_probability
def rank4(theta, sigma, d_lum, luminosity, luminosity_probability): ## Lum_prob
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum * luminosity)[:, 0] * luminosity_probability**2
def rank5(theta, sigma, d_lum, luminosity, luminosity_probability): ## Lum_prob, Lum
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum * luminosity**2)[:, 0] * luminosity_probability**2
def rank6(theta, sigma, d_lum, luminosity, luminosity_probability): ## D_Lum, Lum_prob
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum**2 * luminosity)[:, 0] * luminosity_probability**2
def rank7(theta, sigma, d_lum, luminosity, luminosity_probability): ## D_lum, Lum
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum**2 * luminosity**2)[:, 0] * luminosity_probability
def rank8(theta, sigma, d_lum, luminosity, luminosity_probability): ## All
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum**2 * luminosity**2)[:, 0] * luminosity_probability**2
def rank9(theta, sigma, d_lum, luminosity, luminosity_probability): ## Angular Distance
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum * luminosity)[:, 0] * luminosity_probability
def rank10(theta, sigma, d_lum, luminosity, luminosity_probability): ## Ang_Dist, D_Lum
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum**2 * luminosity)[:, 0] * luminosity_probability
def rank11(theta, sigma, d_lum, luminosity, luminosity_probability): ## Ang_Dist, Lum
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum * luminosity**2)[:, 0] * luminosity_probability
def rank12(theta, sigma, d_lum, luminosity, luminosity_probability): ## Ang_Dist, Lum_Prob
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum * luminosity)[:, 0] * luminosity_probability**2
def rank13(theta, sigma, d_lum, luminosity, luminosity_probability): ## All except Ang_Dist
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum**2 * luminosity**2)[:, 0] * luminosity_probability**2
def rank14(theta, sigma, d_lum, luminosity, luminosity_probability): ## All except Lum
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum**2 * luminosity)[:, 0] * luminosity_probability**2
def rank15(theta, sigma, d_lum, luminosity, luminosity_probability): ## All except d_lum
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum * luminosity**2)[:, 0] * luminosity_probability**2
def rank16(theta, sigma, d_lum, luminosity, luminosity_probability): ## All except Lum_prob
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum**2 * luminosity**2)[:, 0] * luminosity_probability
def rank17(theta, sigma, d_lum, luminosity, luminosity_probability): ## No angular Distance
return np.exp(0 * -(theta**2/(2 *(sigma)**2))) * (1/d_lum * luminosity)[:, 0] * luminosity_probability
def rank18(theta, sigma, d_lum, luminosity, luminosity_probability): ## No Luminosity Distance
return np.exp(-(theta**2/(2 * (sigma)**2))) * (1/d_lum**0 * luminosity)[:, 0] * luminosity_probability
def rank19(theta, sigma, d_lum, luminosity, luminosity_probability): ## No Luminosity
return np.exp(-(theta**2/(2 * (sigma)**2))) * (1/d_lum * luminosity**0)[:, 0] * luminosity_probability**2
def rank20(theta, sigma, d_lum, luminosity, luminosity_probability): ## No Luminosity Probability
return np.exp(-(theta**2/(2 * (sigma)**2))) * (1/d_lum * luminosity)[:, 0] * luminosity_probability**0
def rank21(theta, sigma, d_lum, luminosity, luminosity_probability): ## Optimise 1
return np.exp(-(theta**2/(2 * (sigma)**2)))**(4) * (1/d_lum**0 * luminosity)[:, 0] * luminosity_probability**2
def rank22(theta, sigma, d_lum, luminosity, luminosity_probability): ## Optimise 2
return np.exp(-(theta**2/(2 * (sigma)**2)))**(sigma**4) * (1/d_lum**0 * luminosity)[:, 0] * luminosity_probability**2
def rank23(theta, sigma, d_lum, luminosity, luminosity_probability): ## Optimise 2
return np.exp(-((theta**2)**100/(2 * (sigma)**2))) * (1/d_lum**0 * luminosity)[:, 0] * luminosity_probability**2
#################################################################
def convert(h, m, s): #Hours minutes seconds to degrees (More for applied code than here)
return h + (m/60) + (s/3600)
#################################################################
def Luminosity_Handling(magnitude): ##Converts Absolute B Magnitude to Luminosity
solar_b = 4.74
solar_l = 1 #3.846e26 W
return solar_l * 10**(0.4 * (solar_b - magnitude)) ## Gives an array in terms of solar luminosity
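#Example for Luminosity_Handling above: magnitude = 4.74 (the solar value) gives 1.0, and
#every 5 magnitudes brighter multiplies the result by 100 (e.g. magnitude = -0.26 gives 100)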
###########################################################
def spherical_convert(ra, dec): ##Test ##Converts ra and dec to an xyz array
r = 1
#ra = phi
#dec = theta
##Convert to radians
ra = ra * np.pi/180
dec = dec * np.pi/180
x = np.cos(ra) * np.cos(dec)
y = np.sin(ra) * np.cos(dec)
z = np.sin(dec)
return np.array([x, y, z])
############################################################
def rotation(x, angle):##Test #Rotation about the z axis
#need angle in radians
rotation = np.array([[np.cos(angle), -np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1]])
return x * rotation
############################################################
def back_convert(axyz): ##Test ## Converts xyz coordinates to ra and dec
x = axyz[0]
y = axyz[1]
z = axyz[2]
r = modulus(axyz)
arg1 = float(y/x)
arg2 = float(z/r)
phi = np.arctan(arg1)
theta = np.arccos(arg2)
return (180/np.pi) * phi, (90 - theta * (180/np.pi))## Returns ra, dec in that order in degrees
#################################################################
def modulus(array): ##Test ##Finds the modulus of a matrix/array
return np.sqrt(array[0]**2 + array[1]**2 + array[2]**2)
#################################################################
def find_nearest(array, value): #Kind of a hash and not exactly interpolation, but for this point, should be okay
array = np.asarray(array) - value
truey = [i for i, val in enumerate(array) if val >= 0]
idx = truey[0]#(np.abs(array - value)).argmin()
return idx
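#Example for find_nearest above: find_nearest([0.1, 0.4, 0.7, 1.0], 0.5) returns 2, the
#index of the first element that is greater than or equal to the requested value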
#################################################################
def reduction(RA_dec, Dec_dec, df_master): ##Reduces the df_master by considering angular distance
#host = df_master.iloc[current_i]
#RA_dec = ra_prime[0]#host[["RA"]].values.tolist()[0]
#Dec_dec = dec_prime[0]#host[["dec"]].values.tolist()[0]
## Testing purposes only (hashed out lines)
RA = df_master[["RA"]].values.tolist()
ra_arry = np.isclose(RA, RA_dec, atol = error_radius)
res_ra = [i for i, val in enumerate(ra_arry) if val == False] ##Something up here - removing too many items
DEC = df_master[["dec"]].values.tolist()
dec_arry = np.isclose(DEC, Dec_dec, atol = error_radius)
res_dec = [i for i, val in enumerate(dec_arry) if val == False]
indices_to_keep = set(range(df_master.shape[0])) - set(res_ra) - set(res_dec)
df_sliced = pd.DataFrame.take(df_master, list(indices_to_keep), axis = 0)
ra = df_sliced[["RA"]].values
dec = df_sliced[["dec"]].values
return np.array(ra[:, 0]), np.array(dec[:, 0]), df_sliced
#################################################################
def Luminosity_back_convert(L_given, d_L): # ##Converts luminosity to luminosity at source
#L = L0/4 *np.pi * d_l**2
return (L_given) * (4 * np.pi * (3.086e22 * d_L)**2)
def Luminosity_for_convert(L_given, d_L): # ##Converts luminosity at source to apparent luminosity
return(L_given)/(4 * np.pi * (3.086e22 * d_L)**2)
#################################################################
def L_func(L_test, c, d_L): ## ##Takes an input and returns a probability based on the broken power law
L_star = np.log10(4.61e51 * 1e7) ##All from Guetta/Piran 2005
del_1 = 30
del_2 = 10
alpha = 0.5
beta = 1.5
L = np.zeros(len(d_L))
SGR_test = np.zeros(len(d_L))
for j in range(len(d_L)): ## Slightly inefficient, but on the scales of reduced catalog, not too drastic
L[j] = np.log10(Luminosity_back_convert(L_test, d_L[j]))
L_prob = np.zeros(len(L))
for i in range(len(L)):
if L[i] < L_star and (L_star/del_1) < L[i]:
L_prob[i] = c * (L[i]/L_star)**-alpha
elif L[i] > L_star and L[i] < del_2 * L_star:
L_prob[i] = c * (L[i]/L_star)**-beta
elif L[i] < (L_star/del_1):
L_prob[i] = 0 ## What to do when the values fall outside the range that defines the power law?
            SGR_test[i] = 1 ##Flags whether the luminosity at source would be low enough to be considered an SGR
else:
L_prob[i] = 0
return L_prob, SGR_test
#################################################################
def L_func1(L): ## ##Builds the broken power law based on a log scale from 52 to 59
L_star = np.log10(4.61e51 * 1e7)
del_1 = 30
del_2 = 10
alpha = 0.5
beta = 1.5
N = len(L)
L2 = np.zeros(N)
summ = 0
sum1 = np.zeros(N)
for i in range(N):
if L[i] < L_star and (L_star/del_1) < L[i]:
L2[i] = (L[i]/L_star)**-alpha
elif L[i] > L_star and L[i] < del_2 * L_star:
L2[i] = (L[i]/L_star)**-beta
else:
L2[i] = L_star
summ += L2[i]
c = 1/(summ)
sum1[i] = summ
L2 *= c
return L2, c
#################################################################
def cumulative(array): ### #Builds cumulative distributions
N = array.shape[0]
summing = np.zeros(N + 1)
#array = L2
for i in range(1, N + 1):
df = pd.DataFrame(array[:i])
summing[i] = df.sum().values.tolist()[0]
return summing# /= summing[-1]
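#Note: for a 1-D input this is equivalent to np.concatenate(([0.0], np.cumsum(array))),
#which avoids rebuilding a DataFrame on every iteration; the loop above is kept for fidelity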
##If you have N galaxies
##########################################################################################
def axis_rotation(axis, point, angle): ## Rotation about an axis function
init_matrix = np.array([[0, -1 * axis[2], axis[1]],
[axis[2], 0, -1 * axis[0]],
[-1 * axis[1], axis[0], 0]])
matrix_2 = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
term_2 = np.sin(angle) * init_matrix
rot_matrix = (1 - np.cos(angle)) * np.dot(init_matrix, init_matrix) + term_2 + matrix_2
rotated_point = np.dot(rot_matrix, point)
return rotated_point
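#Note: axis_rotation above is Rodrigues' rotation formula, R = I + sin(angle) * K
#+ (1 - cos(angle)) * K^2, where K is the skew-symmetric matrix of the (unit) rotation axis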
def Sector_find(RA_grb, Dec_grb, err_radius):
'''
    Given the coordinates of the GRB location and an error in the position, this function
    uses cone_search to find all sky sectors that the cone intersects, reads the
    corresponding csv files and compiles them into one dataframe
'''
    #corrects for cases where the rotation of the galaxy coords puts the GRB in an invalid position
if abs(Dec_grb) > 90:
x = RA_grb
parity = Dec_grb/abs(Dec_grb)
Dec_grb = (180 - abs(Dec_grb))*parity
RA_grb = RA_grb + 180
if RA_grb > 360:
RA_grb = x - 180
elif RA_grb < 0:
RA_grb = 360 + RA_grb
#making the sky coordinates
coords = SkyCoord(RA_grb, Dec_grb, unit = "deg")
#finding intersecting sectors
sectors = hp.cone_search_skycoord(coords, radius = err_radius*u.degree)
#making the empty dataframe
df_container = pd.DataFrame()
for i in sectors:
'''
loop over the intersecting sectors to read the files and append to
the df_container
'''
        name = "Sector_{}".format(i)
holder = pd.read_csv("Data Files/GLADE_Sectioned/{}.csv".format(name),\
delimiter = ",", index_col = 0)
df_container = df_container.append(holder)
return df_container
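#Illustrative call (hypothetical numbers): Sector_find(120.0, -30.0, 2 * 2.62) reads every
#Sector_<n>.csv whose HEALPix pixel intersects that error cone and returns them as one DataFrame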
#########################################################################################
#########################################################################################
df_master = pd.read_csv("Data Files/GLADE_Master.csv", delimiter = ",", low_memory = False) ##GLADE_Master.csv previously defined
L1 = np.linspace(56, 59, 101) #In J now
L2, c = L_func1(L1) # ##Builds broken power law
cumuL = cumulative(L2) ##Luminosity Distribution
df_cumLum = pd.read_csv("Data Files/Cumulative Luminosity.csv")
df_cumLum.columns = ["NaN", "Cumulative Luminosity"]
normal_c = df_cumLum[["Cumulative Luminosity"]].values[-1][0]
L_rank = df_cumLum[["Cumulative Luminosity"]].values * 1/normal_c
df_cumLum = df_cumLum[["Cumulative Luminosity"]].values# ## This is all to do with building a usable and callable power law
lum_N = np.linspace(0, df_cumLum.shape[0], df_cumLum.shape[0])
df_dL = df_master[["Luminosity Distance"]]
#using HEALPix to split the sky into equal area sectors
hp = HEALPix(nside=16, order='ring', frame=ICRS())
tests = randint(0, 2, size = N) ## If tests[i] = 0, use test galaxy, or if = 1, choose random point beyond the catalog
dummies = random(N)
RandL = random(N)
gals = np.zeros(N) ## Index of the host galaxy drawn from the cumulative luminosity distribution
gal_index = np.zeros(N)
"""
aa = np.zeros(shape = (N, 5)) # Storing Angular distance
ab = np.zeros(shape = (N, 5)) # Storing Luminosity Distance
ac = np.zeros(shape = (N, 5)) # Storing B Luminosity
ad = np.zeros(shape = (N, 5)) # Storing Luminosity Probability
"""
lum_atsource = np.zeros(N)
received_luminosity = np.zeros(N)
cumul_N = np.zeros(N)
lum_list = list(L_rank)
df_dL = df_dL.values.tolist() ## Luminosity distance values for use
a = np.zeros(N) ## For storing temporary and unimportant values
b = np.zeros(N) ## For storing temporary and unimportant values
test_ra = df_master[["RA"]]
test_dec = df_master[["dec"]]
indices = list(np.arange(df_master.shape[0]))
error_radius = 2 * (2.62) ## Change as necessary - this is an example value from HEARSCH
percentages = np.zeros(N)
distances = np.zeros(N)
luminosity_i = np.zeros(N)
rank_host = np.zeros(N)
faulty = np.zeros(shape = (N, 5)) ## All of this used to store values
phi = 2 * np.pi * random(N) * (180/np.pi) ## Random positions for rotations
theta = np.arccos(2 * random(N) - 1) * (180/np.pi)
thph = spherical_convert(theta, phi)
mod = np.zeros(N)
for i in range(N):
mod[i] = modulus(thph[:, i])
thph[:, i] /= mod[i]
xyz = np.zeros(shape = (N, 3))
m = np.zeros(shape = (N, 3))
ra_prime = np.zeros(N); dec_prime = np.zeros(N)
rotation_angle = error_radius * normal(size = N) * (np.pi/180)
"""
placement18 = np.zeros(N)
percentages18 = np.zeros(N)
for i in range(N):
current_i = indices.index(gals[i])
testr = np.array(test_ra.iloc[[current_i]].values.tolist())
testd = np.array(test_dec.iloc[[current_i]].values.tolist())
ident = np.zeros(df_master.shape[0])
print(str(i + 1), "out of " + str(N))
print("Test galaxy: ", str(gals[i]))
ident[current_i] = 1
df_master["Identifier"] = ident ## Creates a mask for identifying the host galaxy
q, t, df_sliced = reduction(abs(ra_prime[i]), dec_prime[i], df_master) ## Reduces the catalog by RA and dec
ra = np.array(df_sliced[["RA"]].values.tolist())[:, 0]
dec = np.array(df_sliced[["dec"]].values.tolist())[:, 0]
Luminosity = np.array(df_sliced[["B Luminosity"]].values.tolist()) #Luminosity_Handling(np.array(df_sliced[["Absolute B Magnitude"]].values.tolist())) ## Converts A
dl = np.array(df_sliced[["Luminosity Distance"]].values.tolist())
lum_prob, SGR_test = L_func(received_luminosity[i], c, dl) ##Uses the luminosity function to calculate probabilities
df_sliced["Luminosity Probability"] = lum_prob
df_sliced["SGR flag"] = SGR_test
angular_distaance = np.zeros(df_sliced.shape[0])
for k in range(df_sliced.shape[0]):
angular_distaance[k] = Ang_Dist(ra[k], testr[0][0], dec[k], testd[0][0])
df_sliced["Angular Distance"] = angular_distaance
ranking23 = rank23(angular_distaance, error_radius, dl, Luminosity, lum_prob) ## Uses defined ranking statistic
df_sliced["Rank23"] = ranking23
df_sliced18 = (pd.DataFrame.sort_values(df_sliced, by = ["Rank23"], ascending = False))
id18 = df_sliced18[["Identifier"]].values.tolist()
mask_check18 = [i for i, val in enumerate(id18) if val == [1]]
Luminosity = np.asarray(Luminosity)
if len(mask_check18) == 0:
print("Did not place\n\n\n")
next
else:
length = len(id18) + 1
placement18[i] = mask_check18[0] + 1
#display(Markdown("The keplerian orbit appears to be happening at r ={0:.2f} km" .format(float(kepler(M_kep, w))/1000)))
#print("Galaxy data: \nDistance is {0:.2f} Mpc\nLuminosity is {1:.3e}\nra and dec [{2:.2f}, {3:.2f}] compared to reported ra and dec [{4:.2f}, {5:.2f}] \nTrue luminosity {6:.3e} W" .format(dl[int(placement18[i] - 1)][0], Luminosity[int(placement18[i] - 1)][0], fin_ra[int(placement18[i] - 1)][0], fin_dec[int(placement18[i] - 1)][0], testr[0][0], testd[0][0], b[i]))
print("Galaxy placed", int(placement18[i]), "out of", str(length), "with statistic 18\n\n\n")
percentages18[i] = placement18[i]/length
"""
for i in range(N):
gals[i] = find_nearest(L_rank, dummies[i]) ## Picks out galaxies from the cumulative luminosity distribution
a[i] = (find_nearest(cumuL, (RandL[i])))
if a[i] == len(L1):
a[i] = len(L1) - 1
b[i] = 10**(L1[int(a[i])])
received_luminosity[i] = Luminosity_for_convert((b[i]), df_dL[int(gals[i])][0])
## Takes dummy luminosity and converts it to luminosity at source by using the luminosity distance of
## the host galaxy
current_i = indices.index(gals[i])
testr = np.array(test_ra.iloc[[current_i]].values.tolist())
testd = np.array(test_dec.iloc[[current_i]].values.tolist())
## Extracting data about the host
##Rotation of test ra and dec
####################################################################################################
xyz[i, :] = spherical_convert((50), (10))
m[i, :] = np.cross(xyz[i, :], thph[:, i])#Defines an orthogonal axis
m_mod = modulus(m[i, :])
m[i, :] /= m_mod #Normalises orthoganal axis
x_prime = axis_rotation(m[i, :], xyz[i, :], rotation_angle[i]) ##Rotates about an axis
xmod = modulus(x_prime)
x_prime /= xmod
ra_prime[i], dec_prime[i] = back_convert(x_prime)
ra_prime[i] = testr[0][0] + (ra_prime[i] - 50)
dec_prime[i] = testd[0][0] + (dec_prime[i] - 10)
###################################################################################################
#ident = np.zeros(df_master.shape[0])
print(str(i + 1), "out of " + str(N))
print("Test galaxy: ", str(gals[i]))
#ident[current_i] = 1
#df_master["Identifier"] = ident ## Creates a mask for identifying the host galaxy
#q, t, df_sliced = reduction(abs(ra_prime[i]), dec_prime[i], df_master) ## Reduces the catalog by RA and dec
    '''My new function'''
#selects the corresponding sectors to look through
df_sliced = Sector_find(ra_prime[i], dec_prime[i], error_radius)
df_sliced = df_sliced.rename(columns = {"Unnamed: 0.1": "Unnamed: 0"})
#creates a mask to identify the host galaxy, the host having an identifier of 1
ident = np.zeros(df_sliced.shape[0])
df_sliced["Identifier"] = ident
df_sliced.at[current_i, "Identifier"] = 1
#if statement resolves an issue where sometimes the host galaxy has its info corrupted
    if math.isnan(df_sliced.loc[current_i]["RA"]):
'''
checks if the position data is corrupted, if so then it retrives the information
from the master file. The only thing that isn't recovered is the sector but
that won't really matter, plus I can grab that if it is needed
'''
common = df_sliced.columns & df_master.columns
x = df_master.loc[current_i]
df_sliced.at[current_i, common] = list(x)
ra = np.array(df_sliced[["RA"]].values.tolist())[:, 0]
dec = np.array(df_sliced[["dec"]].values.tolist())[:, 0]
Luminosity = np.array(df_sliced[["B Luminosity"]].values.tolist()) #Luminosity_Handling(np.array(df_sliced[["Absolute B Magnitude"]].values.tolist())) ## Converts A
dl = np.array(df_sliced[["Luminosity Distance"]].values.tolist())
lum_prob, SGR_test = L_func(received_luminosity[i], c, dl) ##Uses the luminosity function to calculate probabilities
df_sliced["Luminosity Probability"] = lum_prob
df_sliced["SGR flag"] = SGR_test
angular_distaance = np.zeros(df_sliced.shape[0])
for k in range(df_sliced.shape[0]):
angular_distaance[k] = Ang_Dist(ra[k], ra_prime[i], dec[k], dec_prime[i])
id_check = [i for i, val in enumerate(angular_distaance) if math.isnan(val) == True]
for k in range(len(id_check)):
angular_distaance[int(id_check[k])] = Ang_Dist(ra_prime[i], testr, dec_prime[i], testd)
angular_distance = Ang_Dist(ra, testr[0][0], dec, testd[0][0])
# Spit out comparison ra and dec
# Sky position and true luminosity
# We might find that knowing the details might help better interpret the results
# Test revisions
df_sliced["Angular Distance"] = angular_distaance
ranking = rank(angular_distaance, error_radius, dl, Luminosity, lum_prob) ## Uses defined ranking statistic
df_sliced["Rank"] = ranking
ranking2 = rank2(angular_distaance, error_radius, dl, Luminosity, lum_prob) ## Uses defined ranking statistic
df_sliced["Rank2"] = ranking2
ranking3 = rank3(angular_distaance, error_radius, dl, Luminosity, lum_prob) ## Uses defined ranking statistic
df_sliced["Rank3"] = ranking3
ranking4 = rank4(angular_distaance, error_radius, dl, Luminosity, lum_prob) ## Uses defined ranking statistic
df_sliced["Rank4"] = ranking4
ranking5 = rank5(angular_distaance, error_radius, dl, Luminosity, lum_prob) ## Uses defined ranking statistic
df_sliced["Rank5"] = ranking5
ranking6 = rank6(angular_distaance, error_radius, dl, Luminosity, lum_prob) ## Uses defined ranking statistic
df_sliced["Rank6"] = ranking6
ranking7 = rank7(angular_distaance, error_radius, dl, Luminosity, lum_prob) ## Uses defined ranking statistic
df_sliced["Rank7"] = ranking7
ranking8 = rank8(angular_distaance, error_radius, dl, Luminosity, lum_prob) ## Uses defined ranking statistic
df_sliced["Rank8"] = ranking8
ranking9 = rank9(angular_distaance, error_radius, dl, Luminosity, lum_prob) ## Uses defined ranking statistic
df_sliced["Rank9"] = ranking9
ranking10 = rank10(angular_distaance, error_radius, dl, Luminosity, lum_prob) ## Uses defined ranking statistic
df_sliced["Rank10"] = ranking10
ranking11 = rank11(angular_distaance, error_radius, dl, Luminosity, lum_prob) ## Uses defined ranking statistic
df_sliced["Rank11"] = ranking11
ranking12 = rank12(angular_distaance, error_radius, dl, Luminosity, lum_prob) ## Uses defined ranking statistic
df_sliced["Rank12"] = ranking12
ranking13 = rank13(angular_distaance, error_radius, dl, Luminosity, lum_prob) ## Uses defined ranking statistic
df_sliced["Rank13"] = ranking13
ranking14 = rank14(angular_distaance, error_radius, dl, Luminosity, lum_prob) ## Uses defined ranking statistic
df_sliced["Rank14"] = ranking14
ranking15 = rank15(angular_distaance, error_radius, dl, Luminosity, lum_prob) ## Uses defined ranking statistic
df_sliced["Rank15"] = ranking15
ranking16 = rank16(angular_distaance, error_radius, dl, Luminosity, lum_prob) ## Uses defined ranking statistic
df_sliced["Rank16"] = ranking16
ranking17 = rank17(angular_distaance, error_radius, dl, Luminosity, lum_prob) ## Uses defined ranking statistic
df_sliced["Rank17"] = ranking17
ranking18 = rank18(angular_distaance, error_radius, dl, Luminosity, lum_prob) ## Uses defined ranking statistic
df_sliced["Rank18"] = ranking18
ranking19 = rank19(angular_distaance, error_radius, dl, Luminosity, lum_prob) ## Uses defined ranking statistic
df_sliced["Rank19"] = ranking19
ranking20 = rank20(angular_distaance, error_radius, dl, Luminosity, lum_prob) ## Uses defined ranking statistic
df_sliced["Rank20"] = ranking20
ranking21 = rank21(angular_distaance, error_radius, dl, Luminosity, lum_prob) ## Uses defined ranking statistic
df_sliced["Rank21"] = ranking21
ranking22 = rank22(angular_distaance, error_radius, dl, Luminosity, lum_prob) ## Uses defined ranking statistic
df_sliced["Rank22"] = ranking22
ranking23 = rank23(angular_distaance, error_radius, dl, Luminosity, lum_prob) ## Uses defined ranking statistic
df_sliced["Rank23"] = ranking23
fin_ra = np.asarray(df_sliced[["RA"]].values.tolist()); fin_dec = np.asarray(df_sliced[["dec"]].values.tolist())
## Storing values and extending the reduced catalog
df_sliced = (pd.DataFrame.sort_values(df_sliced, by = ["Rank"], ascending = False)) ## Orders resultant sliced array
df_sliced2 = (pd.DataFrame.sort_values(df_sliced, by = ["Rank2"], ascending = False))
df_sliced3 = (pd.DataFrame.sort_values(df_sliced, by = ["Rank3"], ascending = False))
df_sliced4 = (pd.DataFrame.sort_values(df_sliced, by = ["Rank4"], ascending = False))
df_sliced5 = (pd.DataFrame.sort_values(df_sliced, by = ["Rank5"], ascending = False))
df_sliced6 = (pd.DataFrame.sort_values(df_sliced, by = ["Rank6"], ascending = False))
df_sliced7 = (pd.DataFrame.sort_values(df_sliced, by = ["Rank7"], ascending = False))
df_sliced8 = (pd.DataFrame.sort_values(df_sliced, by = ["Rank8"], ascending = False)) ## Orders resultant sliced array
df_sliced9 = (pd.DataFrame.sort_values(df_sliced, by = ["Rank9"], ascending = False))
df_sliced10 = (pd.DataFrame.sort_values(df_sliced, by = ["Rank10"], ascending = False))
df_sliced11 = (pd.DataFrame.sort_values(df_sliced, by = ["Rank11"], ascending = False))
df_sliced12 = (pd.DataFrame.sort_values(df_sliced, by = ["Rank12"], ascending = False))
df_sliced13 = (pd.DataFrame.sort_values(df_sliced, by = ["Rank13"], ascending = False))
df_sliced14 = (pd.DataFrame.sort_values(df_sliced, by = ["Rank14"], ascending = False))
df_sliced15 = (pd.DataFrame.sort_values(df_sliced, by = ["Rank15"], ascending = False))
df_sliced16 = (pd.DataFrame.sort_values(df_sliced, by = ["Rank16"], ascending = False))
df_sliced17 = (pd.DataFrame.sort_values(df_sliced, by = ["Rank17"], ascending = False))
df_sliced18 = (pd.DataFrame.sort_values(df_sliced, by = ["Rank18"], ascending = False))
df_sliced19 = (pd.DataFrame.sort_values(df_sliced, by = ["Rank19"], ascending = False))
df_sliced20 = (pd.DataFrame.sort_values(df_sliced, by = ["Rank20"], ascending = False))
df_sliced21 = (pd.DataFrame.sort_values(df_sliced, by = ["Rank21"], ascending = False))
df_sliced22 = (pd.DataFrame.sort_values(df_sliced, by = ["Rank22"], ascending = False))
df_sliced23 = (pd.DataFrame.sort_values(df_sliced, by = ["Rank23"], ascending = False))
idi = df_sliced[["Identifier"]].values.tolist() ##Mask handling to check for values
id2 = df_sliced2[["Identifier"]].values.tolist()
id3 = df_sliced3[["Identifier"]].values.tolist()
id4 = df_sliced4[["Identifier"]].values.tolist()
id5 = df_sliced5[["Identifier"]].values.tolist()
id6 = df_sliced6[["Identifier"]].values.tolist()
id7 = df_sliced7[["Identifier"]].values.tolist()
id8 = df_sliced8[["Identifier"]].values.tolist() ##Mask handling to check for values
id9 = df_sliced9[["Identifier"]].values.tolist()
id10 = df_sliced10[["Identifier"]].values.tolist()
id11 = df_sliced11[["Identifier"]].values.tolist()
id12 = df_sliced12[["Identifier"]].values.tolist()
id13 = df_sliced13[["Identifier"]].values.tolist()
id14 = df_sliced14[["Identifier"]].values.tolist()
id15 = df_sliced15[["Identifier"]].values.tolist()
id16 = df_sliced16[["Identifier"]].values.tolist()
id17 = df_sliced17[["Identifier"]].values.tolist()
id18 = df_sliced18[["Identifier"]].values.tolist()
id19 = df_sliced19[["Identifier"]].values.tolist()
id20 = df_sliced20[["Identifier"]].values.tolist()
id21 = df_sliced21[["Identifier"]].values.tolist()
id22 = df_sliced22[["Identifier"]].values.tolist()
id23 = df_sliced23[["Identifier"]].values.tolist()
mask_check = [i for i, val in enumerate(idi) if val == [1]]
mask_check2 = [i for i, val in enumerate(id2) if val == [1]]
mask_check3 = [i for i, val in enumerate(id3) if val == [1]]
mask_check4 = [i for i, val in enumerate(id4) if val == [1]]
mask_check5 = [i for i, val in enumerate(id5) if val == [1]]
mask_check6 = [i for i, val in enumerate(id6) if val == [1]]
mask_check7 = [i for i, val in enumerate(id7) if val == [1]]
mask_check8 = [i for i, val in enumerate(id8) if val == [1]]
mask_check9 = [i for i, val in enumerate(id9) if val == [1]]
mask_check10 = [i for i, val in enumerate(id10) if val == [1]]
mask_check11 = [i for i, val in enumerate(id11) if val == [1]]
mask_check12 = [i for i, val in enumerate(id12) if val == [1]]
mask_check13 = [i for i, val in enumerate(id13) if val == [1]]
mask_check14 = [i for i, val in enumerate(id14) if val == [1]]
mask_check15 = [i for i, val in enumerate(id15) if val == [1]]
mask_check16 = [i for i, val in enumerate(id16) if val == [1]]
mask_check17 = [i for i, val in enumerate(id17) if val == [1]]
mask_check18 = [i for i, val in enumerate(id18) if val == [1]]
mask_check19 = [i for i, val in enumerate(id19) if val == [1]]
mask_check20 = [i for i, val in enumerate(id20) if val == [1]]
mask_check21 = [i for i, val in enumerate(id21) if val == [1]]
mask_check22 = [i for i, val in enumerate(id22) if val == [1]]
mask_check23 = [i for i, val in enumerate(id23) if val == [1]]
Luminosity = np.asarray(Luminosity)
if len(mask_check20) == 0:
print("Did not place\n\n\n")
next
else:
length = len(id20) + 1
placement[i] = mask_check[0] + 1; length = len(idi) + 1
placement2[i] = mask_check2[0] + 1
placement3[i] = mask_check3[0] + 1
placement4[i] = mask_check4[0] + 1
placement5[i] = mask_check5[0] + 1
placement6[i] = mask_check6[0] + 1
placement7[i] = mask_check7[0] + 1
placement8[i] = mask_check8[0] + 1
placement9[i] = mask_check9[0] + 1
placement10[i] = mask_check10[0] + 1
placement11[i] = mask_check11[0] + 1
placement12[i] = mask_check12[0] + 1
placement13[i] = mask_check13[0] + 1
placement14[i] = mask_check14[0] + 1
placement15[i] = mask_check15[0] + 1
placement16[i] = mask_check16[0] + 1
placement17[i] = mask_check17[0] + 1
placement18[i] = mask_check18[0] + 1
placement19[i] = mask_check19[0] + 1
placement20[i] = mask_check20[0] + 1
placement21[i] = mask_check21[0] + 1
placement22[i] = mask_check22[0] + 1
placement23[i] = mask_check23[0] + 1
#display(Markdown("The keplerian orbit appears to be happening at r ={0:.2f} km" .format(float(kepler(M_kep, w))/1000)))
print("Galaxy data: \nDistance is {0:.2f} Mpc\nLuminosity is {1:.3e}\nra and dec [{2:.2f}, {3:.2f}] compared to reported ra and dec [{4:.2f}, {5:.2f}] \nTrue luminosity {6:.3e} W" .format(dl[int(placement[i] - 1)][0], Luminosity[int(placement[i] - 1)][0], fin_ra[int(placement[i] - 1)][0], fin_dec[int(placement[i] - 1)][0], testr[0][0], testd[0][0], b[i]))
print("Galaxy placed", int(placement[i]), "out of", str(length), "with statistic 1\n\n\n")
print("Galaxy placed", int(placement2[i]), "out of", str(length), "with statistic 2\n\n\n")
print("Galaxy placed", int(placement3[i]), "out of", str(length), "with statistic 3\n\n\n")
print("Galaxy placed", int(placement4[i]), "out of", str(length), "with statistic 4\n\n\n")
print("Galaxy placed", int(placement5[i]), "out of", str(length), "with statistic 5\n\n\n")
print("Galaxy placed", int(placement6[i]), "out of", str(length), "with statistic 6\n\n\n")
print("Galaxy placed", int(placement7[i]), "out of", str(length), "with statistic 7\n\n\n")
print("Galaxy placed", int(placement8[i]), "out of", str(length), "with statistic 8\n\n\n")
print("Galaxy placed", int(placement9[i]), "out of", str(length), "with statistic 9\n\n\n")
print("Galaxy placed", int(placement10[i]), "out of", str(length), "with statistic 10\n\n\n")
print("Galaxy placed", int(placement11[i]), "out of", str(length), "with statistic 11\n\n\n")
print("Galaxy placed", int(placement12[i]), "out of", str(length), "with statistic 12\n\n\n")
print("Galaxy placed", int(placement13[i]), "out of", str(length), "with statistic 13\n\n\n")
print("Galaxy placed", int(placement14[i]), "out of", str(length), "with statistic 14\n\n\n")
print("Galaxy placed", int(placement15[i]), "out of", str(length), "with statistic 15\n\n\n")
print("Galaxy placed", int(placement16[i]), "out of", str(length), "with statistic 16\n\n\n")
print("Galaxy placed", int(placement17[i]), "out of", str(length), "with statistic 17\n\n\n")
print("Galaxy placed", int(placement18[i]), "out of", str(length), "with statistic 18\n\n\n")
print("Galaxy placed", int(placement19[i]), "out of", str(length), "with statistic 19\n\n\n")
print("Galaxy placed", int(placement20[i]), "out of", str(length), "with statistic 20\n\n\n")
print("Galaxy placed", int(placement21[i]), "out of", str(length), "with statistic 21\n\n\n")
print("Galaxy placed", int(placement22[i]), "out of", str(length), "with statistic 22\n\n\n")
print("Galaxy placed", int(placement23[i]), "out of", str(length), "with statistic 23\n\n\n")
percentages[i] = placement[i]/length
percentages2[i] = placement2[i]/length
percentages3[i] = placement3[i]/length
percentages4[i] = placement4[i]/length
percentages5[i] = placement5[i]/length
percentages6[i] = placement6[i]/length
percentages7[i] = placement7[i]/length
percentages8[i] = placement8[i]/length
percentages9[i] = placement9[i]/length
percentages10[i] = placement10[i]/length
percentages11[i] = placement11[i]/length
percentages12[i] = placement12[i]/length
percentages13[i] = placement13[i]/length
percentages14[i] = placement14[i]/length
percentages15[i] = placement15[i]/length
percentages16[i] = placement16[i]/length
percentages17[i] = placement17[i]/length
percentages18[i] = placement18[i]/length
percentages19[i] = placement19[i]/length
percentages20[i] = placement20[i]/length
percentages21[i] = placement21[i]/length
percentages22[i] = placement22[i]/length
percentages23[i] = placement23[i]/length
distances[i] = int(dl[int(placement[i]) - 1][0]); luminosity_i[i] = int(Luminosity[int(placement[i]) - 1][0])
ras_dex[i, 0] = fin_ra[int(placement[i] - 1)]; ras_dex[i, 1] = fin_dec[int(placement[i] - 1)]; test_case[i, 0] = testr[0][0]; test_case[i, 1] = testd[0][0]
#rank_host[i] = df_sliced20[["Rank20"]].values.tolist()[id20.index(max(id20))][0]
faulty[i, 0] = df_master[["RA"]].values.tolist()[current_i][0] #ra of galaxy
faulty[i, 1] = ra_prime[i] #ra of grb
faulty[i, 2] = df_master[["dec"]].values.tolist()[current_i][0] #dec of galaxy
faulty[i, 3] = dec_prime[i] #dec of grb
if math.isnan(rank_host[i]) == True:
faulty[i, 4] = 1 #Mask
no_se_func.append(i)
#break
else:
faulty[i, 4] = 0 #Mask
next
"""
for k in range(5):
aa[i][k] = np.exp(-(df_sliced[["Angular Distance"]].head(5).values.tolist()[k][0])/error_radius)
ab[i][k] = df_sliced[["Luminosity Distance"]].head(5).values.tolist()[k][0]
ac[i][k] = df_sliced[["B Luminosity"]].head(5).values.tolist()[k][0]
ad[i][k] = df_sliced[["Luminosity Probability"]].head(5).values.tolist()[k][0]
"""
"""
plt.figure(0)
plt.plot(percentages19, np.log10(distances), "kx")
#plt.title("Distance vs. percentage performance")
plt.ylabel("Log$_{10}$ Distance /Mpc"); plt.xlabel("Percentage placement"); plt.grid()
#plt.xlim(1e-27, 1)
plt.savefig("Distances vs. percentage.png")
plt.figure(1)
plt.plot(percentages19, np.log10(b), "kx")
#plt.title("Intrinsic Luminosity vs. percentage performance")
plt.ylabel("Log$_{10}$ Luminosity /W"); plt.xlabel("Percentage placement"); plt.grid()
#plt.xlim(1e-27, 1)
plt.savefig("Luminosity vs. percentage.png")
plt.figure(2)
plt.plot(percentages19, rotation_angle, "kx")
plt.ylabel("Angular offset /$^o$"); plt.xlabel("Percentage performance")
plt.grid()
plt.savefig("Angular offset vs. percentage.png")
### The following can be used to investigate any values that flag up as false
"""
f_v = [i for i, val in enumerate(faulty[:, 4]) if val == 0]
f_1v = [i for i, val in enumerate(faulty[:, 4]) if val == 1]
sets = set(np.arange(0, len(faulty), 1)) - set(f_v)
ft = | pd.DataFrame(faulty) | pandas.DataFrame |
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv("./data1402.csv", encoding='utf-8', dtype=str)
df = pd.DataFrame(df, columns=['score'], dtype=float)
section = np.array(range(0, 105, 5))
result = | pd.cut(df['score'], section) | pandas.cut |
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime as dt
import logging
import time
from enum import Enum
from itertools import chain
from typing import Iterable, List, Optional, Tuple, Union, Dict
from urllib.parse import urlencode
import cachetools
import numpy
import pandas as pd
from cachetools import TTLCache
from gs_quant.api.data import DataApi
from gs_quant.base import Base
from gs_quant.data.core import DataContext, DataFrequency
from gs_quant.data.log import log_debug
from gs_quant.errors import MqValueError
from gs_quant.markets import MarketDataCoordinate
from gs_quant.session import GsSession
from gs_quant.target.common import MarketDataVendor, PricingLocation
from gs_quant.target.coordinates import MDAPIDataBatchResponse, MDAPIDataQuery, MDAPIDataQueryResponse, MDAPIQueryField
from gs_quant.target.data import DataQuery, DataQueryResponse
from gs_quant.target.data import DataSetEntity, DataSetFieldEntity
from .assets import GsIdType
from ...target.assets import EntityQuery, FieldFilterMap
_logger = logging.getLogger(__name__)
class QueryType(Enum):
IMPLIED_VOLATILITY = "Implied Volatility"
IMPLIED_VOLATILITY_BY_EXPIRATION = "Implied Volatility By Expiration"
IMPLIED_CORRELATION = "Implied Correlation"
REALIZED_CORRELATION = "Realized Correlation"
AVERAGE_IMPLIED_VOLATILITY = "Average Implied Volatility"
AVERAGE_IMPLIED_VARIANCE = "Average Implied Variance"
AVERAGE_REALIZED_VOLATILITY = "Average Realized Volatility"
SWAP_RATE = "Swap Rate"
SWAP_ANNUITY = "Swap Annuity"
SWAPTION_PREMIUM = "Swaption Premium"
SWAPTION_ANNUITY = "Swaption Annuity"
BASIS_SWAP_RATE = "Basis Swap Rate"
XCCY_SWAP_SPREAD = "Xccy Swap Spread"
SWAPTION_VOL = "Swaption Vol"
MIDCURVE_VOL = "Midcurve Vol"
CAP_FLOOR_VOL = "Cap Floor Vol"
SPREAD_OPTION_VOL = "Spread Option Vol"
INFLATION_SWAP_RATE = "Inflation Swap Rate"
FORWARD = "Forward"
PRICE = "Price"
ATM_FWD_RATE = "Atm Fwd Rate"
BASIS = "Basis"
VAR_SWAP = "Var Swap"
MIDCURVE_PREMIUM = "Midcurve Premium"
MIDCURVE_ANNUITY = "Midcurve Annuity"
MIDCURVE_ATM_FWD_RATE = "Midcurve Atm Fwd Rate"
CAP_FLOOR_ATM_FWD_RATE = "Cap Floor Atm Fwd Rate"
SPREAD_OPTION_ATM_FWD_RATE = "Spread Option Atm Fwd Rate"
FORECAST = "Forecast"
IMPLIED_VOLATILITY_BY_DELTA_STRIKE = "Implied Volatility By Delta Strike"
FUNDAMENTAL_METRIC = "Fundamental Metric"
POLICY_RATE_EXPECTATION = "Policy Rate Expectation"
CENTRAL_BANK_SWAP_RATE = "Central Bank Swap Rate"
FORWARD_PRICE = "Forward Price"
FAIR_PRICE = "Fair Price"
PNL = "Pnl"
SPOT = "Spot"
ES_NUMERIC_SCORE = "Es Numeric Score"
ES_NUMERIC_PERCENTILE = "Es Numeric Percentile"
ES_POLICY_SCORE = "Es Policy Score"
ES_POLICY_PERCENTILE = "Es Policy Percentile"
ES_SCORE = "Es Score"
ES_PERCENTILE = "Es Percentile"
ES_PRODUCT_IMPACT_SCORE = "Es Product Impact Score"
ES_PRODUCT_IMPACT_PERCENTILE = "Es Product Impact Percentile"
G_SCORE = "G Score"
G_PERCENTILE = "G Percentile"
ES_MOMENTUM_SCORE = "Es Momentum Score"
ES_MOMENTUM_PERCENTILE = "Es Momentum Percentile"
G_REGIONAL_SCORE = "G Regional Score"
G_REGIONAL_PERCENTILE = "G Regional Percentile"
ES_DISCLOSURE_PERCENTAGE = "Es Disclosure Percentage"
CONTROVERSY_SCORE = "Controversy Score"
CONTROVERSY_PERCENTILE = "Controversy Percentile"
RATING = "Rating"
CONVICTION_LIST = "Conviction List"
FAIR_VALUE = "Fair Value"
FX_FORECAST = "Fx Forecast"
GROWTH_SCORE = "Growth Score"
FINANCIAL_RETURNS_SCORE = "Financial Returns Score"
MULTIPLE_SCORE = "Multiple Score"
INTEGRATED_SCORE = "Integrated Score"
COMMODITY_FORECAST = "Commodity Forecast"
FORECAST_VALUE = "Forecast Value"
FORWARD_POINT = "Forward Point"
FCI = "Fci"
LONG_RATES_CONTRIBUTION = "Long Rates Contribution"
SHORT_RATES_CONTRIBUTION = "Short Rates Contribution"
CORPORATE_SPREAD_CONTRIBUTION = "Corporate Spread Contribution"
SOVEREIGN_SPREAD_CONTRIBUTION = "Sovereign Spread Contribution"
EQUITIES_CONTRIBUTION = "Equities Contribution"
REAL_LONG_RATES_CONTRIBUTION = "Real Long Rates Contribution"
REAL_SHORT_RATES_CONTRIBUTION = "Real Short Rates Contribution"
REAL_FCI = "Real Fci"
DWI_CONTRIBUTION = "Dwi Contribution"
REAL_TWI_CONTRIBUTION = "Real Twi Contribution"
TWI_CONTRIBUTION = "Twi Contribution"
COVARIANCE = "Covariance"
FACTOR_EXPOSURE = "Factor Exposure"
FACTOR_RETURN = "Factor Return"
FACTOR_PNL = "Factor Pnl"
FACTOR_PROPORTION_OF_RISK = "Factor Proportion Of Risk"
DAILY_RISK = "Daily Risk"
ANNUAL_RISK = "Annual Risk"
VOLATILITY = "Volatility"
CORRELATION = "Correlation"
OIS_XCCY = "Ois Xccy"
OIS_XCCY_EX_SPIKE = "Ois Xccy Ex Spike"
USD_OIS = "Usd Ois"
NON_USD_OIS = "Non Usd Ois"
class GsDataApi(DataApi):
__definitions = {}
__asset_coordinates_cache = TTLCache(10000, 86400)
DEFAULT_SCROLL = '30s'
# DataApi interface
@classmethod
def query_data(cls, query: Union[DataQuery, MDAPIDataQuery], dataset_id: str = None,
asset_id_type: Union[GsIdType, str] = None) \
-> Union[MDAPIDataBatchResponse, DataQueryResponse, tuple, list]:
if isinstance(query, MDAPIDataQuery) and query.market_data_coordinates:
# Don't use MDAPIDataBatchResponse for now - it doesn't handle quoting style correctly
results: Union[MDAPIDataBatchResponse, dict] = cls.execute_query('coordinates', query)
if isinstance(results, dict):
return results.get('responses', ())
else:
return results.responses if results.responses is not None else ()
response: Union[DataQueryResponse, dict] = cls.execute_query(dataset_id, query)
return cls.get_results(dataset_id, response, query)
@staticmethod
def execute_query(dataset_id: str, query: Union[DataQuery, MDAPIDataQuery]):
return GsSession.current._post('/data/{}/query'.format(dataset_id), payload=query)
@staticmethod
def get_results(dataset_id: str, response: Union[DataQueryResponse, dict], query: DataQuery) -> list:
if isinstance(response, dict):
total_pages = response.get('totalPages')
results = response.get('data', ())
else:
total_pages = response.total_pages if response.total_pages is not None else 0
results = response.data if response.data is not None else ()
if total_pages:
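            # fetch the remaining pages recursively, counting down from the last page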
if query.page is None:
query.page = total_pages - 1
results = results + GsDataApi.get_results(dataset_id, GsDataApi.execute_query(dataset_id, query), query)
elif query.page - 1 > 0:
query.page -= 1
results = results + GsDataApi.get_results(dataset_id, GsDataApi.execute_query(dataset_id, query), query)
else:
return results
return results
@classmethod
def last_data(cls, query: Union[DataQuery, MDAPIDataQuery], dataset_id: str = None, timeout: int = None) \
-> Union[list, tuple]:
kwargs = {}
if timeout is not None:
kwargs['timeout'] = timeout
if getattr(query, 'marketDataCoordinates', None):
result = GsSession.current._post('/data/coordinates/query/last', payload=query, **kwargs)
return result.get('responses', ())
else:
result = GsSession.current._post('/data/{}/last/query'.format(dataset_id), payload=query, **kwargs)
return result.get('data', ())
@classmethod
def symbol_dimensions(cls, dataset_id: str) -> tuple:
definition = cls.get_definition(dataset_id)
return definition.dimensions.symbolDimensions
@classmethod
def time_field(cls, dataset_id: str) -> str:
definition = cls.get_definition(dataset_id)
return definition.dimensions.timeField
# GS-specific functionality
@classmethod
def get_coverage(
cls,
dataset_id: str,
scroll: str = DEFAULT_SCROLL,
scroll_id: Optional[str] = None,
limit: int = None,
offset: int = None,
fields: List[str] = None,
include_history: bool = False
) -> List[dict]:
params = {
'limit': limit or 4000,
'scroll': scroll
}
if scroll_id:
params['scrollId'] = scroll_id
if offset:
params['offset'] = offset
if fields:
params['fields'] = fields
if include_history:
params['includeHistory'] = 'true'
body = GsSession.current._get(f'/data/{dataset_id}/coverage', payload=params)
results = scroll_results = body['results']
total_results = body['totalResults']
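        # keep scrolling until a page comes back empty or all 'totalResults' rows have been collected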
while len(scroll_results) and len(results) < total_results:
params['scrollId'] = body['scrollId']
body = GsSession.current._get(f'/data/{dataset_id}/coverage', payload=params)
scroll_results = body['results']
results += scroll_results
return results
@classmethod
def create(cls, definition: Union[DataSetEntity, dict]) -> DataSetEntity:
result = GsSession.current._post('/data/datasets', payload=definition)
return result
@classmethod
def update_definition(cls, dataset_id: str, definition: Union[DataSetEntity, dict]) -> DataSetEntity:
result = GsSession.current._put('/data/datasets/{}'.format(dataset_id), payload=definition, cls=DataSetEntity)
return result
@classmethod
def upload_data(cls, dataset_id: str, data: Union[pd.DataFrame, list, tuple]) -> dict:
if isinstance(data, pd.DataFrame):
# We require the Dataframe to return a list in the 'records' format:
# https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_json.html
data = data.to_json(orient='records')
result = GsSession.current._post('/data/{}'.format(dataset_id), payload=data)
return result
@classmethod
def get_definition(cls, dataset_id: str) -> DataSetEntity:
definition = cls.__definitions.get(dataset_id)
if not definition:
definition = GsSession.current._get('/data/datasets/{}'.format(dataset_id), cls=DataSetEntity)
if not definition:
raise MqValueError('Unknown dataset {}'.format(dataset_id))
cls.__definitions[dataset_id] = definition
return definition
@classmethod
def get_many_definitions(cls,
limit: int = 100,
dataset_id: str = None,
owner_id: str = None,
name: str = None,
mq_symbol: str = None) -> Tuple[DataSetEntity, ...]:
query_string = urlencode(dict(filter(lambda item: item[1] is not None,
dict(id=dataset_id, ownerId=owner_id, name=name,
mqSymbol=mq_symbol, limit=limit).items())))
res = GsSession.current._get('/data/datasets?{query}'.format(query=query_string), cls=DataSetEntity)['results']
return res
@classmethod
@cachetools.cached(__asset_coordinates_cache)
def get_many_coordinates(
cls,
mkt_type: str = None,
mkt_asset: str = None,
mkt_class: str = None,
mkt_point: Tuple[str, ...] = (),
*,
limit: int = 100,
return_type: type = str,
) -> Union[Tuple[str, ...], Tuple[MarketDataCoordinate, ...]]:
where = FieldFilterMap(
mkt_type=mkt_type.upper() if mkt_type is not None else None,
mkt_asset=mkt_asset.upper() if mkt_asset is not None else None,
mkt_class=mkt_class.upper() if mkt_class is not None else None,
)
for index, point in enumerate(mkt_point):
setattr(where, 'mkt_point' + str(index + 1), point.upper())
query = EntityQuery(
where=where,
limit=limit
)
results = GsSession.current._post('/data/mdapi/query', query)['results']
if return_type is str:
return tuple(coordinate['name'] for coordinate in results)
elif return_type is MarketDataCoordinate:
return tuple(
MarketDataCoordinate(
mkt_type=coordinate['dimensions']['mktType'],
mkt_asset=coordinate['dimensions']['mktAsset'],
mkt_class=coordinate['dimensions']['mktClass'],
mkt_point=tuple(coordinate['dimensions']['mktPoint'].values()),
mkt_quoting_style=coordinate['dimensions']['mktQuotingStyle']
) for coordinate in results)
else:
raise NotImplementedError('Unsupported return type')
@staticmethod
def build_market_data_query(asset_ids: List[str], query_type: QueryType, where: Union[FieldFilterMap, Dict] = None,
source: Union[str] = None, real_time: bool = False, measure='Curve'):
inner = {
'entityIds': asset_ids,
'queryType': query_type.value,
'where': where or {},
'source': source or 'any',
'frequency': 'Real Time' if real_time else 'End Of Day',
'measures': [
measure
]
}
if DataContext.current.interval is not None:
inner['interval'] = DataContext.current.interval
if real_time:
inner['startTime'] = DataContext.current.start_time
inner['endTime'] = DataContext.current.end_time
else:
inner['startDate'] = DataContext.current.start_date
inner['endDate'] = DataContext.current.end_date
return {
'queries': [inner]
}
@classmethod
def get_data_providers(cls,
entity_id: str,
availability: Optional[Dict] = None) -> Dict:
"""Return daily and real-time data providers
:param entity_id: identifier of entity i.e. asset, country, subdivision
:param availability: Optional Measures Availability response for the entity
:return: dictionary of available data providers
        **Usage**

        Return a dictionary containing a set of dataset providers for each available data field.
        For each field, a dict of daily and real-time dataset providers is returned where available.
"""
response = availability if availability else GsSession.current._get(f'/data/measures/{entity_id}/availability')
if 'errorMessages' in response:
raise MqValueError(f"Data availability request {response['requestId']} "
f"failed: {response.get('errorMessages', '')}")
if 'data' not in response:
return {}
providers = {}
all_data_mappings = sorted(response['data'], key=lambda x: x['rank'], reverse=True)
for source in all_data_mappings:
freq = source.get('frequency', 'End Of Day')
dataset_field = source.get('datasetField', '')
rank = source.get('rank')
providers.setdefault(dataset_field, {})
if rank:
if freq == 'End Of Day':
providers[dataset_field][DataFrequency.DAILY] = source['datasetId']
elif freq == 'Real Time':
providers[dataset_field][DataFrequency.REAL_TIME] = source['datasetId']
return providers
@classmethod
def get_market_data(cls, query, request_id=None) -> pd.DataFrame:
GsSession.current: GsSession
start = time.perf_counter()
body = GsSession.current._post('/data/measures', payload=query)
log_debug(request_id, _logger, 'market data query (%s) ran in %.3f ms', body.get('requestId'),
(time.perf_counter() - start) * 1000)
ids = []
parts = []
for e in body['responses']:
container = e['queryResponse'][0]
ids.extend(container.get('dataSetIds', ()))
if 'errorMessages' in container:
raise MqValueError(f"measure service request {body['requestId']} failed: {container['errorMessages']}")
if 'response' in container:
df = MarketDataResponseFrame(container['response']['data'])
df.set_index('date' if 'date' in df.columns else 'time', inplace=True)
df.index = pd.to_datetime(df.index)
parts.append(df)
df = pd.concat(parts) if len(parts) > 0 else MarketDataResponseFrame()
df.dataset_ids = tuple(ids)
return df
@classmethod
def __normalise_coordinate_data(
cls,
data: Iterable[Union[MDAPIDataQueryResponse, Dict]],
fields: Optional[Tuple[MDAPIQueryField, ...]] = None
) -> Iterable[Iterable[Dict]]:
ret = []
for response in data:
coord_data = []
rows = (
r.as_dict() for r in response.data) if isinstance(
response,
MDAPIDataQueryResponse) else response.get(
'data',
())
for pt in rows:
if not pt:
continue
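                # rows without an explicit 'value' carry the value under their quoting-style key;
                # normalise them so every row exposes a common 'value' field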
if not fields and 'value' not in pt:
value_field = pt['mktQuotingStyle']
pt['value'] = pt.pop(value_field)
coord_data.append(pt)
ret.append(coord_data)
return ret
@classmethod
def __df_from_coordinate_data(
cls,
data: Iterable[Dict],
*,
use_datetime_index: Optional[bool] = True
) -> pd.DataFrame:
df = cls._sort_coordinate_data(pd.DataFrame.from_records(data))
index_field = next((f for f in ('time', 'date') if f in df.columns), None)
if index_field and use_datetime_index:
df = df.set_index(pd.DatetimeIndex(df.loc[:, index_field].values))
return df
@classmethod
def _sort_coordinate_data(
cls,
df: pd.DataFrame,
by: Tuple[str] = ('date', 'time', 'mktType', 'mktAsset', 'mktClass', 'mktPoint', 'mktQuotingStyle', 'value')
) -> pd.DataFrame:
columns = df.columns
field_order = [f for f in by if f in columns]
field_order.extend(f for f in columns if f not in field_order)
return df[field_order]
@classmethod
def _coordinate_from_str(cls, coordinate_str: str) -> MarketDataCoordinate:
tmp = coordinate_str.rsplit(".", 1)
dimensions = tmp[0].split("_")
if len(dimensions) < 2:
raise MqValueError('invalid coordinate ' + coordinate_str)
kwargs = {
'mkt_type': dimensions[0],
'mkt_asset': dimensions[1] or None,
'mkt_quoting_style': tmp[-1] if len(tmp) > 1 else None}
if len(dimensions) > 2:
kwargs['mkt_class'] = dimensions[2] or None
if len(dimensions) > 3:
kwargs['mkt_point'] = tuple(dimensions[3:]) or None
return MarketDataCoordinate(**kwargs)
@classmethod
def coordinates_last(
cls,
coordinates: Union[Iterable[str], Iterable[MarketDataCoordinate]],
as_of: Union[dt.datetime, dt.date] = None,
vendor: MarketDataVendor = MarketDataVendor.Goldman_Sachs,
as_dataframe: bool = False,
pricing_location: Optional[PricingLocation] = None,
timeout: int = None
) -> Union[Dict, pd.DataFrame]:
"""
Get last value of coordinates data
:param coordinates: market data coordinate(s)
:param as_of: snapshot date or time
:param vendor: data vendor
:param as_dataframe: whether to return the result as Dataframe
:param pricing_location: the location where close data has been recorded (not used for real-time query)
:param timeout: data query timeout; if timeout is not set then the default timeout is used
:return: Dataframe or dictionary of the returned data
**Examples**
>>> coordinate = ("FX Fwd_USD/EUR_Fwd Pt_2y",)
>>> data = GsDataApi.coordinates_last(coordinate, dt.datetime(2019, 11, 19))
"""
market_data_coordinates = tuple(cls._coordinate_from_str(coord) if isinstance(coord, str) else coord
for coord in coordinates)
query = cls.build_query(
end=as_of,
market_data_coordinates=market_data_coordinates,
vendor=vendor,
pricing_location=pricing_location
)
kwargs = {}
if timeout is not None:
kwargs['timeout'] = timeout
data = cls.last_data(query, **kwargs)
if not as_dataframe:
ret = {coordinate: None for coordinate in market_data_coordinates}
for idx, row in enumerate(cls.__normalise_coordinate_data(data)):
try:
ret[market_data_coordinates[idx]] = row[0]['value']
except IndexError:
ret[market_data_coordinates[idx]] = None
return ret
ret = []
datetime_field = 'time' if isinstance(as_of, dt.datetime) else 'date'
for idx, row in enumerate(cls.__normalise_coordinate_data(data)):
coordinate_as_dict = market_data_coordinates[idx].as_dict(as_camel_case=True)
try:
ret.append(dict(chain(coordinate_as_dict.items(),
(('value', row[0]['value']), (datetime_field, row[0][datetime_field])))))
except IndexError:
ret.append(dict(chain(coordinate_as_dict.items(), (('value', None), (datetime_field, None)))))
return cls.__df_from_coordinate_data(ret, use_datetime_index=False)
@classmethod
def coordinates_data(
cls,
coordinates: Union[str, MarketDataCoordinate, Iterable[str], Iterable[MarketDataCoordinate]],
start: Union[dt.datetime, dt.date] = None,
end: Union[dt.datetime, dt.date] = None,
vendor: MarketDataVendor = MarketDataVendor.Goldman_Sachs,
as_multiple_dataframes: bool = False,
pricing_location: Optional[PricingLocation] = None,
fields: Optional[Tuple[MDAPIQueryField, ...]] = None,
**kwargs
) -> Union[pd.DataFrame, Tuple[pd.DataFrame]]:
"""
Get coordinates data
:param coordinates: market data coordinate(s)
:param start: start date or time
:param end: end date or time
:param vendor: data vendor
:param as_multiple_dataframes: whether to return the result as one or multiple Dataframe(s)
:param pricing_location: the location where close data has been recorded (not used for real-time query)
:param fields: value fields to return
:param kwargs: Extra query arguments
:return: Dataframe(s) of the returned data
**Examples**
>>> coordinate = ("FX Fwd_USD/EUR_Fwd Pt_2y",)
>>> data = GsDataApi.coordinates_data(coordinate, dt.datetime(2019, 11, 18), dt.datetime(2019, 11, 19))
"""
coordinates_iterable = (coordinates,) if isinstance(coordinates, (MarketDataCoordinate, str)) else coordinates
query = cls.build_query(
market_data_coordinates=tuple(cls._coordinate_from_str(coord) if isinstance(coord, str) else coord
for coord in coordinates_iterable),
vendor=vendor,
start=start,
end=end,
pricing_location=pricing_location,
fields=fields,
**kwargs
)
results = cls.__normalise_coordinate_data(cls.query_data(query), fields=fields)
if as_multiple_dataframes:
return tuple(GsDataApi.__df_from_coordinate_data(r) for r in results)
else:
return cls.__df_from_coordinate_data(chain.from_iterable(results))
@classmethod
def coordinates_data_series(
cls,
coordinates: Union[str, MarketDataCoordinate, Iterable[str], Iterable[MarketDataCoordinate]],
start: Union[dt.datetime, dt.date] = None,
end: Union[dt.datetime, dt.date] = None,
vendor: MarketDataVendor = MarketDataVendor.Goldman_Sachs,
pricing_location: Optional[PricingLocation] = None,
**kwargs
) -> Union[pd.Series, Tuple[pd.Series]]:
"""
Get coordinates data series
:param coordinates: market data coordinate(s)
:param start: start date or time
:param end: end date or time
:param vendor: data vendor
:param pricing_location: the location where close data has been recorded (not used for real-time query)
:param kwargs: Extra query arguments
:return: Series of the returned data
**Examples**
>>> coordinate = ("FX Fwd_USD/EUR_Fwd Pt_2y",)
>>> data = GsDataApi.coordinates_data_series(coordinate, dt.datetime(2019, 11, 18), dt.datetime(2019, 11, 19))
"""
dfs = cls.coordinates_data(
coordinates,
start=start,
end=end,
pricing_location=pricing_location,
vendor=vendor,
as_multiple_dataframes=True,
**kwargs
)
ret = tuple(pd.Series() if df.empty else pd.Series(index=df.index, data=df.value.values) for df in dfs)
if isinstance(coordinates, (MarketDataCoordinate, str)):
return ret[0]
else:
return ret
@staticmethod
@cachetools.cached(TTLCache(ttl=3600, maxsize=128))
def get_types(dataset_id: str):
results = GsSession.current._get(f'/data/catalog/{dataset_id}')
fields = results.get("fields")
if fields:
field_types = {}
for key, value in fields.items():
field_type = value.get('type')
field_format = value.get('format')
field_types[key] = field_format or field_type
return field_types
raise RuntimeError(f"Unable to get Dataset schema for {dataset_id}")
@classmethod
def construct_dataframe_with_types(cls, dataset_id: str, data: Union[Base, List, Tuple]) -> pd.DataFrame:
"""
Constructs a dataframe with correct date types.
:param dataset_id: id of the dataset
:param data: data to convert with correct types
:return: dataframe with correct types
"""
if len(data):
dataset_types = cls.get_types(dataset_id)
# Use first row to infer fields from data
incoming_data_data_types = pd.DataFrame([data[0]]).dtypes.to_dict()
df = pd.DataFrame(data, columns={**dataset_types, **incoming_data_data_types})
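            # coerce columns that the dataset schema marks as date/date-time to numpy datetime64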
for field_name, type_name in dataset_types.items():
if df.get(field_name) is not None and type_name in ('date', 'date-time') and \
len(df.get(field_name).value_counts()) > 0:
df = df.astype({field_name: numpy.datetime64})
field_names = dataset_types.keys()
if 'date' in field_names:
df = df.set_index('date')
elif 'time' in field_names:
df = df.set_index('time')
return df
else:
            return pd.DataFrame({})
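

# Illustrative sketch (added; not part of gs_quant): coordinate strings are parsed into
# their dimensions by GsDataApi._coordinate_from_str. The coordinate below is an example
# value only.
if __name__ == '__main__':
    example_coord = GsDataApi._coordinate_from_str('FX Fwd_USD/EUR_Fwd Pt_2y.ATMF')
    # -> mkt_type='FX Fwd', mkt_asset='USD/EUR', mkt_class='Fwd Pt',
    #    mkt_point=('2y',), mkt_quoting_style='ATMF'
    print(example_coord)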
import const
import settings
import pandas as pd
import numpy as np
import random
from src.preprocess import reform, times
from src.feature_generators.lstm_fg import LSTMFG
from src import util
import time
import math
class HybridFG(LSTMFG):
def __init__(self, cfg):
"""
:param cfg:
:param time_steps: number of values to be considered as input (for hour_x = 1) step_x = hour count
"""
super(HybridFG, self).__init__(cfg, -1)
# Basic parameters
# Long features are more coarse grained and long range
self.meo_steps = cfg[const.MEO_STEPS] # time steps backward
self.meo_group = cfg[const.MEO_GROUP] # hours of each step
self.meo_long_steps = cfg[const.MEO_LONG_STEPS]
self.meo_long_group = 24
self.future_steps = cfg[const.FUTURE_STEPS]
self.future_group = cfg[const.FUTURE_GROUP]
self.air_steps = cfg[const.AIR_STEPS]
self.air_group = cfg[const.AIR_GROUP]
self.air_long_steps = cfg[const.AIR_LONG_STEPS]
self.air_long_group = 24
self.time_is_one_hot = True
self.meo_keys = [const.TEMP, const.HUM, const.WSPD] # [const.TEMP, const.HUM, const.WSPD]
self.future_keys = [const.TEMP, const.HUM, const.WSPD]
if cfg[const.CITY] == const.BJ:
self.air_keys = [const.PM25, const.PM10, const.O3]
elif cfg[const.CITY] == const.LD:
self.air_keys = [const.PM25, const.PM10] # no O3 for london
self.param_indicator = '%s_%s_%s_%s_%s_%s_%s_%s_%s_%s' % (self.meo_steps, self.meo_group,
self.meo_long_steps, self.meo_long_group,
self.future_steps, self.future_group,
self.air_steps, self.air_group,
self.air_long_steps, self.air_long_group)
self.features_path = self.config.get(const.FEATURE_DIR, "") + \
self.feature_indicator + '_' + self.param_indicator + '_hybrid_'
self.test_path = self.features_path + 'tests.csv'
# number of file chunks to put features into
self.chunk_count = self.config.get(const.CHUNK_COUNT, 1)
# train / test / validation data holders
self._exploded = {const.TRAIN: dict(), const.VALID: dict(), const.TEST: dict()}
# station data for context features like station location
self._stations = pd.DataFrame()
self._current_chunk = -1 # current feature file chunk
def generate(self, ts=None, stations=None, verbose=True, save=True):
"""
Create a basic feature set from pollutant time series, per hour
x: (time, longitude, latitude, pollutant values of t:t+n)
y: (pollutant values of t+n:t+n+m)
:return:
"""
# load_model data
if ts is None:
ts = pd.read_csv(self.config[const.OBSERVED], sep=";", low_memory=False)
if stations is None:
self._stations = pd.read_csv(self.config[const.STATIONS], sep=";", low_memory=False)
else:
self._stations = stations
self.data = reform.group_by_station(ts=ts, stations=self._stations)
features = list()
start_time = time.time()
stations = self._stations.to_dict(orient='index')
chunk_index = np.linspace(start=0, stop=len(stations) - 1, num=self.chunk_count + 1)
station_count = self._stations[const.PREDICT].sum()
processed_stations = 0
next_chunk = 1
total_data_points = 0
for s_index, s_info in stations.items():
if s_info[const.PREDICT] != 1:
continue
station_id = s_info[const.ID]
if verbose:
print(' Features of {sid} ({index} of {len})..'.
format(sid=station_id, index=s_index + 1, len=len(stations)))
s_data = self.data[station_id]
s_time = pd.to_datetime(s_data[const.TIME], format=const.T_FORMAT).tolist()
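            # index of the first usable sample: it needs air_group * air_steps hours of air-quality history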
first_x = self.air_group * self.air_steps - 1
station_features = self.generate_per_station(station_id, s_data, s_time, first_x)
# aggregate all features per row
features.extend(station_features)
processed_stations += 1
# save current chunk and go to next
if save and (s_index >= chunk_index[next_chunk] or processed_stations == station_count):
# set and save the chunk of features
self.features = pd.DataFrame(data=features, columns=self.get_all_columns())
before_drop = len(self.features)
self.dropna()
after_drop = len(self.features)
print(' %d feature vectors dropped having NaN' % (before_drop - after_drop))
self.features = self.features.sample(frac=self.config[const.FRACTION])
self.save_features(chunk_id=next_chunk)
total_data_points += len(self.features)
# go to next chunk
features = list()
self.features = pd.DataFrame()
next_chunk += 1
if not save:
self.features = pd.DataFrame(data=features, columns=self.get_all_columns())
total_data_points = len(self.features)
print(total_data_points, 'feature vectors generated in', time.time() - start_time, 'secs')
return self
def generate_per_station(self, station_id, s_data, s_time, first_x):
# time of each data point (row)
t = s_time[first_x:] # first data point is started at 'first_x'
region = (first_x, -1) # region of values to be extracted as features
# Each data point (row): (measure1, measure2, ..) @ t0, (..) @ t1, .., (..) @ tN
        def reshape(arrays, row_size):
            return np.moveaxis(np.array(arrays), source=0, destination=2) \
                .reshape((-1, row_size)).tolist()
# weather time series of last 'meo_steps' every 'meo_group' hours
meo_all = list()
for meo_key in self.meo_keys:
ts = times.split(time=s_time, value=s_data[meo_key].tolist(),
group_hours=self.meo_group, step=-self.meo_steps, region=region)
meo_all.append(ts)
meo = reshape(meo_all, row_size=self.meo_steps * len(meo_all))
# long range weather time series
meo_long_all = list()
for meo_key in self.meo_keys:
ts = times.split(time=s_time, value=s_data[meo_key].tolist(),
group_hours=self.meo_long_group, step=-self.meo_long_steps, region=region)
meo_long_all.append(ts)
meo_long = reshape(meo_long_all, row_size=self.meo_long_steps * len(meo_long_all))
# future weather time series of next 'future_steps' every 'future_group' hours
future_all = list()
for future_key in self.future_keys:
ts = times.split(time=s_time, value=s_data[future_key].tolist(),
group_hours=self.future_group, step=self.future_steps, region=region,
whole_group=True)
future_all.append(ts)
future = reshape(future_all, row_size=self.future_steps * len(future_all))
# air quality time series of last 'air_steps' every 'air_group' hours
air_all = list()
for air_key in self.air_keys:
ts = times.split(time=s_time, value=s_data[air_key].tolist(),
group_hours=self.air_group, step=-self.air_steps, region=region)
air_all.append(ts)
air = reshape(air_all, row_size=self.air_steps * len(air_all))
# long range air quality time series
air_long_all = list()
for air_key in self.air_keys:
ts = times.split(time=s_time, value=s_data[air_key].tolist(),
group_hours=self.air_long_group, step=-self.air_long_steps, region=region)
air_long_all.append(ts)
air_long = reshape(air_long_all, row_size=self.air_long_steps * len(air_long_all))
# next 48 pollutant values to be predicted
pollutant = self.config[const.POLLUTANT]
label = times.split(time=s_time, value=s_data[pollutant].tolist(),
group_hours=1, step=48, region=(first_x + 1, -1))
# station id per row
sid = [station_id] * (len(s_time) - first_x)
# aggregate all features per row
feature_set = [[s] + [t] + m + ml + f + al + a + l for s, t, m, ml, f, al, a, l in
zip(sid, t, meo, meo_long, future, air_long, air, label)]
return feature_set
def next(self, batch_size, progress=0, rotate=1):
"""
Next batch for training
:param batch_size:
:param progress: progress ratio to be used to move to next feature file chunks
:param rotate: number of times to rotate over all chunks until progress = 1
        :return: batch as a dict of numpy arrays keyed 'c' (context), 'm'/'ml' (short/long meteorology),
            'f' (future meteorology), 'a'/'al' (short/long air quality) and 'l' (label)
        :rtype: dict
"""
chunk_id = 1 + math.floor(rotate * progress * self.chunk_count) % self.chunk_count
if self._current_chunk != chunk_id:
self.load(chunk_id=chunk_id)
self._current_chunk = chunk_id
index = const.TRAIN
exploded = self._exploded[index]
sample_idx = np.random.randint(len(exploded['c']), size=batch_size)
context = exploded['c'][sample_idx, :]
meo = exploded['m'][sample_idx, :]
meo_long = exploded['ml'][sample_idx, :]
future = exploded['f'][sample_idx, :]
air = exploded['a'][sample_idx, :]
air_long = exploded['al'][sample_idx, :]
label = exploded['l'][sample_idx, :]
return {
'c': context,
'm': meo,
'ml': meo_long,
'f': future,
'a': air,
'al': air_long,
'l': label
}
def holdout(self, key=const.TEST):
"""
Return test data
:param key: key for TEST or VALID data
        :return: hold-out data as a dict of numpy arrays keyed 'c', 'm', 'ml', 'f', 'a', 'al' and 'l'
        :rtype: dict
"""
if len(self._exploded[key]) == 0:
self.load_holdout()
return self._exploded[key]
def load(self, chunk_id=1):
"""
Load a chunk of training data, separated into different inputs
:param chunk_id:
:return:
"""
train_from = self.config[const.TRAIN_FROM]
train_to = self.config[const.TRAIN_TO]
print(' Load train set from %s to %s' % (train_from, train_to))
features = pd.read_csv(self.features_path + str(chunk_id) + '.csv', sep=";", low_memory=False)
train_features = times.select(df=features, time_key=const.TIME,
from_time=train_from, to_time=train_to)
self._exploded[const.TRAIN] = self.explode(train_features)
print('Feature chunk {c} is prepared.'.format(c=chunk_id))
return self
def load_holdout(self):
valid_from = self.config.get(const.VALID_FROM, '00-00-00 00')
valid_to = self.config.get(const.VALID_TO, '00-00-00 00')
test_from = self.config[const.TEST_FROM]
test_to = self.config[const.TEST_TO]
print(' Load validation set from %s to %s' % (valid_from, valid_to))
print(' Load test set from %s to %s' % (test_from, test_to))
for chunk_id in range(1, self.chunk_count + 1):
input_features = pd.read_csv(self.features_path + str(chunk_id) + '.csv', sep=";", low_memory=False)
# extract test and validation data
features = dict()
features[const.VALID] = times.select(df=input_features, time_key=const.TIME,
from_time=valid_from, to_time=valid_to)
features[const.TEST] = times.select(df=input_features, time_key=const.TIME,
from_time=test_from, to_time=test_to)
# add feature to global test data
if len(self._test.index) == 0:
self._test = features[const.TEST]
else:
self._test = self._test.append(other=features[const.TEST], ignore_index=True)
# explode features into parts (context, weather time series, etc.)
for key in features:
exploded = self.explode(features[key])
if len(exploded) == 0:
continue
for part, value in exploded.items():
self._exploded[key][part] = value if part not in self._exploded[key] \
else np.concatenate((self._exploded[key][part], value), axis=0)
print(' Hold-out feature is prepared (valid: %d, test: %d).' % (
len(self._exploded[const.VALID]['c']), len(self._exploded[const.TEST]['c'])))
return self
def explode(self, features: pd.DataFrame):
"""
Explode features to context, time series, and label
:param features:
:return: exploded feature parts
:rtype: (dict)
"""
if len(self._stations.index) == 0:
            self._stations = pd.read_csv(self.config[const.STATIONS], sep=";", low_memory=False)
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import warnings
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast
import numpy as np # type: ignore
import pandas as pd # type: ignore
from elasticsearch import Elasticsearch
# Default number of rows displayed (different to pandas where ALL could be displayed)
DEFAULT_NUM_ROWS_DISPLAYED = 60
DEFAULT_CHUNK_SIZE = 10000
DEFAULT_CSV_BATCH_OUTPUT_SIZE = 10000
DEFAULT_PROGRESS_REPORTING_NUM_ROWS = 10000
DEFAULT_ES_MAX_RESULT_WINDOW = 10000 # index.max_result_window
DEFAULT_PAGINATION_SIZE = 5000 # for composite aggregations
PANDAS_VERSION: Tuple[int, ...] = tuple(
int(part) for part in pd.__version__.split(".") if part.isdigit()
)[:2]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
EMPTY_SERIES_DTYPE = pd.Series().dtype
def build_pd_series(
data: Dict[str, Any], dtype: Optional[np.dtype] = None, **kwargs: Any
) -> pd.Series:
"""Builds a pd.Series while squelching the warning
for unspecified dtype on empty series
"""
dtype = dtype or (EMPTY_SERIES_DTYPE if not data else dtype)
if dtype is not None:
kwargs["dtype"] = dtype
return pd.Series(data, **kwargs)
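# e.g. build_pd_series({}) yields an empty Series using pandas' default empty dtype without
# triggering the "empty Series dtype" warning, while build_pd_series({"a": 1}, dtype="int64")
# behaves like pd.Series({"a": 1}, dtype="int64").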
def docstring_parameter(*sub: Any) -> Callable[[Any], Any]:
def dec(obj: Any) -> Any:
obj.__doc__ = obj.__doc__.format(*sub)
return obj
return dec
class SortOrder(Enum):
ASC = 0
DESC = 1
@staticmethod
def reverse(order: "SortOrder") -> "SortOrder":
if order == SortOrder.ASC:
return SortOrder.DESC
return SortOrder.ASC
@staticmethod
def to_string(order: "SortOrder") -> str:
if order == SortOrder.ASC:
return "asc"
return "desc"
@staticmethod
def from_string(order: str) -> "SortOrder":
if order == "asc":
return SortOrder.ASC
return SortOrder.DESC
def elasticsearch_date_to_pandas_date(
value: Union[int, str], date_format: Optional[str]
) -> pd.Timestamp:
"""
    Given a value and the Elasticsearch date format it was stored with, parse the
    value into a pandas Timestamp using the matching `pd.to_datetime` call for that format.
**Date Formats: https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-date-format.html#built-in-date-formats
Parameters
----------
value: Union[int, str]
The date value.
date_format: str
The Elasticsearch date format (ex. 'epoch_millis', 'epoch_second', etc.)
Returns
-------
datetime: pd.Timestamp
From https://www.elastic.co/guide/en/elasticsearch/reference/current/date.html
Date formats can be customised, but if no format is specified then it uses the default:
"strict_date_optional_time||epoch_millis"
Therefore if no format is specified we assume either strict_date_optional_time
or epoch_millis.
"""
if date_format is None or isinstance(value, (int, float)):
try:
return pd.to_datetime(
value, unit="s" if date_format == "epoch_second" else "ms"
)
except ValueError:
return pd.to_datetime(value)
elif date_format == "epoch_millis":
return pd.to_datetime(value, unit="ms")
elif date_format == "epoch_second":
return pd.to_datetime(value, unit="s")
elif date_format == "strict_date_optional_time":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f%z", exact=False)
elif date_format == "basic_date":
return pd.to_datetime(value, format="%Y%m%d")
elif date_format == "basic_date_time":
return pd.to_datetime(value, format="%Y%m%dT%H%M%S.%f", exact=False)
elif date_format == "basic_date_time_no_millis":
return pd.to_datetime(value, format="%Y%m%dT%H%M%S%z")
elif date_format == "basic_ordinal_date":
return pd.to_datetime(value, format="%Y%j")
elif date_format == "basic_ordinal_date_time":
return pd.to_datetime(value, format="%Y%jT%H%M%S.%f%z", exact=False)
elif date_format == "basic_ordinal_date_time_no_millis":
return pd.to_datetime(value, format="%Y%jT%H%M%S%z")
elif date_format == "basic_time":
return pd.to_datetime(value, format="%H%M%S.%f%z", exact=False)
elif date_format == "basic_time_no_millis":
return pd.to_datetime(value, format="%H%M%S%z")
elif date_format == "basic_t_time":
return pd.to_datetime(value, format="T%H%M%S.%f%z", exact=False)
elif date_format == "basic_t_time_no_millis":
return pd.to_datetime(value, format="T%H%M%S%z")
elif date_format == "basic_week_date":
return pd.to_datetime(value, format="%GW%V%u")
elif date_format == "basic_week_date_time":
return pd.to_datetime(value, format="%GW%V%uT%H%M%S.%f%z", exact=False)
elif date_format == "basic_week_date_time_no_millis":
return pd.to_datetime(value, format="%GW%V%uT%H%M%S%z")
elif date_format == "strict_date":
return pd.to_datetime(value, format="%Y-%m-%d")
elif date_format == "date":
return pd.to_datetime(value, format="%Y-%m-%d")
elif date_format == "strict_date_hour":
return pd.to_datetime(value, format="%Y-%m-%dT%H")
elif date_format == "date_hour":
return pd.to_datetime(value, format="%Y-%m-%dT%H")
elif date_format == "strict_date_hour_minute":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M")
elif date_format == "date_hour_minute":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M")
elif date_format == "strict_date_hour_minute_second":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S")
elif date_format == "date_hour_minute_second":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S")
elif date_format == "strict_date_hour_minute_second_fraction":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f", exact=False)
elif date_format == "date_hour_minute_second_fraction":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f", exact=False)
elif date_format == "strict_date_hour_minute_second_millis":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f", exact=False)
elif date_format == "date_hour_minute_second_millis":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f", exact=False)
elif date_format == "strict_date_time":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f%z", exact=False)
elif date_format == "date_time":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f%z", exact=False)
elif date_format == "strict_date_time_no_millis":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S%z")
elif date_format == "date_time_no_millis":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S%z")
elif date_format == "strict_hour":
return pd.to_datetime(value, format="%H")
elif date_format == "hour":
return pd.to_datetime(value, format="%H")
elif date_format == "strict_hour_minute":
return pd.to_datetime(value, format="%H:%M")
elif date_format == "hour_minute":
return pd.to_datetime(value, format="%H:%M")
elif date_format == "strict_hour_minute_second":
return pd.to_datetime(value, format="%H:%M:%S")
elif date_format == "hour_minute_second":
return pd.to_datetime(value, format="%H:%M:%S")
elif date_format == "strict_hour_minute_second_fraction":
return pd.to_datetime(value, format="%H:%M:%S.%f", exact=False)
elif date_format == "hour_minute_second_fraction":
return pd.to_datetime(value, format="%H:%M:%S.%f", exact=False)
elif date_format == "strict_hour_minute_second_millis":
return pd.to_datetime(value, format="%H:%M:%S.%f", exact=False)
elif date_format == "hour_minute_second_millis":
return pd.to_datetime(value, format="%H:%M:%S.%f", exact=False)
elif date_format == "strict_ordinal_date":
return pd.to_datetime(value, format="%Y-%j")
elif date_format == "ordinal_date":
return pd.to_datetime(value, format="%Y-%j")
elif date_format == "strict_ordinal_date_time":
return pd.to_datetime(value, format="%Y-%jT%H:%M:%S.%f%z", exact=False)
elif date_format == "ordinal_date_time":
return pd.to_datetime(value, format="%Y-%jT%H:%M:%S.%f%z", exact=False)
elif date_format == "strict_ordinal_date_time_no_millis":
return pd.to_datetime(value, format="%Y-%jT%H:%M:%S%z")
elif date_format == "ordinal_date_time_no_millis":
return pd.to_datetime(value, format="%Y-%jT%H:%M:%S%z")
elif date_format == "strict_time":
return pd.to_datetime(value, format="%H:%M:%S.%f%z", exact=False)
elif date_format == "time":
return pd.to_datetime(value, format="%H:%M:%S.%f%z", exact=False)
elif date_format == "strict_time_no_millis":
return pd.to_datetime(value, format="%H:%M:%S%z")
elif date_format == "time_no_millis":
return pd.to_datetime(value, format="%H:%M:%S%z")
elif date_format == "strict_t_time":
return pd.to_datetime(value, format="T%H:%M:%S.%f%z", exact=False)
elif date_format == "t_time":
return pd.to_datetime(value, format="T%H:%M:%S.%f%z", exact=False)
elif date_format == "strict_t_time_no_millis":
return pd.to_datetime(value, format="T%H:%M:%S%z")
elif date_format == "t_time_no_millis":
return pd.to_datetime(value, format="T%H:%M:%S%z")
elif date_format == "strict_week_date":
return pd.to_datetime(value, format="%G-W%V-%u")
elif date_format == "week_date":
return pd.to_datetime(value, format="%G-W%V-%u")
elif date_format == "strict_week_date_time":
return pd.to_datetime(value, format="%G-W%V-%uT%H:%M:%S.%f%z", exact=False)
elif date_format == "week_date_time":
return pd.to_datetime(value, format="%G-W%V-%uT%H:%M:%S.%f%z", exact=False)
elif date_format == "strict_week_date_time_no_millis":
return pd.to_datetime(value, format="%G-W%V-%uT%H:%M:%S%z")
elif date_format == "week_date_time_no_millis":
return pd.to_datetime(value, format="%G-W%V-%uT%H:%M:%S%z")
elif date_format == "strict_weekyear" or date_format == "weekyear":
# TODO investigate if there is a way of converting this
raise NotImplementedError(
"strict_weekyear is not implemented due to support in pandas"
)
return pd.to_datetime(value, format="%G")
# Not supported in pandas
# ValueError: ISO year directive '%G' must be used with the ISO week directive '%V'
# and a weekday directive '%A', '%a', '%w', or '%u'.
elif date_format == "strict_weekyear_week" or date_format == "weekyear_week":
# TODO investigate if there is a way of converting this
raise NotImplementedError(
"strict_weekyear_week is not implemented due to support in pandas"
)
return pd.to_datetime(value, format="%G-W%V")
# Not supported in pandas
# ValueError: ISO year directive '%G' must be used with the ISO week directive '%V'
# and a weekday directive '%A', '%a', '%w', or '%u'.
elif date_format == "strict_weekyear_week_day":
        return pd.to_datetime(value, format="%G-W%V-%u")
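

# Illustrative usage (added; example values only):
#   elasticsearch_date_to_pandas_date(1609459200000, "epoch_millis")
#       -> Timestamp('2021-01-01 00:00:00')
#   elasticsearch_date_to_pandas_date("2021-01-01", "strict_date")
#       -> Timestamp('2021-01-01 00:00:00')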
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
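        # the container type expected for the comparison result, given dtarr's box and the scalar other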
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
        # regression test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = tm.box_expected(expected, box_with_array)
if box_other:
other = tm.box_expected(other, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dtarr, other)
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
"op, offset, exp, exp_freq",
[
(
"__add__",
DateOffset(months=3, days=10),
[
Timestamp("2014-04-11"),
Timestamp("2015-04-11"),
Timestamp("2016-04-11"),
Timestamp("2017-04-11"),
],
None,
),
(
"__add__",
DateOffset(months=3),
[
Timestamp("2014-04-01"),
Timestamp("2015-04-01"),
Timestamp("2016-04-01"),
Timestamp("2017-04-01"),
],
"AS-APR",
),
(
"__sub__",
DateOffset(months=3, days=10),
[
Timestamp("2013-09-21"),
Timestamp("2014-09-21"),
Timestamp("2015-09-21"),
Timestamp("2016-09-21"),
],
None,
),
(
"__sub__",
DateOffset(months=3),
[
Timestamp("2013-10-01"),
Timestamp("2014-10-01"),
Timestamp("2015-10-01"),
Timestamp("2016-10-01"),
],
"AS-OCT",
),
],
)
def test_dti_add_sub_nonzero_mth_offset(
self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
):
# GH 26258
tz = tz_aware_fixture
date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
date = tm.box_expected(date, box_with_array, False)
mth = getattr(date, op)
result = mth(offset)
expected = DatetimeIndex(exp, tz=tz)
expected = tm.box_expected(expected, box_with_array, False)
tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
left = Series([Timestamp("1969-12-31")])
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = TimedeltaIndex([NaT])
expected = tm.box_expected(expected, box_with_array)
result = left - right
tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = Timestamp("1700-01-31")
td = Timedelta("20000 Days")
dti = date_range("1949-09-30", freq="100Y", periods=4)
ser = Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
with pytest.raises(OverflowError, match=msg):
dt - ser
with pytest.raises(OverflowError, match=msg):
ser + td
with pytest.raises(OverflowError, match=msg):
td + ser
ser.iloc[-1] = NaT
expected = Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = NaT
expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
msg = "Overflow in int64 addition"
for variant in ts_neg_variants:
with pytest.raises(OverflowError, match=msg):
dtimax - variant
expected = Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
# General tests
expected = Timestamp.max.value - ts_pos[1].value
result = dtimax - ts_pos
assert result[1].value == expected
expected = Timestamp.min.value - ts_neg[1].value
result = dtimin - ts_neg
assert result[1].value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
with pytest.raises(OverflowError, match=msg):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([Timestamp.min])
t1 = tmin + Timedelta.max + Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
t1 - tmin
tmax = pd.to_datetime([Timestamp.max])
t2 = tmax + Timedelta.min - Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
tmax - t2
class TestTimestampSeriesArithmetic:
def test_empty_series_add_sub(self):
# GH#13844
a = Series(dtype="M8[ns]")
b = Series(dtype="m8[ns]")
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
b - a
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
Timestamp("20111230"),
Timestamp("20120101"),
Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
Timestamp("20111231"),
Timestamp("20120102"),
Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
        # datetime64 with timedelta
dt1 + td1
td1 + dt1
dt1 - td1
        # timedelta with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
        # these are all TypeError ops
op_str = all_arithmetic_operators
def check(get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
op = getattr(get_ser, op_str, None)
# Previously, _validate_for_numeric_binop in core/indexes/base.py
# did this for us.
with pytest.raises(
TypeError, match="operate|[cC]annot|unsupported operand"
):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
)
if op_str not in ["__sub__", "__rsub__"]:
check(dt1, dt2)
        # ## datetime64 with timedelta ###
# TODO(jreback) __rsub__ should raise?
if op_str not in ["__add__", "__radd__", "__sub__"]:
check(dt1, td1)
# 8260, 10763
# datetime64 with tz
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
check(dt2, td2)
def test_sub_single_tz(self):
# GH#12290
s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
result = s1 - s2
expected = Series([Timedelta("2days")])
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")])
tm.assert_series_equal(result, expected)
def test_dt64tz_series_sub_dtitz(self):
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = date_range("1999-09-30", periods=10, tz="US/Pacific")
ser = Series(dti)
expected = Series(TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
res = ser - dti
tm.assert_series_equal(res, expected)
def test_sub_datetime_compat(self):
# see GH#14088
s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])
dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
exp = Series([Timedelta("1 days"), NaT])
tm.assert_series_equal(s - dt, exp)
tm.assert_series_equal(s - Timestamp(dt), exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
)
tm.assert_series_equal(result, expected)
def test_datetime64_ops_nat(self):
# GH#11349
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
# subtraction
tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + datetime_series
tm.assert_series_equal(
-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
# -------------------------------------------------------------
# Invalid Operations
# TODO: this block also needs to be de-duplicated and parametrized
@pytest.mark.parametrize(
"dt64_series",
[
Series([Timestamp("19900315"), Timestamp("19900315")]),
Series([NaT, Timestamp("19900315")]),
Series([NaT, NaT], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
# multiplication
msg = "cannot perform .* with this index type"
with pytest.raises(TypeError, match=msg):
dt64_series * one
with pytest.raises(TypeError, match=msg):
one * dt64_series
# division
with pytest.raises(TypeError, match=msg):
dt64_series / one
with pytest.raises(TypeError, match=msg):
one / dt64_series
# TODO: parametrize over box
def test_dt64_series_add_intlike(self, tz_naive_fixture):
# GH#19123
tz = tz_naive_fixture
dti = DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
ser = Series(dti)
other = Series([20, 30, 40], dtype="uint8")
msg = "|".join(
[
"Addition/subtraction of integers and integer-arrays",
"cannot subtract .* from ndarray",
]
)
assert_invalid_addsub_type(ser, 1, msg)
assert_invalid_addsub_type(ser, other, msg)
assert_invalid_addsub_type(ser, np.array(other), msg)
assert_invalid_addsub_type(ser, pd.Index(other), msg)
# -------------------------------------------------------------
# Timezone-Centric Tests
def test_operators_datetimelike_with_timezones(self):
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
assert td2._values.freq is None
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
td1[0] - dt1
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
with pytest.raises(TypeError, match=msg):
td2[0] - dt2
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "cannot (add|subtract)"
with pytest.raises(TypeError, match=msg):
td1 - dt1
with pytest.raises(TypeError, match=msg):
td2 - dt2
class TestDatetimeIndexArithmetic:
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_addsub_int(self, tz_naive_fixture, one):
# Variants of `one` for #19012
tz = tz_naive_fixture
rng = date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
msg = "Addition/subtraction of integers"
with pytest.raises(TypeError, match=msg):
rng + one
with pytest.raises(TypeError, match=msg):
rng += one
with pytest.raises(TypeError, match=msg):
rng - one
with pytest.raises(TypeError, match=msg):
rng -= one
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("freq", ["H", "D"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_tick(self, int_holder, freq):
# GH#19959
dti = date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "|".join(
["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
)
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_non_tick(self, int_holder, freq):
# GH#19959
dti = date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "|".join(
["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
)
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_no_freq(self, int_holder):
# GH#19959
dti = DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"])
other = int_holder([9, 4, -1])
msg = "|".join(
["cannot subtract DatetimeArray from", "Addition/subtraction of integers"]
)
assert_invalid_addsub_type(dti, other, msg)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
        # add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
        # iadd with TimedeltaIndex
result = DatetimeIndex([ | Timestamp("2017-01-01", tz=tz) | pandas.Timestamp |
# -*- coding: utf-8 -*-
import operator
import warnings
from collections import Counter
from os import environ, makedirs
from os import system, popen
from os.path import join, exists
import re
import keras.backend as K
import matplotlib as mpl
import numpy as np
import pandas as pd
from scipy.io import mmread
import shutil
from .logging import get_logger
from .model import build_dense_model as build_model
from .utils import get_data, exists_or_mkdir
mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
from adjustText import adjust_text
from matplotlib.transforms import Bbox
warnings.filterwarnings("ignore")
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams["font.sans-serif"] = "Arial"
environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
logger = get_logger(__name__)
def vcf_to_sparse(outDir, inFeaID, inFeaBed, inVcf, featureID):
"""
convert the vcf to a feature matrix, `matrix.mtx`
:param outDir: output directory
:param inFeaID: `13885fea_exon_cut_100bp_2sample.txt` in dependency_data
:param inFeaBed: `tcga_13885fea_exon_cut_100bp.bed` in dependency_data
:param inVcf: the input vcf file path
:param featureID: `featureID.bed` in dependency_data
:return:
"""
logger.info('start converting Vcf to feature matrix')
exists_or_mkdir(outDir)
sample_name = inVcf.split('/')[-1].split('.')[0]
# --------------------------------------------------
vcf_list = []
with open(inVcf) as f:
for line in f:
if re.match('#', line):
pass
else:
data = line.strip().split('\t')
chrom = data[0] if re.match('chr', data[0]) else 'chr' + data[0]
start = int(data[1])
end = start + 1
qual = data[5]
alt = data[3] + ',' + data[4]
vcf_list.append([chrom, start, end, 1, qual, alt])
vcf_df = pd.DataFrame(vcf_list, columns=['chrom', 'start', 'end', 'sample_name', 'qual', 'alt'])
vcf_df.sort_values(by=['chrom', 'start'], ascending=True, inplace=True)
outSnpBedFile = join(outDir, 'snp_sampleID.bed')
vcf_df.to_csv(outSnpBedFile, sep='\t', header=None, index=None)
# --------------------------------------------------
feature_ls = list(pd.read_table(inFeaID, names=['fea'])['fea'])
nb_fea = len(feature_ls)
sparDir = join(outDir, 'DataSparse')
if not exists(sparDir):
makedirs(sparDir)
with open(join(sparDir, 'sample.tsv'), 'w') as output_sample:
output_sample.write('%s\n' % sample_name)
shutil.copyfile(inFeaID, join(sparDir, 'feature.tsv'))
outFeaId = join(sparDir, 'featureID.bed')
shutil.copyfile(featureID, outFeaId)
# --------------------------------------------------------------------------------
outSNVCoverWindow = join(outDir, 'window.snvCover.txt')
tmpMtxCountFile = join(sparDir, 'tmpMtx.count.txt')
out_testPat_mtx = join(sparDir, 'matrix.mtx')
system("bedtools intersect -a {0} -b {1} -wo > {2}".format(inFeaBed, outSnpBedFile, outSNVCoverWindow))
system(
" bedtools intersect -a %s -b %s -wo | awk \'{print $4\"\t\"$8\"\t\"\'1\'}\' | sort -u | sort -k1,1n -k2,2n > %s " % (
outFeaId, outSnpBedFile, tmpMtxCountFile))
nb_lines = int(popen('wc -l {0}'.format(tmpMtxCountFile)).read().strip().split(' ')[0])
with open(out_testPat_mtx, 'a') as f:
f.write('%%MatrixMarket matrix coordinate integer general\n%\n')
f.write('{0}\t{1}\t{2}\n'.format(nb_fea, 1, nb_lines))
system('cat {0} >> {1}'.format(tmpMtxCountFile, out_testPat_mtx))
# --------------------------------------------------------------------------------
def prediction(outDir, model_weight):
"""
predict single sample breast cancer risk
:param outDir: output directory
:param model_weight: the MiScan model weight file path
:return: (risk_to_be_patient, probability_to_be_normal)
"""
logger.info('start model evaluation')
model = build_model()
model.load_weights(model_weight)
test_array = mmread('{0}/DataSparse/matrix.mtx'.format(outDir)).todense().T
y_pred_ay = model.predict(test_array)
y_pred_pat = y_pred_ay[0][1]
y_pred_norm = y_pred_ay[0][0]
K.clear_session()
return y_pred_pat, y_pred_norm
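# Minimal usage sketch of the pipeline above (not part of the original module).
# All paths below are hypothetical placeholders: the dependency files are the
# ones named in the vcf_to_sparse docstring, and the weight file name is assumed.
#
#   out_dir = 'results/sample_001'
#   vcf_to_sparse(outDir=out_dir,
#                 inFeaID='dependency_data/13885fea_exon_cut_100bp_2sample.txt',
#                 inFeaBed='dependency_data/tcga_13885fea_exon_cut_100bp.bed',
#                 inVcf='input/sample_001.vcf',
#                 featureID='dependency_data/featureID.bed')
#   risk, normal = prediction(out_dir, model_weight='dependency_data/miscan_weight.h5')
#   print('predicted breast cancer risk: %.4f' % risk)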
def generate_report(inDir, outDir, y_pred_pat):
"""
generate report for single sample, including Cancer Risk Prediction | Top Gene Mutation Sites
:param inDir: for historical reason, actually, it's the path of `MISCAN.norm.trainPred.txt` and
'MISCAN.pat.trainPred.txt' in dependency_data
:param outDir: output directory
:param y_pred_pat: risk_to_be_patient from func `prediction`
:return:
"""
logger.info('start generating report')
fig, axes = plt.subplots(6, 1, figsize=(8, 8))
axes[0].set_position(Bbox([[0.02, 0.4], [0.98, 0.93]]))
# axes[0].set_title(r'$\underline{sin(x)}$', fontsize=30)
axes[0].text(0.5, 1, 'Feedback Report', fontsize=30, ha='center', style='italic')
# axes[0].text(0.5, 1, title, fontsize=30, ha='center', weight='bold')
axes[0].axis('off')
axes[5].set_position(Bbox([[0.02, 0.9], [0.98, 0.93]]))
axes[5].set_xlim([0, 1])
axes[5].plot([0.28, 0.72], [3, 3], color='black')
axes[5].axis('off')
axes[1].set_position(Bbox([[0.01, 0.8], [0.99, 0.88]]))
axes[1].text(0.01, 0.72, '1. Breast cancer risk predicted by MiScan', fontsize=20)
axes[1].axis('off')
axes[2].set_position(Bbox([[0.09, 0.57], [0.95, 0.83]]))
train_pat = pd.read_csv(inDir[0], header=None).values
train_norm = | pd.read_csv(inDir[1], header=None) | pandas.read_csv |
"""Provides backend functions for the model of MVC design.
"""
import os
import arrow
import pandas as pd
def get_coin_files(exc, save_path):
"""Provides all coin file paths in a given exchange's folder.
Args:
exc (obj): target exchange
save_path (str): main save path in OS
Returns:
(list): filtered coin file paths from all
files existed in the exchange folder
"""
exc_path = os.path.join(save_path, exc.name)
if not os.path.isdir(exc_path):
return []
all_files = os.listdir(exc_path)
coin_files = list(filter(lambda x:
x.count('_') == 5 and
x.count('-') == 2 and
                             x.endswith('.csv'), all_files))
return [os.path.join(exc_path, file) for file in coin_files]
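# Example of a name accepted by the filter above (hypothetical, not taken from
# the project): 'BTC_USDT_01-01-2021_00_1h_v1.csv' has five underscores, two
# hyphens and the .csv suffix, so it is returned as a coin data file.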
def create_exc_folder(exc, save_path):
"""Creates a directory of exchange in the OS.
Args:
exc (str): target exchange
save_path (str): main save path in OS
"""
exc_path = os.path.join(save_path, exc.name)
if not os.path.isdir(exc_path):
os.mkdir(exc_path)
def read_last_update_from_file(file_path):
"""Reads the last date of data downloaded from coin file.
Args:
file_path (str): path of coin file
Returns:
(obj): last date of coin data
"""
with open(file_path) as f:
data = f.readlines()
if len(data) > 4:
return arrow.get(data[-1].split(';')[0])
def form_new_coin_data(comment, last_update):
"""Forms a coin data dictionary from existing file.
Args:
comment (str): info comment written in coin CSV file
last_update (list) : latest date and hour received from
downloaded coin data
Raises:
ValueError: occurs if info comment read from coin file is
in different format than expected
Returns:
coin_data (dict): coin data for object creation
"""
data = comment.split(' ')
if not len(data) == 8:
raise ValueError(
            f'Csv file of {data[0]} found in the exchange folder.\n'
            'However, the coin info comment was not in the expected format!\n\n')
return {'Name': data[0],
'Quote': data[1],
'Base': data[2],
'StartDate': data[3],
'StartHour': data[4],
'EndDate': data[5],
'EndHour': data[6],
'Frequency': data[7],
'LastUpdate': last_update}
def read_file_comment(file_path):
"""Reads info comment in a coin CSV file.
Args:
file_path (str): given coin file path
Raises:
ValueError: occurs if no comment exist at the top
of coin file
Returns:
comment (str): info comment in the coin file
"""
with open(file_path, 'r') as f:
line = f.readline()
if not line.startswith('#'):
file_name = file_path.split('\\')[2]
raise ValueError(
f"{file_name} does not include coin info comment!\n\n")
return line.replace('#', '')
def create_coin_file(exc, coin, save_path):
"""Creates a CSV file for a given coin.
Args:
exc (obj): exchange which coin belongs
coin (obj): target coin
save_path (str): main save path in OS
"""
headers = [i['Column Name'] for i in exc.db_columns]
exc_path = os.path.join(save_path, exc.name)
file_path = os.path.join(exc_path, coin.file_name)
if not os.path.isfile(file_path):
write_initial_comment(coin, file_path)
df = pd.DataFrame(columns=headers)
df.to_csv(file_path, index=False, sep=';', mode='a')
else:
raise FileExistsError(
f'{coin.name.upper()} already exists in the system:'
f'\n{file_path}')
def write_initial_comment(coin, file_path):
"""Creates an info comment for given coin and writes it in coin CSV file.
Args:
coin (obj): target coin
file_path (str): coin file path in OS
"""
comment = '#{} {} {} {} {} {}'.format(
coin.name,
coin.quote,
coin.base,
coin.start_date.format('DD-MM-YYYY HH:mm:ss'),
coin.end_date.format('DD-MM-YYYY HH:mm:ss'),
coin.frequency)
line = '\n#-----------------------------------------------------------'
with open(file_path, 'w') as f:
f.write(comment+line)
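# With hypothetical coin attributes (name 'BTC', quote 'USDT', base 'USD',
# hourly frequency), the comment written above would read:
#   #BTC USDT USD 01-01-2021 00:00:00 01-02-2021 00:00:00 1h
# i.e. exactly the eight space-separated fields that form_new_coin_data() expects.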
def delete_exc_folder(exc, save_path):
"""Deletes given exchange's folder from OS.
Args:
exc (obj): target exchange
save_path (str): main save path in OS
"""
exc_path = os.path.join(save_path, exc.name)
os.rmdir(exc_path)
def delete_coin_file(exc, coin, save_path):
"""Deletes given coin's csv file from OS.
Args:
exc (obj): exchange possessing coin
coin (obj) : target coin
save_path (str): main save path in OS
"""
exc_path = os.path.join(save_path, exc.name)
file_path = os.path.join(exc_path, coin.file_name)
os.remove(file_path)
def save_data(exc, coin, data, save_path):
"""Saves downloaded date information to the coin CSV file.
Args:
exc (obj): exchange possessing coin
coin (obj) : target coin
data (list): downloaded coin data
save_path (str): main save path
"""
exc_path = os.path.join(save_path, exc.name)
file_path = os.path.join(exc_path, coin.file_name)
df = | pd.DataFrame(data) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Race-car Data Creation Class.
This script contains all utilities to create proper dataset.
Revision History:
2020-05-10 (Animesh): Baseline Software.
2020-08-22 (Animesh): Updated Docstring.
Example:
from _data_handler import DataHandler
"""
#___Import Modules:
import os
import random
import pandas as pd
import matplotlib.pyplot as plt
from rc_nn_utility import ParseData
#___Global Variables:
SEED = 717
#__Classes:
class DataHandler:
"""Data Creation Utility Class.
This class contains all methods to complete create datasets such as random
data set, or 5 fold cross validation dataset.
"""
def __init__(self):
"""Constructor.
"""
pass
def merge_all(self, idir, output):
"""File Merger.
This method merges contents from multiple csv files.
Args:
idir (directory path): Directory path containing all csv files.
output (csv file): File containing all contents.
Returns:
            None: the merged image list is written to the output csv file.
"""
# read all files from provided folder
files = os.listdir(idir)
content = []
for ifile in files:
# collect contents from files in provided folder
if ifile[-4:] == ".csv":
content.extend(pd.read_csv(os.path.join(idir, \
ifile))['image'].to_list())
# write merged contents to output file
pd.DataFrame(content, columns =['image']).to_csv(output, index=False)
return None
def list_merge(self, lists):
"""List Merger.
This method merges contents from multiple lists.
Args:
lists (list): List of multiple lists to merge.
Returns:
data (list): Merged list.
"""
# loop over lists and put them all in one list
data = []
        for sub_list in lists:
            data.extend(sub_list)
return data
def refine_running(self, input, output, speed = 15):
"""Refine Running.
This method removes data with provided motor value from a list.
Args:
input (csv file): File containing contents to refine.
output (csv file): File containing refined contents.
speed (int): Motor value to be removed.
"""
parsedata = ParseData()
# read file contents
data = pd.read_csv(input)
file = []
for index in range(len(data)):
            # parse motor data to verify speed
_,_,mot = parsedata.parse_data(data["image"][index])
            # append data if the car is running
if mot != speed:
file.append(data["image"][index])
# write merged contents to output file
pd.DataFrame(file, columns=["image"]).to_csv(output, index=False)
return None
def histogram(self, ilist, odir):
"""Plot Histogram.
This method plots histogram from servo and motor value parsed from a
list of images.
Args:
ilist (csv file): File containing list of images.
odir (directory path): Output directory.
"""
parsedata = ParseData()
# read file contents
data = pd.read_csv(ilist)
servo = []
motor = []
for index in range(len(data)):
# parse servo and motor data
_,ser,mot = parsedata.parse_data(data["image"][index])
servo.append(ser)
motor.append(mot)
# plot histogram of servo data
plt.figure()
plt.hist(servo, bins=11)
plt.title("Servo Data Histogram")
plt.savefig(os.path.join(odir,"Servo Data Histogram.png"))
# plot histogram of motor data
plt.figure()
plt.hist(motor, bins=11)
plt.title("Motor Data Histogram")
plt.savefig(os.path.join(odir,"Motor Data Histogram.png"))
return None
def devide_data(self, ilist, odir):
"""Dataset Devider.
This method devides dataset according to servo value.
Args:
ilist (csv file): File containing list of images.
odir (directory path): Output directory.
"""
parsedata = ParseData()
# read file contents
data = pd.read_csv(ilist)
data_10 = []
data_11 = []
data_12 = []
data_13 = []
data_14 = []
data_15 = []
data_16 = []
data_17 = []
data_18 = []
data_19 = []
data_20 = []
for index in range(len(data)):
# parse servo and motor data
_,servo,_ = parsedata.parse_data(data["image"][index])
            # divide dataset
if servo == 10:
data_10.append(data["image"][index])
elif servo == 11:
data_11.append(data["image"][index])
elif servo == 12:
data_12.append(data["image"][index])
elif servo == 13:
data_13.append(data["image"][index])
elif servo == 14:
data_14.append(data["image"][index])
elif servo == 15:
data_15.append(data["image"][index])
elif servo == 16:
data_16.append(data["image"][index])
elif servo == 17:
data_17.append(data["image"][index])
elif servo == 18:
data_18.append(data["image"][index])
elif servo == 19:
data_19.append(data["image"][index])
elif servo == 20:
data_20.append(data["image"][index])
# write data
pd.DataFrame(data_10, columns=["image"]).to_csv(os.path.join(odir, \
"servo_10.csv"), index=False)
pd.DataFrame(data_11, columns=["image"]).to_csv(os.path.join(odir, \
"servo_11.csv"), index=False)
pd.DataFrame(data_12, columns=["image"]).to_csv(os.path.join(odir, \
"servo_12.csv"), index=False)
pd.DataFrame(data_13, columns=["image"]).to_csv(os.path.join(odir, \
"servo_13.csv"), index=False)
pd.DataFrame(data_14, columns=["image"]).to_csv(os.path.join(odir, \
"servo_14.csv"), index=False)
pd.DataFrame(data_15, columns=["image"]).to_csv(os.path.join(odir, \
"servo_15.csv"), index=False)
pd.DataFrame(data_16, columns=["image"]).to_csv(os.path.join(odir, \
"servo_16.csv"), index=False)
pd.DataFrame(data_17, columns=["image"]).to_csv(os.path.join(odir, \
"servo_17.csv"), index=False)
pd.DataFrame(data_18, columns=["image"]).to_csv(os.path.join(odir, \
"servo_18.csv"), index=False)
pd.DataFrame(data_19, columns=["image"]).to_csv(os.path.join(odir, \
"servo_19.csv"), index=False)
pd.DataFrame(data_20, columns=["image"]).to_csv(os.path.join(odir, \
"servo_20.csv"), index=False)
return None
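    # Optional refactor sketch (not used by the class): the eleven per-servo lists
    # in devide_data can be replaced by a dictionary keyed on servo value, keeping
    # the behaviour identical for servo values 10-20 while removing the repetition.
    #
    #   buckets = {servo: [] for servo in range(10, 21)}
    #   for index in range(len(data)):
    #       _, servo, _ = parsedata.parse_data(data["image"][index])
    #       if servo in buckets:
    #           buckets[servo].append(data["image"][index])
    #   for servo, images in buckets.items():
    #       pd.DataFrame(images, columns=["image"]).to_csv(
    #           os.path.join(odir, "servo_{}.csv".format(servo)), index=False)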
def train_test_dev(self, type, idir, odir, ratio=None, total=None):
"""Final Dataset Creator.
This method creates train, test and dev dataset.
Args:
            type (string): Type of dataset split ('random', 'folded' or 'controlled').
idir (directory path): Directory containing input CSV files.
odir (directory path): Output directory.
ratio (list): List containing ratio of train, test and dev dataset.
total (list): List containing the number of total data to be parsed
from each CSV file.
"""
if type == "random":
self.random(idir, odir, ratio)
elif type == "folded":
self.folded(idir, odir)
elif type == "controlled":
self.controlled(idir, odir, ratio, total)
return None
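    # Usage sketch (hypothetical paths, `tool` an assumed instance of this class):
    # a 70/20/10 random split over every CSV found in "divided/", written to "dataset/":
    #   tool.train_test_dev("random", "divided/", "dataset/", ratio=[0.7, 0.2, 0.1])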
def random(self, idir, odir, ratio):
"""Randomly Shuffled Dataset Creator.
This method creates a randomly shuffled train, test and dev dataset.
Args:
idir (directory path): Directory containing input CSV files.
odir (directory path): Output directory.
ratio (list): List containing ratio of train, test and dev dataset.
"""
# read all files from provided folder
files = os.listdir(idir)
content = []
for ifile in files:
# collect contents from files in provided folder
if ifile[-4:] == ".csv":
content.extend(pd.read_csv(os.path.join(idir, \
ifile))['image'].to_list())
# randomly shuffle dataset
random.shuffle(content)
        # divide dataset into train, test and dev sets according to the given ratio
train = content[0:int(ratio[0]*len(content))]
test = content[int(ratio[0]*len(content)):
int((ratio[0]+ratio[1])*len(content))]
dev = content[int((ratio[0]+ratio[1])*len(content)):]
# write data
| pd.DataFrame(train, columns=["image"]) | pandas.DataFrame |
import pandas as pd
import numpy as np
import talib
def get_label(revenue, period_length):
if period_length == 1:
if revenue <= 0.1:
lab = 0
elif 0.1 < revenue <= 1:
lab = 1
elif 1 < revenue <= 3:
lab = 2
else:
lab = 3
elif period_length == 2:
if revenue <= 0.1:
lab = 0
elif 0.1 < revenue <= 2:
lab = 1
elif 2 < revenue <= 5:
lab = 2
else:
lab = 3
elif period_length == 4:
if revenue <= 0.2:
lab = 0
elif 0.2 < revenue <= 3:
lab = 1
elif 3 < revenue <= 8:
lab = 2
else:
lab = 3
else:
if revenue <= 0.3:
lab = 0
elif 0.3 < revenue <= 4:
lab = 1
elif 4 < revenue <= 12:
lab = 2
else:
lab = 3
return lab
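# Worked example: with period_length=4 a revenue of 5 falls in the
# "3 < revenue <= 8" bucket, so get_label(5, 4) returns 2; the same revenue with
# period_length=1 exceeds 3 and returns the top label 3.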
def get_info(dataset, info_name):
info_array = np.array([float(x) for x in dataset[info_name]])
return info_array
# stockCode = '002371'
# stockCode = '600036'
stockCode = '600048'
kMultiples = [0.5, 1, 3, 6, 12, 24, 48, 96]
dayPeriods = [1, 2, 4, 8]
startDate = '2017-08-01'
endDate = '2018-08-01'
rootPath = '.././'
inputFile = rootPath + stockCode + '**.csv'
data = | pd.read_csv(inputFile, engine='python', skipfooter=1) | pandas.read_csv |
import pytest
from pysqlgui import core_database
from pysqlgui.core_table import Table
import pandas as pd
def test_init_no_parameters():
db = core_database.Database()
assert hasattr(db, "connection")
assert hasattr(db, "cursor")
assert hasattr(db, "name")
assert hasattr(db, "tables")
assert db.name is None
assert db.tables == []
def test_init_with_name_parameter_only():
db = core_database.Database(None, None, "name_of_db")
assert db.name == "name_of_db"
assert isinstance(db.name, str)
assert db.tables == []
def test_get_table_check_table_type():
db = core_database.Database([pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])],['example_table'])
assert isinstance(db.get_table('example_table'), Table)
def test_get_table_on_non_existent_table():
db = core_database.Database()
with pytest.raises(ValueError):
db.get_table('some_table_name_that_doesnt_exist')
def test_remove_on_non_existent_table():
db = core_database.Database()
with pytest.raises(ValueError):
db.remove(Table(pd.DataFrame(), 'some_table_name_that_doesnt_exist'))
def test_remove_on_existent_table():
db = core_database.Database([pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])],['example_table'])
t = db.get_table('example_table')
assert isinstance(t, Table)
assert db.remove(t) is None
assert len(db.tables) == 0
def test_summary_on_existent_table():
example_df = [pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])]
db = core_database.Database(example_df, ['example_table'])
df = db.summary()
assert isinstance(df, pd.DataFrame)
assert not df.empty
assert list(df.columns.values) == ['Table Name', 'Rows', 'Columns']
assert any(df['Table Name'] == 'example_table')
assert df[df['Table Name'] == 'example_table']['Rows'].values[0] == 3
assert df[df['Table Name'] == 'example_table']['Columns'].values[0] == 2
def test_info_on_existent_table_but_called_with_wrong_name():
db = core_database.Database([pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])],['example_table'])
t = db.get_table('example_table')
assert isinstance(t, Table)
with pytest.raises(ValueError):
db.info('table_does_not_exist')
def test_info_on_existent_table():
db = core_database.Database([pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])],['example_table'])
t = db.get_table('example_table')
assert isinstance(t, Table)
df = db.info('example_table')
assert isinstance(df, pd.DataFrame)
assert set(list(df.columns.values)) == {'Column ID', 'Column Name', 'Type', 'Not NULL?', 'Default Value', 'Primary Key?'}
assert all(df[df['Column Name'] == 'Primary Key?'])
assert any(df[df['Column Name'] == 'age'])
def test_run_query_simple_select():
db = core_database.Database([pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])],['example_table'])
assert isinstance(db.run_query('SELECT * FROM example_table'), pd.DataFrame)
assert not db.run_query('SELECT * FROM example_table').empty
def test_run_query_with_pragma():
db = core_database.Database([pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])],['example_table'])
assert isinstance(db.run_query('''PRAGMA TABLE_INFO('example_table')'''), pd.DataFrame)
assert not db.run_query('''PRAGMA TABLE_INFO('example_table')''').empty
def test_run_query_wrong_syntax():
db = core_database.Database([pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])],['example_table'])
with pytest.raises(ValueError):
db.run_query('SELECT * FROMMMMM example_table')
def test_select_simple_query():
db = core_database.Database([pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])],['example_table'])
assert not db.select('''SELECT * FROM example_table''').empty
def test_select_wrong_syntax():
db = core_database.Database([pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])],['example_table'])
with pytest.raises(ValueError):
db.select('SELECT * FROMMMMM example_table')
def test_add_table_valid_data_in_list_but_no_table_name():
df = | pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age']) | pandas.DataFrame |
#!/usr/bin/env python
import json
import numpy as np
import pandas as pd
import os
import sys
import atomsci.ddm.pipeline.model_pipeline as mp
import atomsci.ddm.pipeline.parameter_parser as parse
import atomsci.ddm.utils.curate_data as curate_data
import atomsci.ddm.utils.struct_utils as struct_utils
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import integrative_utilities
def clean():
"""
Clean test files
"""
for f in ['delaney-processed_curated.csv',
'delaney-processed_curated_fit.csv',
'delaney-processed_curated_external.csv',
'delaney-processed_curated_predict.csv']:
if os.path.isfile(f):
os.remove(f)
def curate():
"""
Curate dataset for model fitting
"""
if (not os.path.isfile('delaney-processed_curated.csv') and
not os.path.isfile('delaney-processed_curated_fit.csv') and
not os.path.isfile('delaney-processed_curated_external.csv')):
raw_df = pd.read_csv('delaney-processed.csv')
# Generate smiles, inchi
raw_df['rdkit_smiles'] = raw_df['smiles'].apply(curate_data.base_smiles_from_smiles)
raw_df['inchi_key'] = raw_df['smiles'].apply(struct_utils.smiles_to_inchi_key)
# Check for duplicate compounds based on SMILES string
# Average the response value for duplicates
# Remove compounds where response value variation is above the threshold
        # tolerance = percentage an individual response value may differ from the average and still be included in averaging.
# max_std = maximum allowed standard deviation for computed average response value
tolerance = 10 # percentage
column = 'measured log solubility in mols per litre'
list_bad_duplicates = 'Yes'
data = raw_df
        max_std = 100000  # essentially turned off in this example
data['compound_id'] = data['inchi_key']
curated_df = curate_data.average_and_remove_duplicates(
column, tolerance, list_bad_duplicates, data, max_std, compound_id='compound_id', smiles_col='rdkit_smiles')
# Check distribution of response values
assert (curated_df.shape[0] == 1117), 'Error: Incorrect number of compounds'
curated_df.to_csv('delaney-processed_curated.csv')
# Create second test set by reproducible index for prediction
curated_df.tail(1000).to_csv('delaney-processed_curated_fit.csv')
curated_df.head(117).to_csv('delaney-processed_curated_external.csv')
assert (os.path.isfile('delaney-processed_curated.csv'))
assert (os.path.isfile('delaney-processed_curated_fit.csv'))
assert (os.path.isfile('delaney-processed_curated_external.csv'))
def download():
"""
Separate download function so that download can be run separately if there is no internet.
"""
if (not os.path.isfile('delaney-processed.csv')):
integrative_utilities.download_save(
'https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/delaney-processed.csv',
'delaney-processed.csv')
assert (os.path.isfile('delaney-processed.csv'))
def test():
"""
Test full model pipeline: Curate data, fit model, and predict property for new compounds
"""
# Clean
# -----
integrative_utilities.clean_fit_predict()
clean()
# Download
# --------
download()
# Curate
# ------
curate()
# Train model
# -----------
# Read parameter JSON file
with open('config_delaney_fit_RF.json') as f:
config = json.loads(f.read())
# Parse parameters
params = parse.wrapper(config)
# Create model pipeline
model = mp.ModelPipeline(params)
# Train model
model.train_model()
# Get uuid and reload directory
# -----------------------------
model_dir = 'result/delaney-processed_curated_fit/RF_computed_descriptors_scaffold_regression'
uuid = integrative_utilities.get_subdirectory(model_dir)
reload_dir = model_dir+'/'+uuid
# Check training statistics
# -------------------------
integrative_utilities.training_statistics_file(reload_dir, 'test', 0.6)
# Make prediction parameters
# --------------------------
# Read prediction parameter JSON file
with open('config_delaney_predict_RF.json', 'r') as f:
predict_parameters_dict = json.loads(f.read())
# Set transformer key here because model uuid is not known before fit
predict_parameters_dict['transformer_key'] = reload_dir+'transformers.pkl'
# Set output directory for random forest (RF) model
predict_parameters_dict['result_dir'] = reload_dir
predict_parameters = parse.wrapper(predict_parameters_dict)
# Load second test set
# --------------------
data = | pd.read_csv('delaney-processed_curated_external.csv') | pandas.read_csv |
import calendar
import datetime
import warnings
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_series_equal
from numpy.testing import assert_allclose
import pytest
from pvlib._deprecation import pvlibDeprecationWarning
from pvlib.location import Location
from pvlib import solarposition, spa
from conftest import (fail_on_pvlib_version, requires_ephem, needs_pandas_0_17,
requires_spa_c, requires_numba)
# setup times and locations to be tested.
times = pd.date_range(start=datetime.datetime(2014, 6, 24),
end=datetime.datetime(2014, 6, 26), freq='15Min')
tus = Location(32.2, -111, 'US/Arizona', 700) # no DST issues possible
times_localized = times.tz_localize(tus.tz)
tol = 5
@pytest.fixture()
def golden():
return Location(39.742476, -105.1786, 'America/Denver', 1830.14)
@pytest.fixture()
def golden_mst():
return Location(39.742476, -105.1786, 'MST', 1830.14)
@pytest.fixture()
def expected_solpos():
return _expected_solpos_df()
# hack to make tests work without too much modification while avoiding
# pytest 4.0 inability to call fixtures directly
def _expected_solpos_df():
return pd.DataFrame({'elevation': 39.872046,
'apparent_zenith': 50.111622,
'azimuth': 194.340241,
'apparent_elevation': 39.888378},
index=['2003-10-17T12:30:30Z'])
@pytest.fixture()
def expected_solpos_multi():
return pd.DataFrame({'elevation': [39.872046, 39.505196],
'apparent_zenith': [50.111622, 50.478260],
'azimuth': [194.340241, 194.311132],
'apparent_elevation': [39.888378, 39.521740]},
index=['2003-10-17T12:30:30Z', '2003-10-18T12:30:30Z'])
@pytest.fixture()
def expected_rise_set_spa():
# for Golden, CO, from NREL SPA website
times = pd.DatetimeIndex([datetime.datetime(2015, 1, 2),
datetime.datetime(2015, 8, 2),
]).tz_localize('MST')
sunrise = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 7, 21, 55),
datetime.datetime(2015, 8, 2, 5, 0, 27)
]).tz_localize('MST').tolist()
sunset = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 16, 47, 43),
datetime.datetime(2015, 8, 2, 19, 13, 58)
]).tz_localize('MST').tolist()
transit = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 12, 4, 45),
datetime.datetime(2015, 8, 2, 12, 6, 58)
]).tz_localize('MST').tolist()
return pd.DataFrame({'sunrise': sunrise,
'sunset': sunset,
'transit': transit},
index=times)
@pytest.fixture()
def expected_rise_set_ephem():
# for Golden, CO, from USNO websites
times = pd.DatetimeIndex([datetime.datetime(2015, 1, 1),
datetime.datetime(2015, 1, 2),
datetime.datetime(2015, 1, 3),
datetime.datetime(2015, 8, 2),
]).tz_localize('MST')
sunrise = pd.DatetimeIndex([datetime.datetime(2015, 1, 1, 7, 22, 0),
datetime.datetime(2015, 1, 2, 7, 22, 0),
datetime.datetime(2015, 1, 3, 7, 22, 0),
datetime.datetime(2015, 8, 2, 5, 0, 0)
]).tz_localize('MST').tolist()
sunset = pd.DatetimeIndex([datetime.datetime(2015, 1, 1, 16, 47, 0),
datetime.datetime(2015, 1, 2, 16, 48, 0),
datetime.datetime(2015, 1, 3, 16, 49, 0),
datetime.datetime(2015, 8, 2, 19, 13, 0)
]).tz_localize('MST').tolist()
transit = pd.DatetimeIndex([datetime.datetime(2015, 1, 1, 12, 4, 0),
datetime.datetime(2015, 1, 2, 12, 5, 0),
datetime.datetime(2015, 1, 3, 12, 5, 0),
datetime.datetime(2015, 8, 2, 12, 7, 0)
]).tz_localize('MST').tolist()
return pd.DataFrame({'sunrise': sunrise,
'sunset': sunset,
'transit': transit},
index=times)
@fail_on_pvlib_version('0.7')
def test_deprecated_07():
tt = pd.DatetimeIndex(['2015-01-01 00:00:00']).tz_localize('MST')
with pytest.warns(pvlibDeprecationWarning):
solarposition.get_sun_rise_set_transit(tt,
39.7,
-105.2)
# the physical tests are run at the same time as the NREL SPA test.
# pyephem reproduces the NREL result to 2 decimal places.
# this doesn't mean that one code is better than the other.
@requires_spa_c
def test_spa_c_physical(expected_solpos, golden_mst):
times = pd.date_range(datetime.datetime(2003, 10, 17, 12, 30, 30),
periods=1, freq='D', tz=golden_mst.tz)
ephem_data = solarposition.spa_c(times, golden_mst.latitude,
golden_mst.longitude,
pressure=82000,
temperature=11)
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
@requires_spa_c
def test_spa_c_physical_dst(expected_solpos, golden):
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.spa_c(times, golden.latitude,
golden.longitude,
pressure=82000,
temperature=11)
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_spa_python_numpy_physical(expected_solpos, golden_mst):
times = pd.date_range(datetime.datetime(2003, 10, 17, 12, 30, 30),
periods=1, freq='D', tz=golden_mst.tz)
ephem_data = solarposition.spa_python(times, golden_mst.latitude,
golden_mst.longitude,
pressure=82000,
temperature=11, delta_t=67,
atmos_refract=0.5667,
how='numpy')
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_spa_python_numpy_physical_dst(expected_solpos, golden):
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.spa_python(times, golden.latitude,
golden.longitude,
pressure=82000,
temperature=11, delta_t=67,
atmos_refract=0.5667,
how='numpy')
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
@needs_pandas_0_17
def test_sun_rise_set_transit_spa(expected_rise_set_spa, golden):
    # solution from NREL SPA web calculator
south = Location(-35.0, 0.0, tz='UTC')
times = pd.DatetimeIndex([datetime.datetime(1996, 7, 5, 0),
datetime.datetime(2004, 12, 4, 0)]
).tz_localize('UTC')
sunrise = pd.DatetimeIndex([datetime.datetime(1996, 7, 5, 7, 8, 15),
datetime.datetime(2004, 12, 4, 4, 38, 57)]
).tz_localize('UTC').tolist()
sunset = pd.DatetimeIndex([datetime.datetime(1996, 7, 5, 17, 1, 4),
datetime.datetime(2004, 12, 4, 19, 2, 3)]
).tz_localize('UTC').tolist()
transit = pd.DatetimeIndex([datetime.datetime(1996, 7, 5, 12, 4, 36),
datetime.datetime(2004, 12, 4, 11, 50, 22)]
).tz_localize('UTC').tolist()
frame = pd.DataFrame({'sunrise': sunrise,
'sunset': sunset,
'transit': transit}, index=times)
result = solarposition.sun_rise_set_transit_spa(times, south.latitude,
south.longitude,
delta_t=65.0)
result_rounded = pd.DataFrame(index=result.index)
# need to iterate because to_datetime does not accept 2D data
# the rounding fails on pandas < 0.17
for col, data in result.iteritems():
result_rounded[col] = data.dt.round('1s')
assert_frame_equal(frame, result_rounded)
# test for Golden, CO compare to NREL SPA
result = solarposition.sun_rise_set_transit_spa(
expected_rise_set_spa.index, golden.latitude, golden.longitude,
delta_t=65.0)
# round to nearest minute
result_rounded = pd.DataFrame(index=result.index)
# need to iterate because to_datetime does not accept 2D data
for col, data in result.iteritems():
result_rounded[col] = data.dt.round('s').tz_convert('MST')
assert_frame_equal(expected_rise_set_spa, result_rounded)
@requires_ephem
def test_sun_rise_set_transit_ephem(expected_rise_set_ephem, golden):
# test for Golden, CO compare to USNO, using local midnight
result = solarposition.sun_rise_set_transit_ephem(
expected_rise_set_ephem.index, golden.latitude, golden.longitude,
next_or_previous='next', altitude=golden.altitude, pressure=0,
temperature=11, horizon='-0:34')
# round to nearest minute
result_rounded = pd.DataFrame(index=result.index)
for col, data in result.iteritems():
result_rounded[col] = data.dt.round('min').tz_convert('MST')
assert_frame_equal(expected_rise_set_ephem, result_rounded)
# test next sunrise/sunset with times
times = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 3, 0, 0),
datetime.datetime(2015, 1, 2, 10, 15, 0),
datetime.datetime(2015, 1, 2, 15, 3, 0),
datetime.datetime(2015, 1, 2, 21, 6, 7)
]).tz_localize('MST')
expected = pd.DataFrame(index=times,
columns=['sunrise', 'sunset'],
dtype='datetime64[ns]')
expected['sunrise'] = pd.Series(index=times, data=[
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'sunrise'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 3), 'sunrise'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 3), 'sunrise'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 3), 'sunrise']])
expected['sunset'] = pd.Series(index=times, data=[
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'sunset'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'sunset'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'sunset'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 3), 'sunset']])
expected['transit'] = pd.Series(index=times, data=[
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'transit'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'transit'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 3), 'transit'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 3), 'transit']])
result = solarposition.sun_rise_set_transit_ephem(times,
golden.latitude,
golden.longitude,
next_or_previous='next',
altitude=golden.altitude,
pressure=0,
temperature=11,
horizon='-0:34')
# round to nearest minute
result_rounded = pd.DataFrame(index=result.index)
for col, data in result.iteritems():
result_rounded[col] = data.dt.round('min').tz_convert('MST')
assert_frame_equal(expected, result_rounded)
# test previous sunrise/sunset with times
times = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 3, 0, 0),
datetime.datetime(2015, 1, 2, 10, 15, 0),
datetime.datetime(2015, 1, 3, 3, 0, 0),
datetime.datetime(2015, 1, 3, 13, 6, 7)
]).tz_localize('MST')
expected = pd.DataFrame(index=times,
columns=['sunrise', 'sunset'],
dtype='datetime64[ns]')
expected['sunrise'] = pd.Series(index=times, data=[
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 1), 'sunrise'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'sunrise'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'sunrise'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 3), 'sunrise']])
expected['sunset'] = pd.Series(index=times, data=[
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 1), 'sunset'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 1), 'sunset'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'sunset'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'sunset']])
expected['transit'] = pd.Series(index=times, data=[
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 1), 'transit'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 1), 'transit'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'transit'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 3), 'transit']])
result = solarposition.sun_rise_set_transit_ephem(
times,
golden.latitude, golden.longitude, next_or_previous='previous',
altitude=golden.altitude, pressure=0, temperature=11, horizon='-0:34')
# round to nearest minute
result_rounded = pd.DataFrame(index=result.index)
for col, data in result.iteritems():
result_rounded[col] = data.dt.round('min').tz_convert('MST')
assert_frame_equal(expected, result_rounded)
# test with different timezone
times = times.tz_convert('UTC')
    expected = expected.tz_convert('UTC')  # reuse result from previous
for col, data in expected.iteritems():
expected[col] = data.dt.tz_convert('UTC')
result = solarposition.sun_rise_set_transit_ephem(
times,
golden.latitude, golden.longitude, next_or_previous='previous',
altitude=golden.altitude, pressure=0, temperature=11, horizon='-0:34')
# round to nearest minute
result_rounded = pd.DataFrame(index=result.index)
for col, data in result.iteritems():
result_rounded[col] = data.dt.round('min').tz_convert(times.tz)
assert_frame_equal(expected, result_rounded)
@requires_ephem
def test_sun_rise_set_transit_ephem_error(expected_rise_set_ephem, golden):
with pytest.raises(ValueError):
solarposition.sun_rise_set_transit_ephem(expected_rise_set_ephem.index,
golden.latitude,
golden.longitude,
next_or_previous='other')
tz_naive = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 3, 0, 0)])
with pytest.raises(ValueError):
solarposition.sun_rise_set_transit_ephem(tz_naive,
golden.latitude,
golden.longitude,
next_or_previous='next')
@requires_ephem
def test_sun_rise_set_transit_ephem_horizon(golden):
times = pd.DatetimeIndex([datetime.datetime(2016, 1, 3, 0, 0, 0)
]).tz_localize('MST')
# center of sun disk
center = solarposition.sun_rise_set_transit_ephem(
times,
latitude=golden.latitude, longitude=golden.longitude)
edge = solarposition.sun_rise_set_transit_ephem(
times,
latitude=golden.latitude, longitude=golden.longitude, horizon='-0:34')
result_rounded = (edge['sunrise'] - center['sunrise']).dt.round('min')
sunrise_delta = datetime.datetime(2016, 1, 3, 7, 17, 11) - \
datetime.datetime(2016, 1, 3, 7, 21, 33)
expected = pd.Series(index=times,
data=sunrise_delta,
name='sunrise').dt.round('min')
| assert_series_equal(expected, result_rounded) | pandas.util.testing.assert_series_equal |
import sys
import logging
import argparse
import pandas as pd
def compute_score(predictions, actual):
"""Look at 5% of most highly predicted movies for each user.
Return the average actual rating of those movies.
"""
df = pd.merge(predictions, actual, on=['user','movie']).fillna(1.0)
#df = pd.concat([predictions.fillna(1.0), actual.actualrating], axis=1)
# for each user
g = df.groupby('user')
# detect the top_5 movies as predicted by your algorithm
top_5 = g.rating.transform(
lambda x: x >= x.quantile(.95)
)
# return the mean of the actual score on those
return df.actualrating[top_5==1].mean()
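# Worked example (toy frames, hypothetical values): for a single user the top-5%
# cut keeps only the highest-predicted movie, so the score is its actual rating.
#   preds = pd.DataFrame({'user': [1, 1], 'movie': [10, 20], 'rating': [4.5, 3.0]})
#   truth = pd.DataFrame({'user': [1, 1], 'movie': [10, 20], 'actualrating': [5, 2]})
#   compute_score(preds, truth)  # -> 5.0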
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--silent', action='store_true', help="deactivate debug output")
parser.add_argument('--testing', help="testing set")
parser.add_argument("predfile", nargs=1, help="prediction file to submit")
args = parser.parse_args()
if args.silent:
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
level=logging.INFO)
else:
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
level=logging.DEBUG)
logger = logging.getLogger('reco-cs')
path_testing_ = args.testing if args.testing else "data/testing.csv"
logger.debug("using groundtruth from {}".format(path_testing_))
logger.debug("using predictions from {}".format(args.predfile[0]))
prediction_data = | pd.read_csv(args.predfile[0]) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 2 15:41:04 2021
Run MLR hedonic with run_MLR_on_all_years(features=best1)
use plot_price_rooms_new_from_new_ds for time_series new rooms MLR
for standardized betas use plot_regular_feats_comparison_from_new_ds
For RF HP tuning:
run_CV_on_all_years(df,savepath=ml_path,model_name='RF', feats=best_rf2+['SEI'])
Multifunction for RF results:
loop_over_RF_models_years(df, path=work_david/'ML', mode='score', pgrid='normal')
use mode = 'score' to calculate the R^2 for training and test
use mode = 'time-series' to get the predictions.
use mode = 'shap' to calculate the SHAP values for the test sets (warning: this takes the longest).
use mode = 'X_test' to get the test sets.
use mode = 'FI' to get feature importances.
then there are plot functions for RF and MLR:
1) plot_RF_time_series(time-series)
2) plot_RF_FI_results(fi)
3) First, produce MLR SHAPS: svs=produce_shap_MLR_all_years(df)
then, produce_RF_abs_SHAP_all_years(path=ml_path/'RF_rooms_345',mlr_shap=svs)
4)
how to produce weighted mean distance to ECs for all Israeli settlements:
first load Israeli settlement mid-points:
gdf=geo_location_settelments_israel() (from cbs_procedures)
then run calculate_distance_from_gdf_to_employment_centers:
dis = calculate_distance_from_gdf_to_employment_centers(gdf,n=18, x_coord_name='X', y_coord_name='Y')
finally save to csv:
dis.to_csv(work_david/'Israel_settlments_with_mean_weighted_distance_to_ECs.csv', na_rep='NA',sep=',', index=False)
@author: shlomi
"""
from MA_paths import work_david
from MA_paths import savefig_path
import numpy as np
ml_path = work_david / 'ML'
features = ['Floor_number', 'SEI', 'New', 'Periph_value', 'Sale_year', 'Rooms_345',
'distance_to_nearest_kindergarten', 'distance_to_nearest_school', 'Total_ends']
features1 = ['FLOORNO', 'DEALNATURE', 'NEWPROJECTTEXT',
'BUILDINGYEAR', 'SEI_value', 'Ground', 'P2015_value', 'year', 'Building_Growth_Rate']
features2 = ['FLOORNO', 'DEALNATURE', 'NEWPROJECTTEXT',
'SEI_value', 'Ground', 'year', 'Building_Growth_Rate']
features3 = ['Floor_number', 'SEI', 'New', 'Periph_value', 'Sale_year', 'Rooms_345',
'Total_ends', 'mean_distance_to_4_mokdim']
best = ['SEI', 'New', 'Sale_year', 'Rooms_345',
'Total_ends', 'mean_distance_to_28_mokdim', 'Netflow']
best1 = ['SEI', 'New', 'Sale_year', 'Rooms_345',
'Total_ends', 'mean_distance_to_28_mokdim']
best_years = best + ['year_{}'.format(x) for x in np.arange(2001, 2020)]
best_for_bs = best + ['city_code', 'Price']
next_best = ['Floor_number', 'New', 'Sale_year', 'Rooms',
'Total_ends']
best_rf = ['SEI_value_2015', 'SEI_value_2017',
'New', 'Sale_year', 'Rooms','Netflow',
'Total_ends', 'mean_distance_to_28_mokdim']
best_rf1 = ['SEI_value_2015', 'SEI_value_2017',
'New', 'Sale_year', 'Rooms',
'Total_ends', 'mean_distance_to_28_mokdim']
best_rf2 = ['SEI_value_2015', 'SEI_value_2017',
'New', 'Sale_year', 'Rooms_345',
'Total_ends', 'mean_distance_to_28_mokdim']
dummies = ['New', 'Rooms_4', 'Rooms_5']
year_dummies = ['year_{}'.format(x) for x in np.arange(2001,2020)]
room_dummies = ['Rooms_4', 'Rooms_5']
best_regular = ['SEI', 'Total_ends', 'mean_distance_to_28_mokdim', 'Netflow']
best_regular1 = ['SEI', 'Total_ends', 'mean_distance_to_28_mokdim']
general_features = ['Price', 'Rooms', 'Area_m2', 'New', 'Floor_number', 'Floors_In_Building',
'Age', 'Total_ends', 'SEI', 'mean_distance_to_28_mokdim']
apts = ['דירה', 'דירה בבית קומות']
apts_more = apts + ["קוטג' דו משפחתי", "קוטג' חד משפחתי",
"דירת גן", "בית בודד", "דירת גג", "דירת גג (פנטהאוז)"]
plot_names = {'Floor_number': 'Floor',
# 'New': 'New Apartment',
'Periph_value': 'Peripheriality',
'distance_to_nearest_kindergarten': 'Nearest kindergarten',
'distance_to_nearest_school': 'Nearest school',
'Total_ends': 'Building rate',
'mean_distance_to_28_mokdim': 'Distance to ECs',
'SEI': 'Socio-Economic Index',
'SEI_value_2015': 'Social-Economic Index',
'SEI_value_2017': 'Social-Economic Index',
'Rooms': 'Rooms', 'Rooms_3': '3 Rooms', 'Rooms_5': '5 Rooms',
'Netflow': 'Net migration',
'MISH': 'AHP',
'New': 'Used/New'
}
short_plot_names = {'Total_ends': 'BR',
'mean_distance_to_28_mokdim': 'Distance',
'SEI': 'SEI', 'New': 'Used/New'}
vars_plot_names = {'Total_ends': 'BR',
'mean_distance_to_28_mokdim': 'DI',
'SEI': 'SE', 'New': 'NE', 'Rooms': 'RM'}
vars_explained_plot_names = {'Total_ends': 'BR (Building Rate)',
'mean_distance_to_28_mokdim': 'DI (Distance to ECs)',
'SEI': 'SE (Socio-Economic Index)', 'New': 'NE (Used/New)', 'Rooms': 'RM (# of Rooms)'}
add_units_dict = {'Distance': 'Distance [km]', 'BR': r'BR [Apts$\cdot$yr$^{-1}$]',
'Netflow': r'Netflow [people$\cdot$yr$^{-1}$]'}
add_units_dict_short = {'DI': 'DI [km]', 'BR': r'BR [Apts$\cdot$yr$^{-1}$]'}
# AHP: Affordable Housing Program
def pct_change(x):
import numpy as np
return (np.exp(x)-1)*100
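# Worked example: pct_change(np.log(1.25)) ≈ 25, i.e. a log-price difference is
# converted to a percent change.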
def plot_single_tree(rf_model, X_train, y_train, est_index=100, samples=25, max_depth=2):
from sklearn import tree
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
# rf = RandomForestRegressor(max_depth=15,n_estimators=250)
# feats = ['Status', 'Rooms', 'BR', 'Distance', 'SEI']
X_train = X_train.rename(vars_plot_names, axis=1)
feats = ['NE', 'RM', 'BR', 'DI', 'SE']
# sns.set_theme(font_scale=1.8)
fig, ax = plt.subplots(figsize=(17, 10))
inds = X_train.sample(n=samples).index
y_train = np.log(np.exp(y_train)/4)
rf_model.fit(X_train.loc[inds], y_train.loc[inds])
_ = tree.plot_tree(rf_model[est_index],precision=2, fontsize=18, rounded=True,
feature_names=feats, filled=True, ax=ax, max_depth=max_depth, proportion=False)
filename = 'Nadlan_tree_example.png'
plt.savefig(savefig_path / filename, bbox_inches='tight', pad_inches=0.1)
return fig
def compare_r2_RF_MLR(sc, ds, mode='diagram'):
"""compare R2 score from dataset (sc=loop_over with mode=score)
and ds=run_MLR_on_all_years"""
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_theme(style='ticks', font_scale=1.6)
fig, ax = plt.subplots(figsize=(17, 10))
df = ds['R-squared'].to_dataframe()
df = pd.concat([df, sc], axis=1)
df.columns = ['Hedonic', 'RF train', 'RF test']
df['year'] = df.index
df = df.melt(id_vars=['year'], var_name='Model',
value_name=r'R$^2$')
# df['year'] = pd.to_datetime(df['year'], format='%Y')
if mode == 'diagram':
ax = sns.barplot(data=df, x='year', ax=ax, hue='Model', y=r'R$^2$')
# ax.set_ylabel('Apartment area [{}]'.format(unit_label))
h, l =ax.get_legend_handles_labels()
ax.legend_.remove()
ax.legend(h, l, ncol=3, title='Model')
ax.set_xlabel('')
ax.grid(True)
sns.despine(fig)
fig.tight_layout()
# for wide dataframe:
# df = df.pivot_table(columns=['Model'],values='R$^2$',index='year')
return df
def remove_outlier_area_per_room(df, col='Area_m2', k=1.5):
from Migration_main import remove_outlier
import pandas as pd
dfs = []
for room in df['Rooms'].dropna().unique():
df1 = remove_outlier(df[df['Rooms'] == room], col_name=col, k=k)
dfs.append(df1)
df = pd.concat(dfs, axis=0)
return df
def plot_rooms_area_distribution(df, units='m2'):
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_theme(style='ticks', font_scale=1.8)
fig, ax = plt.subplots(figsize=(17, 10))
if units == 'ft2':
df['Area_ft2'] = df['Area_m2'] * 10.764
col = 'Area_ft2'
unit_label = 'ft$^2$'
elif units == 'm2':
col = 'Area_m2'
unit_label = 'm$^2$'
sns.violinplot(data=df, x='Rooms', y=col, ax=ax, palette='inferno')
ax.set_ylabel('Apartment area [{}]'.format(unit_label))
ax.set_xlabel('Number of rooms')
ax.grid(True)
sns.despine(fig)
fig.tight_layout()
return fig
def plot_general_features_corr_heatmap(df, feats=general_features, year=None):
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_theme(style='ticks', font_scale=1.5)
fig, ax = plt.subplots(figsize=(17, 10))
if year is not None:
df = df[df['Sale_year']==year]
title = 'year = {}'.format(year)
else:
title = '2000 to 2019'
dff = df[feats]
dff = dff.rename(short_plot_names, axis=1)
g = sns.heatmap(dff.corr(),annot=True,cmap='coolwarm', ax=ax, center=0)
g.set_xticklabels(g.get_xticklabels(), rotation=45, ha='right')
fig.tight_layout()
fig.suptitle(title)
fig.subplots_adjust(top=0.945)
return fig
def plot_RF_time_series(X_ts, units='nis'):
"""plot rooms new time series from RF model"""
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from cbs_procedures import read_mean_salary
sns.set_theme(style='ticks', font_scale=1.8)
fig, ax = plt.subplots(figsize=(17, 10))
X_ts = X_ts[X_ts['Rooms'].isin([3, 4, 5])]
X_ts['Rooms'] = X_ts['Rooms'].astype(int)
X_ts = X_ts.rename({'New': 'Used/New'}, axis=1)
    X_ts.loc[X_ts['Used/New'] == 0, 'Used/New'] = 'Used'
    X_ts.loc[X_ts['Used/New'] == 1, 'Used/New'] = 'New'
if units == 'dollar':
X_ts['Price'] /= 4 * 1000
ylabel = 'Apartment Price [Thousands $]'
elif units == 'nis':
X_ts['Price'] /= 1e6
ylabel = 'Apartment Price [millions NIS]'
elif units == 'salary':
sal = read_mean_salary().rename({'year': 'Year'}, axis=1)
X_ts = pd.merge(X_ts, sal, on='Year', how='inner')
X_ts['Price'] /= X_ts['mean_salary']
ylabel = 'Mean salary'
X_ts['Year'] = pd.to_datetime(X_ts['Year'], format='%Y')
X_ts = X_ts.reset_index(drop=True)
sns.lineplot(data=X_ts, x='Year', y='Price', hue='Rooms', style='Used/New',
ax=ax, palette='tab10', markers=True, markersize=10)
ax.set_ylabel(ylabel)
ax.set_xlabel('')
ax.grid(True)
sns.despine(fig)
fig.tight_layout()
return fig
def produce_shap_MLR_all_years(df, feats=best1, abs_val=True):
from sklearn.linear_model import LinearRegression
import shap
import numpy as np
years = np.arange(2000, 2020, 1)
svs = []
for year in years:
print(year)
X, y = prepare_new_X_y_with_year(df, features=feats, year=year,
y_name='Price')
lr = LinearRegression()
lr.fit(X, y)
ex = shap.LinearExplainer(lr, X)
shap_values = ex.shap_values(X)
SV = convert_shap_values_to_pandas(shap_values, X)
if abs_val:
print('producing ABS SHAP.')
SV = produce_abs_SHAP_from_df(SV, X, plot=False)
svs.append(SV)
return svs
def loop_over_RF_models_years(df, path=work_david/'ML', mode='score',
pgrid='normal', feats=best_rf2+['SEI']):
import numpy as np
import pandas as pd
import shap
import xarray as xr
years = np.arange(2000, 2020, 1)
train_scores = []
test_scores = []
x_tests = []
fis = []
# shaps = []
for year in years:
print(year)
_, gr = load_HP_params_from_optimized_model(path, pgrid=pgrid,
year=year)
rf = gr.best_estimator_
X_train, X_test, y_train, y_test = produce_X_y_RF_per_year(df,
year=year,
verbose=0, feats=feats)
rf.fit(X_train, y_train)
if mode == 'score':
train_scores.append(rf.score(X_train, y_train))
test_scores.append(rf.score(X_test, y_test))
elif mode == 'time-series':
y_pred = rf.predict(X_test)
y_pred = np.exp(y_pred)
X_test['Price'] = y_pred
X_test['Year'] = year
X_test = X_test.reset_index(drop=True)
x_tests.append(X_test)
elif mode == 'shap':
# rf.fit(X_train, y_train)
explainer = shap.TreeExplainer(rf)
shap_values = explainer.shap_values(X_test.values)
SV = convert_shap_values_to_pandas(shap_values, X_test)
filename = 'Nadlan_SHAP_RF_{}.csv'.format(year)
SV.to_csv(path/filename, index=False)
# SV = SV.to_xarray().to_array('feature')
# return SV, X_test
# shaps.append(SV)
elif mode == 'X_test':
X_test.index.name = 'sample'
filename = 'Nadlan_X_test_RF_{}.csv'.format(year)
X_test.to_csv(path/filename, index=False)
# x_tests.append(X_test.to_xarray().to_array('feature'))
elif mode == 'FI':
fi = pd.DataFrame(rf.feature_importances_).T
fi.columns = X_train.columns
fi['year'] = year
fis.append(fi)
if mode == 'score':
sc = pd.DataFrame(train_scores)
sc.columns = ['train_r2']
sc['test_r2'] = test_scores
sc.index = years
return sc
elif mode == 'time-series':
X_ts = pd.concat(x_tests, axis=0)
return X_ts
elif mode == 'FI':
FI = pd.concat(fis, axis=0)
return FI
# elif mode == 'shap':
# sv_da = xr.concat(shaps, 'year')
# sv_da['year'] = years
# sv_da.attrs['long_name'] = 'Shapley values via SHAP Python package.'
# sv_da.to_netcdf(path/'Nadlan_SHAP_RF_{}-{}.nc'.format(years[0], years[-1]))
# return sv_da
# elif mode == 'X_test':
# X_ts = xr.concat(x_tests, 'year')
# X_ts['year'] = years
# X_ts.attrs['long_name'] = 'X_tests per year to use with the SHAP'
# X_ts.to_netcdf(path/'Nadlan_X_test_RF_{}-{}.nc'.format(years[0], years[-1]))
# return X_ts
def load_all_yearly_shap_values(path=work_david/'ML'):
import numpy as np
years = np.arange(2000, 2020, 1)
svs = []
X_tests = []
for year in years:
sv, X_test = load_yearly_shap_values(path, year)
svs.append(sv)
X_tests.append(X_test)
return svs, X_tests
def load_yearly_shap_values(path=work_david/'ML', year=2000):
import pandas as pd
X_test = pd.read_csv(path/'Nadlan_X_test_RF_{}.csv'.format(year))
shap_values = pd.read_csv(path/'Nadlan_SHAP_RF_{}.csv'.format(year))
assert len(X_test)==len(shap_values)
return shap_values, X_test
def load_shap_values(path=work_david/'ML', samples=10000,
interaction_too=True, rename=True):
import pandas as pd
import xarray as xr
print('loading {} samples.'.format(samples))
X_test = pd.read_csv(path/'X_test_RF_{}.csv'.format(samples))
shap_values = pd.read_csv(path/'SHAP_values_RF_{}.csv'.format(samples))
if rename:
X_test = X_test.rename(short_plot_names, axis=1)
shap_values = shap_values.rename(short_plot_names, axis=1)
if interaction_too:
print('loading interaction values too.')
shap_interaction_values = xr.load_dataarray(path/'SHAP_interaction_values_RF_{}.nc'.format(samples))
shap_interaction_values['feature1'] = X_test.columns
shap_interaction_values['feature2'] = X_test.columns
return X_test, shap_values, shap_interaction_values
else:
return X_test, shap_values
def plot_dependence(shap_values, X_test, x_feature='RM',
y_features=['DI', 'SE', 'BR'],
alpha=0.2, cmap=None, units='pct_change',
plot_size=1.5, fontsize=16, x_jitter=0.75):
import shap
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.ticker as tck
sns.set_theme(style='ticks', font_scale=1.2)
fig, axes = plt.subplots(len(y_features), 1, sharex=True, figsize=(8, 10))
X = X_test.copy()
X = X.rename(vars_plot_names, axis=1)
shap_values = shap_values.rename(vars_plot_names, axis=1)
X = X.rename(add_units_dict_short, axis=1)
# X['Old/New'] = X['Old/New'].astype(int)
# new_dict = {0: 'Old', 1: 'New'}
# X['Old/New'] = X['Old/New'].map(new_dict)
if units == 'pct_change':
shap_values = shap_values.apply(pct_change)
for i, y in enumerate(y_features):
y_new = add_units_dict_short.get(y, y)
shap.dependence_plot(x_feature, shap_values.values, X, x_jitter=x_jitter,
dot_size=4, alpha=alpha, interaction_index=y_new,
ax=axes[i])
if 'DI' in x_feature:
axes[i].set_xlim(25, 150)
if 'RM' in x_feature:
axes[i].set_xlabel('RM [# of rooms]')
cb = fig.axes[-1]
mapp = cb.collections[1]
fig.canvas.draw()
cbar = fig.colorbar(mapp, ax=axes[i],aspect=50, pad=0.05,
label=y_new)
cbar.set_alpha(0.85)
cbar.draw_all()
cb.remove()
# cbar.ax.set_yticklabels(['Low', 'High'], fontsize=fontsize)
# cbar.set_label('Predictor value')
cbar.outline.set_visible(False)
# axes[i].set_ylabel(axes[i].get_ylabel(), fontsize=fontsize)
# axes[i].set_xlabel(axes[i].get_xlabel(), fontsize=fontsize)
# axes[i].tick_params(labelsize=fontsize)
axes[i].grid(True)
if units == 'pct_change':
la = 'Price change\nfor {} [%]'.format(x_feature)
axes[i].set_ylabel(la)
[ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize) for ax in fig.axes]
[ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize) for ax in fig.axes]
[ax.tick_params(labelsize=fontsize) for ax in fig.axes]
[ax.yaxis.set_major_locator(tck.MaxNLocator(5)) for ax in fig.axes]
fig.tight_layout()
return fig
def plot_summary_shap_values(shap_values, X_test, alpha=0.7, cmap=None,
plot_size=1.5, fontsize=16, units='pct_change'):
import shap
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_theme(style='ticks', font_scale=1.8)
X_test = X_test.rename(vars_plot_names, axis=1)
shap_values = shap_values.rename(vars_plot_names, axis=1)
if units == 'pct_change':
shap_values = shap_values.apply(pct_change)
if cmap is None:
shap.summary_plot(shap_values.values, X_test, alpha=alpha, plot_size=plot_size)
else:
if not isinstance(cmap, str):
cm = cmap.get_mpl_colormap()
else:
cm = sns.color_palette(cmap, as_cmap=True)
shap.summary_plot(shap_values.values, X_test, alpha=alpha, cmap=cm, plot_size=plot_size)
if len(shap_values.shape) > 2:
fig = plt.gcf()
[ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize) for ax in fig.axes]
[ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize) for ax in fig.axes]
[ax.set_title(ax.get_title(), fontsize=fontsize) for ax in fig.axes]
[ax.tick_params(labelsize=fontsize) for ax in fig.axes]
else:
fig, ax = plt.gcf(), plt.gca()
if units == 'pct_change':
ax.set_xlabel('Price change [%]', fontsize=fontsize)
else:
ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize)
ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
cb = fig.axes[-1]
cbar = fig.colorbar(cb.collections[1], ticks=[0, 1],
aspect=50, pad=0.05)
cb.remove()
cbar.ax.set_yticklabels(['Low', 'High'], fontsize=fontsize)
cbar.set_label('Predictor value')
cbar.ax.tick_params(size=0)
cbar.outline.set_visible(False)
# cb.set_ylabel(cb.get_ylabel(), fontsize=fontsize)
# cb.tick_params(labelsize=fontsize)
fig.tight_layout()
return fig
def select_years_interaction_term(ds, regressor='SEI'):
regs = ['{}_{}'.format(x, regressor) for x in year_dummies]
ds = ds.sel(regressor=regs)
return ds
def produce_RF_abs_SHAP_all_years(path=ml_path, plot=True, mlr_shap=None,
units=None):
import xarray as xr
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
SVs, X_tests = load_all_yearly_shap_values(path)
k2s = []
for i, year in enumerate(np.arange(2000, 2020, 1)):
shap_df = SVs[i]
# shap_df.drop('year', axis=1, inplace=True)
X_test = X_tests[i]
# X_test.drop('year', axis=1, inplace=True)
k2 = produce_abs_SHAP_from_df(shap_df, X_test, plot=False)
k2['year'] = year
if mlr_shap is not None:
k2['Model'] = 'RF'
k2_mlr = mlr_shap[i]
k2_mlr['year'] = year
k2_mlr['Model'] = 'Hedonic'
k2_mlr = k2_mlr[k2_mlr['Predictor'].isin(best_regular1)]
k2 = pd.concat([k2, k2_mlr], axis=0)
k2s.append(k2)
abs_shap = pd.concat(k2s, axis=0)
abs_shap = abs_shap.reset_index(drop=True)
if plot:
sns.set_theme(style='ticks', font_scale=1.6)
fig, ax = plt.subplots(figsize=(17, 10))
abs_shap['year'] = pd.to_datetime(abs_shap['year'], format='%Y')
abs_shap = abs_shap[abs_shap['Predictor']!='New']
abs_shap = abs_shap[abs_shap['Predictor']!='Rooms']
# order:
order = ['SE (Socio-Economic Index)', 'BR (Building Rate)', 'DI (Distance to ECs)']
abs_shap['Predictor'] = abs_shap['Predictor'].map(vars_explained_plot_names)
abs_shap['SHAP_abs'] *= np.sign(abs_shap['Corr'])
if units == 'pct_change':
abs_shap['SHAP_abs'] = abs_shap['SHAP_abs'].apply(pct_change)
# order = ['Socio-Economic Index', 'Building rate', 'Distance to ECs']
if mlr_shap is not None:
sns.lineplot(data=abs_shap, x='year', y='SHAP_abs', hue='Predictor',
ax=ax, palette='Dark2', ci='sd', markers=True, linewidth=2,
hue_order=order, style='Model', markersize=10)
else:
sns.lineplot(data=abs_shap, x='year', y='SHAP_abs', hue='Predictor',
ax=ax, palette='Dark2', ci='sd', markers=True, linewidth=2,
hue_order=order, markersize=10)
if units == 'pct_change':
ax.set_ylabel('Price change [%]')
else:
ax.set_ylabel("mean |SHAP values|")
ax.set_xlabel('')
ax.grid(True)
h, la = ax.get_legend_handles_labels()
ax.legend_.remove()
ax.legend(h, la, ncol=2, loc='center')
sns.despine(fig)
fig.tight_layout()
return abs_shap
def produce_abs_SHAP_from_df(shap_df, X_test, plot=False):
import pandas as pd
shap_v = pd.DataFrame(shap_df)
feature_list = X_test.columns
shap_v.columns = feature_list
df_v = X_test.copy().reset_index()#.drop('time', axis=1)
# Determine the correlation in order to plot with different colors
corr_list = list()
for i in feature_list:
b = np.corrcoef(shap_v[i], df_v[i])[1][0]
corr_list.append(b)
corr_df = pd.concat(
[pd.Series(feature_list), pd.Series(corr_list)], axis=1).fillna(0)
# Make a data frame. Column 1 is the feature, and Column 2 is the correlation coefficient
corr_df.columns = ['Predictor', 'Corr']
corr_df['Sign'] = np.where(corr_df['Corr'] > 0, 'red', 'blue')
# Plot it
shap_abs = np.abs(shap_v)
k = pd.DataFrame(shap_abs.mean()).reset_index()
k.columns = ['Predictor', 'SHAP_abs']
k2 = k.merge(corr_df, left_on='Predictor', right_on='Predictor', how='inner')
k2 = k2.sort_values(by='SHAP_abs', ascending=True)
if plot:
colorlist = k2['Sign']
ax = k2.plot.barh(x='Predictor', y='SHAP_abs',
color=colorlist, figsize=(5, 6), legend=False)
ax.set_xlabel("SHAP Value (Red = Positive Impact)")
return k2
def ABS_SHAP(df_shap, df):
import numpy as np
import pandas as pd
import seaborn as sns
sns.set_theme(style='ticks', font_scale=1.2)
#import matplotlib as plt
# Make a copy of the input data
shap_v = pd.DataFrame(df_shap)
feature_list = df.columns
shap_v.columns = feature_list
df_v = df.copy().reset_index()#.drop('time', axis=1)
# Determine the correlation in order to plot with different colors
corr_list = list()
for i in feature_list:
b = np.corrcoef(shap_v[i], df_v[i])[1][0]
corr_list.append(b)
corr_df = pd.concat(
[pd.Series(feature_list), pd.Series(corr_list)], axis=1).fillna(0)
# Make a data frame. Column 1 is the feature, and Column 2 is the correlation coefficient
corr_df.columns = ['Predictor', 'Corr']
corr_df['Sign'] = np.where(corr_df['Corr'] > 0, 'red', 'blue')
# Plot it
shap_abs = np.abs(shap_v)
k = pd.DataFrame(shap_abs.mean()).reset_index()
k.columns = ['Predictor', 'SHAP_abs']
k2 = k.merge(corr_df, left_on='Predictor', right_on='Predictor', how='inner')
k2 = k2.sort_values(by='SHAP_abs', ascending=True)
colorlist = k2['Sign']
ax = k2.plot.barh(x='Predictor', y='SHAP_abs',
color=colorlist, figsize=(5, 6), legend=False)
ax.set_xlabel("SHAP Value (Red = Positive Impact)")
return
def plot_simplified_shap_tree_explainer(rf_model, X, y):
import shap
rf_model.fit(X, y)
dfX = X.to_dataset('regressor').to_dataframe()
dfX = dfX.rename(
{'qbo_cdas': 'QBO', 'anom_nino3p4': 'ENSO', 'co2': r'CO$_2$'}, axis=1)
ex_rf = shap.Explainer(rf_model)
shap_values_rf = ex_rf.shap_values(dfX)
ABS_SHAP(shap_values_rf, dfX)
return
def convert_shap_values_to_pandas(shap_values, X_test):
import pandas as pd
SV = pd.DataFrame(shap_values)
SV.columns = X_test.columns
SV.index.name = 'sample'
return SV
def plot_Tree_explainer_shap(rf_model, X_train, y_train, X_test, samples=1000):
import shap
from shap.utils import sample
print('fitting...')
rf_model.fit(X_train, y_train)
# explain all the predictions in the test set
print('explaining...')
explainer = shap.TreeExplainer(rf_model)
# rename features:
X_test = X_test.rename(plot_names, axis=1)
if samples is not None:
print('using just {} samples out of {}.'.format(samples, len(X_test)))
shap_values = explainer.shap_values(sample(X_test, samples).values)
shap.summary_plot(shap_values, sample(X_test, samples))
SV = convert_shap_values_to_pandas(shap_values, sample(X_test, samples))
else:
shap_values = explainer.shap_values(X_test.values)
shap.summary_plot(shap_values, X_test)
SV = convert_shap_values_to_pandas(shap_values, X_test)
# shap.summary_plot(shap_values_rf, dfX, plot_size=1.1)
return SV
# def get_mean_std_from_df_feats(df, feats=best, ignore=['New', 'Rooms_345', 'Sale_year'],
# log=['Total_ends']):
# import numpy as np
# f = [x for x in best if x not in ignore]
# df1 = df.copy()
# if log is not None:
# df1[log] = (df1[log]+1).apply(np.log)
# mean = df1[f].mean()
# std = df1[f].std()
# return mean, std
def produce_rooms_new_years_from_ds_var(ds, dsvar='beta_coef', new_cat='Used/New',
new='New', old='Used'):
import numpy as np
import pandas as pd
df = ds[dsvar].to_dataset('year').to_dataframe().T
dfs = []
# 3 rooms old:
dff = df['const'].apply(np.exp).to_frame('Price')
dff['Rooms'] = 3
dff[new_cat] = old
dfs.append(dff)
# 3 rooms new:
dff = (df['const']+df['New']).apply(np.exp).to_frame('Price')
dff['Rooms'] = 3
dff[new_cat] = new
dfs.append(dff)
# 4 rooms old:
dff = (df['const']+df['Rooms_4']).apply(np.exp).to_frame('Price')
dff['Rooms'] = 4
dff[new_cat] = old
dfs.append(dff)
# 4 rooms new:
dff = (df['const']+df['New']+df['Rooms_4']+df['Rooms_4_New']).apply(np.exp).to_frame('Price')
dff['Rooms'] = 4
dff[new_cat] = new
dfs.append(dff)
# 5 rooms old:
dff = (df['const']+df['Rooms_5']).apply(np.exp).to_frame('Price')
dff['Rooms'] = 5
dff[new_cat] = old
dfs.append(dff)
# 5 rooms new:
dff = (df['const']+df['New']+df['Rooms_5']+df['Rooms_5_New']).apply(np.exp).to_frame('Price')
dff['Rooms'] = 5
dff[new_cat] = new
dfs.append(dff)
dff = pd.concat(dfs, axis=0)
dff['year'] = dff.index
return dff
def calculate_pct_change_for_long_ds_var(ds_var_long, year=2000):
d = ds_var_long.pivot(index='year', columns=[
        'Rooms', 'Used/New'], values='Price')
d_ref = d.loc[year]
d /= d_ref
d -= 1
d *= 100
d['year']=d.index
df = d.melt(id_vars=['year'],value_name='Price')
return df
def calculate_period_pct_change_from_ds(ds, syear=2008, eyear=2019):
beta=produce_rooms_new_years_from_ds_var(ds,'beta_coef')
beta = beta.pivot(index='year', columns=['Rooms', 'Used/New'],
values='Price')
beta.columns = ['{}-{}'.format(rooms, new) for rooms, new in beta.columns]
pct = 100 * (beta.loc[eyear] - beta.loc[syear]) / beta.loc[syear]
return pct
def plot_price_rooms_new_from_new_ds(ds, add_cbs_index=False,
units='nis'):
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from cbs_procedures import read_apt_price_index
from cbs_procedures import read_mean_salary
sns.set_theme(style='ticks', font_scale=1.8)
fig, ax = plt.subplots(figsize=(17, 10))
beta = produce_rooms_new_years_from_ds_var(ds, 'beta_coef')
# calculate pct change between 2008 and 2019:
pct = (beta.loc[2019,'Price'].values-beta.loc[2008,'Price'].values)/beta.loc[2008,'Price'].values
pct *= 100
beta1 = beta.copy()
beta1.loc[2019, 'pct_change_2019_2008'] = pct
print(beta1.loc[2019])
# calculate pct change Old/New in 2008:
pct=(beta[beta['Used/New']=='New'].loc[2008,'Price']-beta[beta['Used/New']=='Used'].loc[2008,'Price'])/beta[beta['Used/New']=='Used'].loc[2008,'Price']
pct *= 100
print(pct)
# calculate pct change Old/New in 2019:
pct=(beta[beta['Used/New']=='New'].loc[2019,'Price']-beta[beta['Used/New']=='Used'].loc[2019,'Price'])/beta[beta['Used/New']=='Used'].loc[2019,'Price']
pct *= 100
print(pct)
upper = produce_rooms_new_years_from_ds_var(ds, 'CI_95_upper')
lower = produce_rooms_new_years_from_ds_var(ds, 'CI_95_lower')
if units == 'pct_change':
beta = calculate_pct_change_for_long_ds_var(beta, 2000)
upper = calculate_pct_change_for_long_ds_var(upper, 2000)
lower = calculate_pct_change_for_long_ds_var(lower, 2000)
df = pd.concat([lower, beta, upper], axis=0)
if units == 'dollar':
# approx 4 NIS to 1 $ in whole 2000-2019
df['Price'] /= 4 * 1000 # price in thousands of $
ylabel = 'Apartment Price [Thousands $]'
elif units == 'nis':
ylabel = 'Apartment Price [millions NIS]'
df['Price'] /= 1e6
elif units == 'salary':
sal = read_mean_salary()
df = pd.merge(df, sal, on='year', how='inner')
df['Price'] /= df['mean_salary']
ylabel = 'Mean salary'
elif units == 'pct_change':
ylabel = 'Apartment price change from 2000 [%]'
df['year'] = pd.to_datetime(df['year'], format='%Y')
df = df.reset_index(drop=True)
sns.lineplot(data=df, x='year', y='Price', hue='Rooms', style='Used/New',
ax=ax, palette='tab10', ci='sd', markers=True, markersize=10)
ax.set_ylabel(ylabel)
ax.set_xlabel('')
if add_cbs_index:
cbs = read_apt_price_index(path=work_david, resample='AS',
normalize_year=2000)
cbs = cbs.loc['2000':'2019']
if units == 'pct_change':
cbs /= cbs.iloc[0]
cbs -= 1
cbs *= 100
cbs_label = 'Dwellings price index change from 2000 [%]'
cbs.columns = ['Apartment Price Index']
cbs['year'] = pd.to_datetime(cbs.index, format='%Y')
if units != 'pct_change':
twin = ax.twinx()
else:
twin = ax
sns.lineplot(data=cbs, x='year', y='Apartment Price Index', ax=twin,
color='k', linewidth=2)
twin.set_ylabel('Dwellings Price Index')
twin.set_xlabel('')
twin.set_ylim(50, 300)
ax.grid(True)
sns.despine(fig)
fig.tight_layout()
return fig
def plot_regular_feats_comparison_from_new_ds(ds,reg_name='Predictor',
feats=best_regular1, units='pct_change'):
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
sns.set_theme(style='ticks', font_scale=1.8)
fig, ax = plt.subplots(figsize=(17, 10))
dfs = []
df = ds['beta_coef'].to_dataset('year').to_dataframe().T
dff = df[feats].melt(ignore_index=False)
dff['year'] = dff.index
dfs.append(dff)
df = ds['CI_95_upper'].to_dataset('year').to_dataframe().T
dff = df[feats].melt(ignore_index=False)
dff['year'] = dff.index
dfs.append(dff)
df = ds['CI_95_lower'].to_dataset('year').to_dataframe().T
dff = df[feats].melt(ignore_index=False)
dff['year'] = dff.index
dfs.append(dff)
dff = pd.concat(dfs, axis=0)
dff['regressor'] = dff['regressor'].map(vars_explained_plot_names)
dff = dff.rename({'regressor': reg_name}, axis=1)
dff['year'] = pd.to_datetime(dff['year'], format='%Y')
dff = dff.reset_index(drop=True)
if units == 'pct_change':
dff['value'] = dff['value'].apply(pct_change)
sns.lineplot(data=dff, x='year', y='value', hue=reg_name,
ax=ax, ci='sd', markers=True,
palette='Dark2')
if units == 'pct_change':
ylabel = 'Price change [%]'
else:
ylabel = r'Standardized $\beta$s'
ax.set_ylabel(ylabel)
ax.set_xlabel('')
h, l = ax.get_legend_handles_labels()
ax.legend_.remove()
ax.legend(h, l, ncol=1, title='Predictor', loc='center')
ax.grid(True)
sns.despine(fig)
fig.tight_layout()
return dff
def prepare_new_X_y_with_year(df, year=2000, y_name='Price', features=best1):
import pandas as pd
def return_X_with_interaction(X, dummy_list, var_list):
Xs = []
for num_var in var_list:
X1 = get_design_with_pair_interaction(
X, dummy_list+[num_var])
Xs.append(X1)
X1 = pd.concat(Xs, axis=1)
X1 = X1.loc[:, ~X1.columns.duplicated()]
return X1
# m, s = get_mean_std_from_df_feats(df)
X, y, scaler = produce_X_y(
df, y_name=y_name, year=year, feats=features, dummy='Rooms_345',
plot_Xcorr=True, scale_X=True)
# X[best_regular] -= m
# X[best_regular] /= s
# regular vars vs. time (years):
# X1 = return_X_with_interaction(X, ['trend'], best_regular)
# rooms dummies and new:
X2 = return_X_with_interaction(X, room_dummies, ['New'])
# rooms dummies and years:
# X3 = return_X_with_interaction(X, ['trend'], room_dummies)
# New and years:
# X4 = return_X_with_interaction(X, year_dummies, ['New'])
X = pd.concat([X, X2],axis=1) #, X3, X4], axis=1)
X = X.loc[:, ~X.columns.duplicated()]
return X, y
def prepare_new_X_y(df, y_name='Price'):
import pandas as pd
def return_X_with_interaction(X, dummy_list, var_list):
Xs = []
for num_var in var_list:
X1 = get_design_with_pair_interaction(
X, dummy_list+[num_var])
Xs.append(X1)
X1 = pd.concat(Xs, axis=1)
X1 = X1.loc[:, ~X1.columns.duplicated()]
return X1
X, y, scaler = produce_X_y(
df, y_name=y_name, year=None, feats=best_years, dummy='Rooms_345',
plot_Xcorr=True, scale_X=True)
# regular vars vs. time (years):
X1 = return_X_with_interaction(X, year_dummies, best_regular)
# rooms dummies and new:
X2 = return_X_with_interaction(X, room_dummies, ['New'])
# rooms dummies and years:
# X3 = return_X_with_interaction(X, year_dummies, room_dummies)
# New and years:
# X4 = return_X_with_interaction(X, year_dummies, ['New'])
X = pd.concat([X1, X2],axis=1) #, X3, X4], axis=1)
X = X.loc[:, ~X.columns.duplicated()]
return X, y
def prepare_new_X_y_with_trend(df, y_name='Price'):
import pandas as pd
def return_X_with_interaction(X, dummy_list, var_list):
Xs = []
for num_var in var_list:
X1 = get_design_with_pair_interaction(
X, dummy_list+[num_var])
Xs.append(X1)
X1 = pd.concat(Xs, axis=1)
X1 = X1.loc[:, ~X1.columns.duplicated()]
return X1
X, y, scaler = produce_X_y(
df, y_name=y_name, year='trend', feats=best, dummy='Rooms_345',
plot_Xcorr=True, scale_X=True)
# regular vars vs. time (years):
X1 = return_X_with_interaction(X, ['trend'], best_regular)
# rooms dummies and new:
X2 = return_X_with_interaction(X, room_dummies, ['New'])
# rooms dummies and years:
X3 = return_X_with_interaction(X, ['trend'], room_dummies)
# New and years:
# X4 = return_X_with_interaction(X, year_dummies, ['New'])
X = pd.concat([X1, X2, X3],axis=1) #, X3, X4], axis=1)
X = X.loc[:, ~X.columns.duplicated()]
return X, y
def get_design_with_pair_interaction(data, group_pair):
""" Get the design matrix with the pairwise interactions
Parameters
----------
data (pandas.DataFrame):
Pandas data frame with the two variables to build the design matrix of their two main effects and their interaction
group_pair (iterator):
List with the name of the two variables (name of the columns) to build the design matrix of their two main effects and their interaction
Returns
-------
x_new (pandas.DataFrame):
Pandas data frame with the design matrix of their two main effects and their interaction
"""
import pandas as pd
import itertools
x = pd.get_dummies(data[group_pair])
interactions_lst = list(
itertools.combinations(
x.columns.tolist(),
2,
),
)
x_new = x.copy()
for level_1, level_2 in interactions_lst:
if level_1.split('_')[0] == level_2.split('_')[0]:
continue
x_new = pd.concat(
[
x_new,
x[level_1] * x[level_2]
],
axis=1,
)
x_new = x_new.rename(
columns = {
0: (level_1 + '_' + level_2)
}
)
return x_new
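# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of get_design_with_pair_interaction on a toy frame; the
# column names below are hypothetical and only chosen to mirror the dummies
# used elsewhere in this module.
def _example_pair_interaction():
    import pandas as pd
    toy = pd.DataFrame({'Rooms_345': [0, 1, 0, 1], 'New': [1, 0, 0, 1]})
    # Returns the two main-effect columns plus their product column
    # 'Rooms_345_New' (dummy * dummy interaction).
    return get_design_with_pair_interaction(toy, ['Rooms_345', 'New'])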
def calculate_distance_from_gdf_to_employment_centers(gdf, path=work_david, n=4,
weights='Pop2020', inverse=None,
x_coord_name='ITM-E', y_coord_name='ITM-N'):
from cbs_procedures import read_emploment_centers_2008
import numpy as np
gdf = gdf[~gdf[x_coord_name].isnull()]
gdf = gdf[~gdf[y_coord_name].isnull()]
def mean_distance_to_n_mokdim(x, weights=None):
# x = gdf['geometry']
dists = points.distance(x).to_frame('distance')
dists['Pop2020'] = points['Pop2020'] / 1000
dists = dists.sort_values('distance')
if inverse is not None:
dists['distance'] = dists['distance']**inverse
# return dists['distance'].mean()
if weights is None:
            mean_dist = dists.iloc[0:n]['distance'].mean()
else:
mean_dist = np.average(
dists.iloc[0:n]['distance'], weights=dists.iloc[0:n][weights])
return mean_dist.item()
points = read_emploment_centers_2008(path, shape=True)
if n is not None:
gdf['mean_distance_to_{}_mokdim'.format(n)] = gdf['geometry'].apply(
mean_distance_to_n_mokdim, weights=weights)
else:
for i, row in points.iterrows():
print('calculating distance to {}.'.format(row['NameHE']))
name = 'kms_to_{}'.format(i)
gdf[name] = gdf.distance(row['geometry']) / 1000.0
return gdf
def create_total_inout_timeseries_from_migration_network_and_cbs():
from cbs_procedures import read_yearly_inner_migration
from Migration_main import read_all_multi_year_gpickles
from Migration_main import produce_nodes_time_series
Gs = read_all_multi_year_gpickles()
da = produce_nodes_time_series(Gs)
df_in = da.sel(parameter='total_in').reset_coords(
drop=True).to_dataset('node').to_dataframe()
df_out = da.sel(parameter='total_out').reset_coords(
drop=True).to_dataset('node').to_dataframe()
df = read_yearly_inner_migration()
inflow = df[df['year'] == 2018][[
'city_code', 'inflow']].set_index('city_code').T
inflow = inflow.append(
df[df['year'] == 2019][['city_code', 'inflow']].set_index('city_code').T)
inflow.index = [2018, 2019]
inflow.index.name = 'time'
inflow.columns.name = ''
inflow.columns = [str(x) for x in inflow.columns]
outflow = df[df['year'] == 2018][[
'city_code', 'outflow']].set_index('city_code').T
outflow = outflow.append(
df[df['year'] == 2019][['city_code', 'outflow']].set_index('city_code').T)
outflow.index = [2018, 2019]
outflow.index.name = 'time'
outflow.columns.name = ''
outflow.columns = [str(x) for x in outflow.columns]
df_in = df_in.append(inflow)
df_out = df_out.append(outflow)
return df_in, df_out
def prepare_features_and_save(path=work_david, savepath=None):
from nadlan_EDA import load_nadlan_combined_deal
from cbs_procedures import read_school_coords
from cbs_procedures import read_kindergarten_coords
from cbs_procedures import read_historic_SEI
from cbs_procedures import read_building_starts_ends
from cbs_procedures import calculate_building_rates
from Migration_main import path_glob
from cbs_procedures import calculate_minimum_distance_between_two_gdfs
import numpy as np
import pandas as pd
def add_bgr_func(grp, bgr, name='3Rooms_starts'):
# import numpy as np
year = grp['Sale_year'].unique()[0]
cc = grp['city_code'].unique()[0]
try:
if bgr.columns.dtype == 'object':
gr = bgr.loc[year, str(cc)]
elif bgr.columns.dtype == 'int':
gr = bgr.loc[year, cc]
except KeyError:
gr = np.nan
grp[name] = gr
return grp
df = load_nadlan_combined_deal(
add_bgr=None, add_geo_layers=False, return_XY=True)
    # add distances to kindergartens, schools, building rates for each room type etc.
print('Adding Building Growth rate.')
bdf = read_building_starts_ends()
for room in ['3rooms', '4rooms', '5rooms', 'Total']:
room_begins = calculate_building_rates(
bdf, phase='Begin', rooms=room, fillna=False)
room_ends = calculate_building_rates(
bdf, phase='End', rooms=room, fillna=False)
df = df.groupby(['Sale_year', 'city_code']).apply(
add_bgr_func, room_begins, name='{}_starts'.format(room))
df = df.groupby(['Sale_year', 'city_code']).apply(
add_bgr_func, room_ends, name='{}_ends'.format(room))
# df.loc[df['{}_starts'.format(room)] == 0] = np.nan
# df.loc[df['{}_ends'.format(room)] == 0] = np.nan
print('Adding minimum distance to kindergartens.')
kinder = read_kindergarten_coords()
df = df.groupby('Sale_year').apply(
calculate_minimum_distance_between_two_gdfs, kinder, 'kindergarten')
df.index = df.index.droplevel(0)
df = df.reset_index(drop=True)
print('Adding minimum distance to schools.')
school = read_school_coords()
df = df.groupby('Sale_year').apply(
calculate_minimum_distance_between_two_gdfs, school, 'school')
df.index = df.index.droplevel(0)
df = df.reset_index(drop=True)
print('Adding historic city-level SEI.')
sei = read_historic_SEI()
sei.loc[2018] = sei.loc[2017]
sei.loc[2019] = sei.loc[2017]
df = df.groupby(['Sale_year', 'city_code']).apply(
add_bgr_func, sei, name='SEI')
# add inflow and outflow:
print('Adding Inflow and Outflow')
dfi, dfo = create_total_inout_timeseries_from_migration_network_and_cbs()
df = df.groupby(['Sale_year', 'city_code']).apply(
add_bgr_func, dfi, name='Inflow')
df = df.groupby(['Sale_year', 'city_code']).apply(
add_bgr_func, dfo, name='Outflow')
# finally drop some cols so saving will not take a lot of space:
df = df.drop(['P2015_cluster2', 'Parcel_Lot', 'Sale_Y_Q', 'Sale_quarter', 'Sale_month', 'District_HE', 'm2_per_room',
'StatArea_ID', 'Building', 'street_code', 'Street', 'ObjectID', 'TREND_FORMAT', 'TREND_IS_NEGATIVE', 'POLYGON_ID'], axis=1)
if savepath is not None:
filename = 'Nadaln_with_features.csv'
df.to_csv(savepath/filename, na_rep='None', index=False)
print('{} was saved to {}.'.format(filename, savepath))
return df
def calc_vif(X, dropna=True, asfloat=True, remove_mean=True):
import pandas as pd
from statsmodels.stats.outliers_influence import variance_inflation_factor
if dropna:
print('dropping na.')
X = X.dropna()
if asfloat:
print('considering as float.')
X = X.astype(float)
if remove_mean:
X = X - X.mean()
# Calculating VIF
vif = pd.DataFrame()
vif["variables"] = X.columns
vif["VIF"] = [variance_inflation_factor(
X.values, i) for i in range(X.shape[1])]
return(vif)
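# --- Illustrative usage (not part of the original module) ---
# Minimal sketch of calc_vif on a toy design matrix with made-up data; because
# x1 and x2 are nearly collinear, their VIF values come out large while x3
# stays close to 1.
def _example_calc_vif():
    import numpy as np
    import pandas as pd
    rng = np.random.default_rng(0)
    x1 = rng.normal(size=200)
    toy = pd.DataFrame({'x1': x1,
                        'x2': x1 * 0.95 + rng.normal(scale=0.1, size=200),
                        'x3': rng.normal(size=200)})
    return calc_vif(toy)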
def interpert_beta_coefs(ds, name='beta_coef', dummies=dummies):
import numpy as np
import xarray as xr
ds1 = ds[name].to_dataset('regressor')
if len(ds1.dims) == 0:
df = ds1.expand_dims('dumm').to_dataframe()
else:
df = ds1.to_dataframe()
betas = []
    # interpret dummy variables:
    for dummy in dummies:
        print('interpreting {} variable.'.format(dummy))
ser = 100*(np.exp(df[dummy])-1)
da = ser.to_xarray()
betas.append(da)
    # interpret regular log variables:
    # for a 10% increase in var, the predicted outcome changes by 100*(1.1**beta - 1) percent:
regulars = [x for x in ds['regressor'].values if x not in dummies]
if 'const' in regulars:
regulars.remove('const')
if 'dumm' in regulars:
regulars.remove('dumm')
for regular in regulars:
        print('interpreting {} variable.'.format(regular))
ser = 100*(1.1**df[regular]-1)
da = ser.to_xarray()
betas.append(da)
# now, the constant is the geometric mean of the Price:
da = np.exp(df['const']).to_xarray()
betas.append(da)
beta = xr.merge(betas)
try:
beta = beta.to_array('regressor').drop('dumm')
except ValueError:
beta = beta.to_array('regressor')
# beta = beta.sortby(ds['regressor'])
ds['{}_inter'.format(name)] = beta.transpose().squeeze()
return ds
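# --- Illustrative numbers (not part of the original module) ---
# Quick sanity check of the two interpretation formulas used above, assuming a
# log price model with hypothetical coefficients.
def _example_beta_interpretation():
    import numpy as np
    beta_dummy = 0.12       # e.g. a 'New' dummy in the log-price model
    beta_log_var = 0.80     # e.g. a log-transformed regressor such as area
    dummy_effect = 100 * (np.exp(beta_dummy) - 1)      # ~12.7% price premium
    ten_pct_effect = 100 * (1.1 ** beta_log_var - 1)   # ~7.9% change per +10% in the regressor
    return dummy_effect, ten_pct_effect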
def scale_log(df, cols=None, plus1_cols=None):
import numpy as np
import pandas as pd
if cols is None:
df_scaled = df.copy()
for col in df.columns:
if plus1_cols is None:
df_scaled[col] = df[col].apply(np.log)
else:
print('{} is scaled using log(x+1)!'.format(col))
df_scaled[col] = (df[col]+1).apply(np.log)
else:
print('scaling only {} cols.'.format(cols))
df_sliced = df[cols]
df_scaled = df_sliced.copy()
for col in df_sliced.columns:
if plus1_cols is None:
df_scaled[col] = df_sliced[col].apply(np.log)
else:
print('{} is scaled using log(x+1)!'.format(col))
df_scaled[col] = (df[col]+1).apply(np.log)
df_rest = df[[x for x in df.columns if x not in cols]]
df_scaled = pd.concat([df_scaled, df_rest], axis=1)
df_scaled = df_scaled[[x for x in df.columns]]
return df_scaled
def scale_df(df, scaler, cols=None):
import pandas as pd
print('using {} scaler.'.format(scaler.__repr__()))
if cols is None:
scaled_vals = scaler.fit_transform(df)
df_scaled = pd.DataFrame(scaled_vals)
df_scaled.columns = df.columns
else:
print('scaling only {} cols.'.format(cols))
df_sliced = df[cols]
scaled_vals = scaler.fit_transform(df_sliced)
df_scaled = pd.DataFrame(scaled_vals)
df_scaled.columns = cols
df_rest = df[[x for x in df.columns if x not in cols]]
df_scaled = pd.concat([df_scaled, df_rest], axis=1)
df_scaled = df_scaled[[x for x in df.columns]]
return df_scaled, scaler
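# --- Illustrative usage (not part of the original module) ---
# Minimal sketch: scale two numeric columns with sklearn's StandardScaler and
# leave the non-numeric column untouched; data here is made up.
def _example_scale_df():
    import pandas as pd
    from sklearn.preprocessing import StandardScaler
    toy = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [10.0, 20.0, 30.0],
                        'c': ['x', 'y', 'z']})
    scaled, fitted = scale_df(toy, StandardScaler(), cols=['a', 'b'])
    return scaled, fitted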
def load_nadlan_with_features(path=work_david, years=[2000, 2019], asset_type=apts,
mokdim_version=False):
import pandas as pd
from cbs_procedures import read_price_index
from nadlan_procedures import remove_outlier_by_value_counts
def add_inflation_func(grp, pi,name='Price_inflation_fixed'):
year = grp['Sale_year'].unique()[0]
weight = pi.loc[str(year)].item()
grp[name] = weight*grp['Price']
return grp
if mokdim_version:
df = pd.read_csv(
path/'Nadaln_with_features_and_distance_to_employment_centers.csv', na_values='None')
else:
df = pd.read_csv(path/'Nadaln_with_features.csv', na_values='None')
    print('slicing to {} - {}.'.format(years[0], years[1]))
df = df.loc[(df['Sale_year'] >= years[0]) &
(df['Sale_year'] <= years[1])]
print('choosing {} only.'.format(asset_type))
df = df[df['Type_of_asset'].isin(asset_type)]
print('adding to floor number.')
floor1 = df.loc[(~df['Another_floor_1'].isnull())]['Another_floor_1']
df.loc[floor1.index, 'Floor_number'] = floor1.values
print('adding Netflow')
df['Netflow'] = df['Inflow']-df['Outflow']
    # shift the 0 of netflow to +10000 so that I could
    # use np.log afterwards in feature preprocessing
# also shift the SEI +3 for the log:
# df['SEI'] += 3
# now use price inflation fixing:
# pi = read_price_index()['price_index_without_housing']
# pi = 100 / pi
# df = df.groupby('Sale_year').apply(add_inflation_func, pi)
#create linear trend from dt:
df = df.set_index(pd.to_datetime(df['Date'])).sort_index()
df['trend'] = df.index.to_julian_date()
df['trend'] -= df['trend'].iloc[0]
df['trend'] /= df['trend'].iloc[-1]
# remove number of rooms that are rare:
df = remove_outlier_by_value_counts(df, 'Rooms', thresh=900)
# remove outliers in rooms:
df = remove_outlier_area_per_room(df, col='Area_m2', k=1.5)
# remove 1 rooms with area > 50 m^2:
d = df[df['Rooms']==1]['Area_m2'].dropna()
inds = d[d>50].index
df = df.drop(inds, axis=0)
df = df.reset_index(drop=True)
return df
def create_bootstrapped_samples_for_each_city_code_and_year(df, cols=best_for_bs,
min_deals=500,
min_years=20,
n_items=400,
n_samples=5):
import pandas as pd
df2 = df[cols].dropna()
df2 = df2.reset_index(drop=True)
df1 = filter_df_by_minimum_deals_per_year(
df2, min_deals=min_deals, min_years=min_years, col='Price')
years = [x for x in df1.columns]
cities = [x for x in df1.index]
dfs = []
cnt = 0
for year in years:
for city in cities:
for i in range(n_samples):
df3 = df2[(df2['city_code'] == city) & (df2['Sale_year'] == year)].sample(n=n_items,
replace=False,
random_state=cnt)
cnt += 1
dfs.append(df3)
dff = pd.concat(dfs, axis=0)
dff = dff.reset_index(drop=True)
return dff
def filter_df_by_minimum_deals_per_year(df, min_deals=200, min_years=20, col='Price'):
df1 = df.groupby(['city_code', 'Sale_year'])[col].count().unstack()
n_total_cities = len(df1)
print('original number of cities: ', n_total_cities)
df1 = df1[df1.count(axis=1) == min_years]
n_years_cities = len(df1)
print('number of cities with {} years total: '.format(
min_years), n_years_cities)
df1 = df1[df1 >= min_deals].dropna()
n_deals_cities = len(df1)
print('number of cities with minimum {} deals: '.format(
min_deals), n_deals_cities)
# sort:
df1 = df1.sort_values(by=[x for x in df1.columns], axis=0, ascending=False)
return df1
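# --- Illustrative usage (not part of the original module) ---
# Toy sketch of the filter: keep only cities that appear in every year and have
# at least `min_deals` deals in each year (all values below are made up).
def _example_filter_min_deals():
    import pandas as pd
    toy = pd.DataFrame({
        'city_code': [5000] * 6 + [6100] * 3,
        'Sale_year': [2018, 2018, 2018, 2019, 2019, 2019, 2018, 2018, 2019],
        'Price': list(range(9)),
    })
    # city 6100 has only 2 deals in 2018 and 1 in 2019, so it is dropped.
    return filter_df_by_minimum_deals_per_year(toy, min_deals=3, min_years=2)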
def convert_statsmodels_object_results_to_xarray(est):
import pandas as pd
import xarray as xr
# get main regression results per predictor:
t1 = est.summary().tables[1].as_html()
t1 = pd.read_html(t1, header=0, index_col=0)[0]
t1.columns = ['beta_coef', 'std_err', 't',
'P>|t|', 'CI_95_lower', 'CI_95_upper']
t1.index.name = 'regressor'
# get general results per all the data:
t0 = est.summary().tables[0].as_html()
t0 = pd.read_html(t0, header=None)[0]
t0_ser1 = t0.loc[:, [0, 1]].set_index(0)[1]
t0_ser1.index.name = ''
t0_ser2 = t0.loc[:, [2, 3]].set_index(2)[3].dropna()
t0_ser2.index.name = ''
t0 = pd.concat([t0_ser1, t0_ser2])
t0.index = t0.index.str.replace(':', '')
t2 = est.summary().tables[2].as_html()
t2 = | pd.read_html(t2, header=None) | pandas.read_html |
# Author: <NAME>
# Github: Data-is-Life
# Date: 10/01/2018
import re
import pandas as pd
def rename_columns(strs_to_replace):
    '''Keep DataFrame heading formatting consistent by converting all values
    to a standardized format that is easy to trace back. If left unformatted,
    there could be duplicate columns with the same values, which would make it
    far more challenging to search for homes.'''
modified_list = []
for num in strs_to_replace:
modified_list.append(num.replace('Redfin Estimate', 'redfin_est'
).replace(
'Beds', 'num_bdrs').replace('beds', 'num_bts').replace(
'Baths', 'num_bts').replace('$', 'price').replace(
'Built: ', 'yr_blt').lower().replace('__', '_').replace(
' ', '_').replace(':_', '').replace(':', '').replace(
'.', '').replace('sqft', 'sq_ft').replace('_(', '_').replace(
'(', '_').replace(')', '').replace(',', '').replace(
'minimum', 'min').replace('maximum', 'max').replace(
'bedrooms', 'beds').replace('bathrooms', 'baths').replace(
'#_of_', 'num_').replace('sq. ft.', 'sqft'))
return modified_list
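# --- Illustrative usage (not part of the original script) ---
# Quick sketch of the normalisation this helper performs on scraped headings,
# e.g. ['Redfin Estimate', 'Beds', 'Baths'] -> ['redfin_est', 'num_bdrs', 'num_bts'].
def _example_rename_columns():
    return rename_columns(['Redfin Estimate', 'Beds', 'Baths'])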
def top_info_parser(soup):
    '''Parse the information at the very top of the page, which highlights the
    main attributes of the home, including latitude and longitude.'''
all_top = soup.findAll('div', {'class': 'HomeInfo inline-block'})
top_info_dict = {}
values_ = []
cats_ = []
sqft = []
lat_lon = []
for num in all_top:
# Getting the address
address_ = num.findAll('span', {'class': 'street-address'})
top_info_dict['address'] = [num.text for num in address_][0]
# Getting the city
city_ = num.findAll('span', {'class': 'locality'})
top_info_dict['city'] = [num.text for num in city_][0]
# Getting the state (maybe not needed?)
state_ = num.findAll('span', {'class': 'region'})
top_info_dict['state'] = [num.text for num in state_][0]
# Getting the zip-code
zip_code_ = num.findAll('span', {'class': 'postal-code'})
top_info_dict['zip_code'] = [num.text for num in zip_code_][0]
        '''Getting the Redfin Estimate. This is important: if the home was sold
        a few months ago, the search should focus on the home's current value
        and not on what it sold for. This makes the results far more
        efficient.'''
red_est = num.findAll('div', {'class': 'info-block avm'})
for i in red_est:
values_.append(i.div.text)
cats_.append(i.span.text)
        # If the Redfin estimate is not available, this is the fallback option.
price_ = num.findAll('div', {'class': 'info-block price'})
for i in price_:
values_.append(i.div.text)
cats_.append(i.span.text)
# Getting number of bedrooms
bdrs_ = num.findAll('div', {'data-rf-test-id': 'abp-beds'})
for i in bdrs_:
values_.append(i.div.text)
cats_.append(i.span.text)
# Getting number of bathrooms
bths_ = num.findAll('div', {'data-rf-test-id': 'abp-baths'})
for i in bths_:
values_.append(i.div.text)
cats_.append(i.span.text)
# Getting size of the home
sqft_ = num.findAll('div', {'data-rf-test-id': 'abp-sqFt'})
for i in sqft_:
top_info_dict['sqft'] = i.span.text[:6]
# Getting the year the home was built in
yrblt_ = num.findAll('div', {'class': 'HomeBottomStats'})
for i in yrblt_:
lbls_ = i.findAll('span', {'class': 'label'})
vals_ = i.findAll('span', {'class': 'value'})
for j in lbls_:
cats_.append(j.text)
for k in vals_:
values_.append(k.text)
# Getting latitude and longitude of the home
lat_lon_ = num.findAll('span', {'itemprop': 'geo'})
for i in lat_lon_:
ll_ = i.findAll('meta')
for num in ll_:
lat_lon.append(num['content'])
if len(lat_lon) >= 2:
top_info_dict['latitude'] = lat_lon[0]
top_info_dict['longitude'] = lat_lon[1]
# Checking to make sure the values are present for the fields
# If they are not avaialble, get rid of them.
values_ = [num for num in values_ if num != '—']
cats_ = [num for num in cats_ if num != '—']
# Putting everything in a dictionary, since it removes redundant columns
info_dict = dict(zip(cats_, values_))
# Merging the two dictionaries
all_info_dict = {**top_info_dict, **info_dict}
# Getting the home description
home_description = soup.find('p', {'class': 'font-b1'})
if home_description is not None:
all_info_dict['description'] = home_description.span.text
else:
all_info_dict['description'] = 'N/A'
return all_info_dict
def public_info_parser(soup):
    '''Getting information from tax sources to ensure the home information
    matches across Zillow, agent, and tax records.'''
all_info = soup.findAll('div', {'data-rf-test-id': 'publicRecords'})
label_list = []
values_list = []
for num in all_info:
cats = num.findAll('span', {'class': 'table-label'})
for i in cats:
label_list.append(i.text)
for num in all_info:
vals = num.findAll('div', {'class': 'table-value'})
for i in vals:
values_list.append(i.text)
public_info_dict = dict(zip(label_list, values_list))
return public_info_dict
def school_parser(soup):
    '''Getting schools, the grades they serve, and their GreatSchools rating.
    This will be added as a feature for homes bigger than three bedrooms and
    for all single-family homes.'''
school_dict = {}
school_info = soup.findAll('div', {'class': "name-and-info"})
school_names = []
school_grades = []
school_ratings = []
for num in school_info:
s_name = num.findAll('div', {'data-rf-test-name': 'school-name'})
s_grade = num.findAll('div', {'class': re.compile('^sub-info')})
s_rating = num.findAll('div', {'class': 'gs-rating-row'})
for i in s_name:
school_names.append(i.text)
for j in s_grade:
school_grades.append(j.text.replace(
' • Serves this home', '').replace(' • ', ' - '))
for k in s_rating:
school_ratings.append(
k.text[-5:].replace(' ', '').replace('/10', ''))
w = 0
while w < len(school_names):
        if ('Public' in school_grades[w] and ((
                ('k' in school_grades[w] or 'Pre' in school_grades[w])
                or '5' in school_grades[w]) or 'Elementary' in school_names[w])):
school_dict['elem_school_name'] = school_names[w]
school_dict['elem_school_grades'] = school_grades[
w].split(' - ', 1)[1]
school_dict['elem_school_rating'] = school_ratings[w]
w += 1
else:
w += 1
w = 0
while w < len(school_names):
        if ('Public' in school_grades[w] and ((
                ('7' in school_grades[w] or '8' in school_grades[w])
                or 'Middle' in school_names[w]) or 'Junior' in school_names[w])):
school_dict['middle_school_name'] = school_names[w].title()
school_dict['middle_school_grades'] = school_grades[
w].split(' - ', 1)[1].title()
school_dict['middle_school_rating'] = school_ratings[w].title()
w += 1
else:
w += 1
w = 0
while w < len(school_names):
        if ('Public' in school_grades[w] and (
                ('12' in school_grades[w] or 'High' in school_names[w]))):
school_dict['high_school_name'] = school_names[w].title()
school_dict['high_school_grades'] = school_grades[
w].split(' - ', 1)[1].title()
school_dict['high_school_rating'] = school_ratings[w].title()
w += 1
else:
w += 1
if 'elem_school_name' not in school_dict.keys():
school_dict['elem_school_name'] = 'N/A'
school_dict['elem_school_grades'] = 'N/A'
school_dict['elem_school_rating'] = 'N/A'
if 'middle_school_name' not in school_dict.keys():
school_dict['middle_school_name'] = 'N/A'
school_dict['middle_school_grades'] = 'N/A'
school_dict['middle_school_rating'] = 'N/A'
if 'high_school_name' not in school_dict.keys():
school_dict['high_school_name'] = 'N/A'
school_dict['high_school_grades'] = 'N/A'
school_dict['high_school_rating'] = 'N/A'
return school_dict
def feats_parser(soup):
    '''All the features listed by the agent/broker who entered the listing
    on the MLS.'''
all_home_feats = soup.findAll('span', {'class': "entryItemContent"})
feat_cats = []
feat_vals = []
for num in all_home_feats:
feat_cats.append(num.contents[0])
for num in all_home_feats:
feat_vals.append(num.span.text)
cats_set = set(feat_cats)
vals_set = set(feat_vals)
redundant = cats_set & vals_set
for num in redundant:
feat_cats.remove(num)
feat_vals.remove(num)
feat_cats = [str(num) for num in feat_cats]
feat_vals = [str(num) for num in feat_vals]
feats_dict = dict(zip(feat_cats, feat_vals))
extra_feats = []
for k, v in feats_dict.items():
if 'span>' in k:
extra_feats.append(k)
for num in extra_feats:
if num in feats_dict.keys():
feats_dict.pop(num)
# This is to replace all the HTML tags
extra_feats = [num.replace('<span>', '').replace('</span>', '').replace(
'<a href=', '').replace('"', '').replace(' rel=nofollow', '').replace(
' target=_blank>', '').replace('Virtual Tour (External Link)', '').replace(
'</a', '').replace('>', '').replace('&', '&').replace('(s)', '') for num
in extra_feats]
x_feat_string = ', '.join([num for num in extra_feats])
x_feat_string = x_feat_string.split(sep=', ')
x_feat_list = list(set(x_feat_string))
feats_dict['extra_feats'] = ', '.join([num for num in x_feat_list])
return feats_dict
def additional_info(soup):
'''Need to get additional information, so we don't miss anything that
could prove to be critical later.'''
cats_ = soup.findAll('span', {'class': re.compile('^header ')})
cats_ = [num.text for num in cats_]
vals_ = soup.findAll('span', {'class': re.compile('^content ')})
vals_ = [num.text for num in vals_]
cats_ = [str(num).replace('Property Type', 'prop_type').replace(
'HOA Dues', 'hoa_fees').replace('Type', 'prop_type') for num in cats_]
vals_ = [str(num).replace('$', '').replace('/month', '').replace(
'Hi-Rise', 'Condo').replace('Residential', 'Single Family Residence')
for num in vals_]
return dict(zip(cats_, vals_))
def info_from_property(soup):
''' Putting all the information together in a Dataframe and removing any
duplicate columns.'''
top_info_dict = top_info_parser(soup)
public_info_dict = public_info_parser(soup)
school_dict = school_parser(soup)
all_home_feats = feats_parser(soup)
mid_info_feats = additional_info(soup)
df1 = pd.DataFrame(top_info_dict, index=[1])
df2 = pd.DataFrame(public_info_dict, index=[1])
df3 = pd.DataFrame(school_dict, index=[1])
df4 = pd.DataFrame(all_home_feats, index=[1])
df5 = pd.DataFrame(mid_info_feats, index=[1])
df = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
from itertools import product
import numpy as np
import pytest
import pandas.util.testing as tm
from pandas import DatetimeIndex, MultiIndex
from pandas._libs import hashtable
from pandas.compat import range, u
@pytest.mark.parametrize('names', [None, ['first', 'second']])
def test_unique(names):
mi = MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]], names=names)
res = mi.unique()
exp = MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names)
tm.assert_index_equal(res, exp)
mi = MultiIndex.from_arrays([list('aaaa'), list('abab')],
names=names)
res = mi.unique()
exp = MultiIndex.from_arrays([list('aa'), list('ab')], names=mi.names)
tm.assert_index_equal(res, exp)
mi = MultiIndex.from_arrays([list('aaaa'), list('aaaa')], names=names)
res = mi.unique()
exp = MultiIndex.from_arrays([['a'], ['a']], names=mi.names)
tm.assert_index_equal(res, exp)
# GH #20568 - empty MI
mi = MultiIndex.from_arrays([[], []], names=names)
res = mi.unique()
tm.assert_index_equal(mi, res)
def test_unique_datetimelike():
idx1 = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01',
'2015-01-01', 'NaT', 'NaT'])
idx2 = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02',
'2015-01-02', 'NaT', '2015-01-01'],
tz='Asia/Tokyo')
result = MultiIndex.from_arrays([idx1, idx2]).unique()
eidx1 = DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT'])
eidx2 = DatetimeIndex(['2015-01-01', '2015-01-02',
'NaT', '2015-01-01'],
tz='Asia/Tokyo')
exp = MultiIndex.from_arrays([eidx1, eidx2])
tm.assert_index_equal(result, exp)
@pytest.mark.parametrize('level', [0, 'first', 1, 'second'])
def test_unique_level(idx, level):
# GH #17896 - with level= argument
result = idx.unique(level=level)
expected = idx.get_level_values(level).unique()
tm.assert_index_equal(result, expected)
# With already unique level
mi = MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]],
names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
tm.assert_index_equal(result, expected)
# With empty MI
mi = MultiIndex.from_arrays([[], []], names=['first', 'second'])
result = mi.unique(level=level)
    expected = mi.get_level_values(level)
    tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('dropna', [True, False])
def test_get_unique_index(idx, dropna):
mi = idx[[0, 1, 0, 1, 1, 0, 0]]
expected = mi._shallow_copy(mi[[0, 1]])
result = mi._get_unique_index(dropna=dropna)
assert result.unique
tm.assert_index_equal(result, expected)
def test_duplicate_multiindex_labels():
# GH 17464
# Make sure that a MultiIndex with duplicate levels throws a ValueError
with pytest.raises(ValueError):
mi = MultiIndex([['A'] * 10, range(10)], [[0] * 10, range(10)])
# And that using set_levels with duplicate levels fails
mi = MultiIndex.from_arrays([['A', 'A', 'B', 'B', 'B'],
[1, 2, 1, 2, 3]])
with pytest.raises(ValueError):
mi.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]],
inplace=True)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], [1, 1, 2],
[1, 'a', 1]])
def test_duplicate_level_names(names):
# GH18872, GH19029
mi = MultiIndex.from_product([[0, 1]] * 3, names=names)
assert mi.names == names
# With .rename()
mi = MultiIndex.from_product([[0, 1]] * 3)
mi = mi.rename(names)
assert mi.names == names
# With .rename(., level=)
mi.rename(names[1], level=1, inplace=True)
mi = mi.rename([names[0], names[2]], level=[0, 2])
assert mi.names == names
def test_duplicate_meta_data():
# GH 10115
mi = MultiIndex(
levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
for idx in [mi,
mi.set_names([None, None]),
mi.set_names([None, 'Num']),
mi.set_names(['Upper', 'Num']), ]:
assert idx.has_duplicates
assert idx.drop_duplicates().names == idx.names
def test_has_duplicates(idx, idx_dup):
# see fixtures
assert idx.is_unique is True
assert idx.has_duplicates is False
assert idx_dup.is_unique is False
assert idx_dup.has_duplicates is True
mi = MultiIndex(levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
assert mi.is_unique is False
assert mi.has_duplicates is True
def test_has_duplicates_from_tuples():
# GH 9075
t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),
(u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),
(u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),
(u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),
(u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),
(u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122),
(u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160),
(u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180),
(u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143),
(u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128),
(u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129),
(u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111),
(u('x'), u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114),
(u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'), 121),
(u('x'), u('out'), u('z'), 31, u('y'), u('in'), u('z'), 126),
(u('x'), u('out'), u('z'), 32, u('y'), u('in'), u('z'), 155),
(u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123),
(u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)]
mi = MultiIndex.from_tuples(t)
assert not mi.has_duplicates
def test_has_duplicates_overflow():
# handle int64 overflow if possible
def check(nlevels, with_nulls):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
if with_nulls: # inject some null values
labels[500] = -1 # common nan value
labels = [labels.copy() for i in range(nlevels)]
for i in range(nlevels):
labels[i][500 + i - nlevels // 2] = -1
labels += [np.array([-1, 1]).repeat(500)]
else:
labels = [labels] * nlevels + [np.arange(2).repeat(500)]
levels = [level] * nlevels + [[0, 1]]
# no dups
mi = MultiIndex(levels=levels, labels=labels)
assert not mi.has_duplicates
# with a dup
if with_nulls:
def f(a):
return np.insert(a, 1000, a[0])
labels = list(map(f, labels))
mi = MultiIndex(levels=levels, labels=labels)
else:
values = mi.values.tolist()
mi = MultiIndex.from_tuples(values + [values[0]])
assert mi.has_duplicates
# no overflow
check(4, False)
check(4, True)
# overflow possible
check(8, False)
check(8, True)
@pytest.mark.parametrize('keep, expected', [
('first', np.array([False, False, False, True, True, False])),
('last', np.array([False, True, True, False, False, False])),
(False, np.array([False, True, True, True, True, False]))
])
def test_duplicated(idx_dup, keep, expected):
result = idx_dup.duplicated(keep=keep)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('keep', ['first', 'last', False])
def test_duplicated_large(keep):
# GH 9125
n, k = 200, 5000
levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)]
labels = [np.random.choice(n, k * n) for lev in levels]
mi = MultiIndex(levels=levels, labels=labels)
result = mi.duplicated(keep=keep)
expected = hashtable.duplicated_object(mi.values, keep=keep)
| tm.assert_numpy_array_equal(result, expected) | pandas.util.testing.assert_numpy_array_equal |
import requests
import json
import pandas as pd
import datetime as dt
def get_binance_bars(symbol, interval, startTime, endTime):
url = "https://api.binance.com/api/v3/klines" #where are you getting the data from (read docs)
startTime = str(int(startTime.timestamp() * 1000))
endTime = str(int(endTime.timestamp() * 1000))
limit = '1000'
req_params = {"symbol": symbol, 'interval': interval, 'startTime': startTime, 'endTime': endTime, 'limit': limit}
df = pd.DataFrame(json.loads(requests.get(url, params=req_params).text))
if (len(df.index) == 0):
return None
df = df.iloc[:, 0:6]
df.columns = ['datetime', 'open', 'high', 'low', 'close', 'volume']
df.open = df.open.astype("float")
df.high = df.high.astype("float")
df.low = df.low.astype("float")
df.close = df.close.astype("float")
df.volume = df.volume.astype("float")
df['adj_close'] = df['close']
df.index = [dt.datetime.fromtimestamp(x / 1000.0) for x in df.datetime]
return df
months = [dt.datetime(2020, i, 1) for i in range(1, 13)]
months.append(dt.datetime(2021, 1, 1))
# Change ticker(BTCUSDT) and interval(1m) to whichever you want
df_list = [get_binance_bars('BTCUSDT', '1m', months[i], months[i+1] - dt.timedelta(0, 1)) for i in range(0, len(months) - 1)]
# Concatenate list of dfs in 1 df
df = pd.concat(df_list)
df_list = []
last_datetime = dt.datetime(2017, 9, 1)
while True:
print(last_datetime)
# Change ticker(BTCUSDT) and interval(1m) to whatever you want
new_df = get_binance_bars('BTCUSDT', '1m', last_datetime, dt.datetime.now())
if new_df is None:
break
df_list.append(new_df)
last_datetime = max(new_df.index) + dt.timedelta(0, 1)
df = | pd.concat(df_list) | pandas.concat |
#!/usr/bin/env python
# coding=utf-8
"""
A highly optimized class for bill of materials consolidation and costing
"""
from __future__ import absolute_import
from pandas import DataFrame
from six import iteritems
import os
import sys
DOCKER = os.environ.get('DOCKER') in ('transform', 'ipython')
if DOCKER:
sys.path.append('/var/vol/code')
__author__ = "<NAME>"
__copyright__ = "© Copyright 2015, Tartan Solutions, Inc"
__credits__ = ["<NAME>"]
__license__ = "Proprietary"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
class Node(object):
"""A BOM hierarchy node object"""
def __init__(self, parent_obj, child_id, makeup_volume=1):
"""Initializes node object
:param parent_obj: parent object
:type parent_obj: object
:param child_id: child node key
:type child_id: str or unicode
        :param makeup_volume: number of child units used per unit of the parent
        :type makeup_volume: int or float
:returns: None
:rtype: None
"""
self.id = child_id
self.parents = []
self.children = {}
self.cost = None
self.override_cost = None
# Set the reference to an assembly
if parent_obj is not None:
parent_obj.add_child(self, makeup_volume)
            # Create a reference so each child knows who uses it
if parent_obj not in self.parents:
self.parents.append(parent_obj)
def __repr__(self):
return "<(Node ID: {} ({})>".format(self.id, self.get_cost())
def get_cost(self):
if self.cost is None:
# Need to calculate the cost
self.calculate_cost()
return self.cost
def set_cost(self, value):
self.cost = value
def set_override_cost(self, value):
self.override_cost = value
def add_child(self, child_obj, makeup_volume):
"""Add a child to the node
:param child_obj: node object to add
:type child_obj: object
        :param makeup_volume: number of child units used per unit of the parent
        :type makeup_volume: int or float
:returns: None
:rtype: None
"""
# Add the child to this parent's list of children
# Also handles a makeup volume change
self.children[child_obj] = makeup_volume
def remove_child(self, child_id):
"""Removes child from node
:param child_id: child node key to remove
:type child_id: str or unicode
:returns: None
:rtype: None
"""
current_children = self.get_children()
temp_children = {}
for c in current_children:
if c.id != child_id:
temp_children[c] = current_children[c]
# The new list should be missing the child.
self.children = temp_children
def get_parents(self):
"""Returns parent object of node
:returns: Parent object
:rtype: object
"""
return self.parents
def get_siblings(self, parent_id):
"""Finds siblings of the node
:returns: list of siblings node objects including current node
:rtype: list
"""
for p in self.parents:
if p.id == parent_id:
return p.get_children()
def get_children(self):
"""Returns list of children node objects
:returns: list of child node objects
:rtype: list
"""
return self.children
def is_child_of(self, parent_id):
"""Checks if the node is a child of the specified parent
:param parent_id: parent node key
:type parent_id: str or unicode
:returns: True if node descends from the parent
:rtype: bool
"""
for p in self.parents:
if p.id == parent_id:
return True
return False
def is_parent_of(self, child_id):
"""Checks if the node is a parent of the specified child
:param child_id: child node key
:type child_id: str or unicode
:returns: True if child descends from the node
:rtype: bool
"""
for c in self.get_children():
if c.id == child_id:
return True
return False
def calculate_cost(self):
"""Calculates the roll-up cost of this node based on
the costs of sub-components
:returns: None
:rtype: None
"""
if self.override_cost is None:
# Ask all children for their costs and multiply by makeup volume
# This will invoke a recursive request for costs down to the
# lowest level component
cost = 0
for c in self.children:
makeup_volume = self.children[c]
if makeup_volume != 0:
child_cost = c.get_cost()
makeup_cost = child_cost * self.children[c]
cost += makeup_cost
self.cost = cost
else:
# An Override cost has been supplied
# DO NOT calculate the cost, just use the override
self.cost = self.override_cost
def reset_cost(self):
self.cost = None
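# --- Illustrative usage (not part of the original module) ---
# Tiny sketch of the Node cost roll-up: an assembly made of two parts with
# hypothetical unit costs.
def _example_node_costing():
    root = Node(None, 'assembly')
    bolt = Node(root, 'bolt', makeup_volume=4)
    plate = Node(root, 'plate', makeup_volume=2)
    bolt.set_cost(0.5)
    plate.set_cost(3.0)
    return root.get_cost()  # 4 * 0.5 + 2 * 3.0 = 8.0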
class BOM(object):
"""BOM Hierarchy Class for fast BOM hierarchy operations"""
def __init__(self, load_path=None):
"""Class init function sets up basic structure
:param load_path: optional path to saved hierarchy load file to load initially
:type load_path: str or unicode
:returns: None
:rtype: None
"""
self.h_direct = {}
self.h_children = {}
self.clear()
if load_path is not None:
self.load(load_path)
def add_node(self, parent_id, child_id, makeup_volume=1):
"""Adds a node to the main hierarchy
:param parent_id: parent node key
:type parent_id: str or unicode
:param child_id: child node key
:type child_id: str or unicode
        :param makeup_volume: number of child units used per unit of the parent
        :type makeup_volume: int or float
:returns: None
:rtype: None
"""
try:
parent_obj = self.get_node(parent_id)
except:
# Parent does not exist yet. Handle out of sequence data gracefully.
root_parent = self.get_node('root')
parent_obj = Node(root_parent, parent_id)
self.h_direct[parent_id] = parent_obj
if child_id in self.h_direct:
# This already exists.
node = self.h_direct[child_id]
parent_obj.add_child(node, makeup_volume)
else:
# Doesn't exist. Simple add.
node = Node(parent_obj, child_id, makeup_volume)
self.h_direct[child_id] = node
def delete_node(self, node_id):
"""Deletes the node and removes all aliases and properties
:param node_id: node key
:type node_id: str or unicode
:returns: None
:rtype: None
"""
# Delete the node and index reference
try:
parents = self.get_parents(node_id)
except:
# Not present. No need to do anything
pass
else:
# Remove from main hierarchy
del self.h_direct[node_id]
for p in parents:
p.remove_child(node_id)
def get_node(self, node_id):
"""Gets the node object
:param node_id: node key
:type node_id: str or unicode
:returns: Node object
:rtype: object
"""
try:
return self.h_direct[node_id]
except:
raise Exception('No node found with the name %s' % node_id)
def reset_costs(self):
"""Resets all costs to uncalculated value
:returns: None
:rtype: None
"""
for node_id in self.h_direct:
self.h_direct[node_id].reset_cost()
def set_cost(self, node_id, value):
self.h_direct[node_id].set_cost(value)
def set_override_cost(self, node_id, value):
self.h_direct[node_id].set_override_cost(value)
def get_all_costs(self):
"""Gets the cost of all nodes
:returns: node cost
:rtype: pandas.DataFrame
"""
final = []
for node_id in self.h_direct:
temp = (node_id, self.h_direct[node_id].get_cost())
final.append(temp)
headers = ['node', 'cost']
df = DataFrame(final, columns=headers)
return df
def get_parents(self, node_id):
"""Finds parent of node
:param node_id: node key
:type node_id: str or unicode
:returns: node object of parent
:rtype: object
"""
return self.get_node(node_id).get_parents()
def get_parent_ids(self, node_id):
"""Finds parent of node
:param node_id: node key
:type node_id: str or unicode
:returns: node key of parent
:rtype: str or unicode
"""
try:
parents = self.get_parents(node_id)
return [p.id for p in parents]
except:
return None
def get_siblings(self, node_id, parent_id):
"""Finds sibling nodes of specified node
:param node_id: node key
:type node_id: str or unicode
:returns: node objects of all siblings including the current node
:rtype: list
"""
return self.get_node(node_id).get_siblings(parent_id)
def get_sibling_ids(self, node_id, parent_id):
"""Finds sibling nodes of specified node
:param node_id: node key
:type node_id: str or unicode
:returns: node keys of all siblings including the current node
:rtype: list
"""
objs = self.get_siblings(node_id, parent_id)
return [o.id for o in objs]
def get_children(self, node_id):
"""Finds children of node
:param node_id: node key
:type node_id: str or unicode
:returns: list of children node objects
:rtype: list
"""
return self.get_node(node_id).get_children()
def get_children_ids(self, node_id):
"""Finds children of node
:param node_id: node key
:type node_id: str or unicode
:returns: list of children node keys
:rtype: list
"""
objs = self.get_children(node_id)
return [o.id for o in objs]
def is_child_of(self, node_id, parent_id):
"""Check if node is a child of the parent node
:param node_id: child node key
:type node_id: str or unicode
:param parent_id: parent node key
:type parent_id: str or unicode
:returns: True if the child descends from the parent
:rtype: bool
"""
return self.get_node(node_id).is_child_of(parent_id)
def is_parent_of(self, node_id, child_id):
"""Checks if node is a parent of the child node
:param node_id: parent node key
:type node_id: str or unicode
:param child_id: child node key
:type child_id: str or unicode
:returns: True if the child descends from parent
:rtype: bool
"""
return self.get_node(node_id).is_parent_of(child_id)
def _get_main_list_format(self, node_id):
"""Generates the parent child list recursively for saving
:param node_id: current node to process
:type node_id: str or unicode
:returns: List of lists with parent child information
:rtype: list
"""
final = []
children = self.get_children(node_id)
for c in children:
temp = [str(node_id), str(c.id), children[c]]
final.append(temp)
sub_children = self._get_main_list_format(c.id)
if len(sub_children) > 0:
final += sub_children
return final
def save(self, path):
"""Saves the hierarchy, alias, and property info in one file
:param path: File path to save out put
:type path: str or unicode
:returns: None
:rtype: None
"""
self.save_hierarchy(path, 'root')
def load(self, path):
"""Loads hierarchy, alias, and propeperty
:param path: File path to load
:type path: str or unicode
:returns: None
:rtype: None
"""
self.load_hierarchy(path)
def get_bom(self, top_node='root'):
"""Created dataframe of BOM makeup structure
:param top_node:
:type top_node: str or unicode
:returns: Parent Child Dataframe
:rtype: pandas.DataFrame
"""
headers = ['parent', 'child', 'makeup_volume']
pc_list = self._get_main_list_format(top_node)
df = DataFrame(pc_list, columns=headers)
return df
def load_dataframe(self, df):
"""Loads a well formed dataframe into the hierarchy object
Columns expected:
- parent
- child
- makeup_volume
:param df: The dataframe containing at least parent and child columns
:type df: dataframe
:returns: None
:rtype: None
"""
if df is not None:
column_info = []
for column_name, data_type in iteritems(df.dtypes):
column_info.append(column_name)
# Check to make sure all required columns are present
if 'parent' not in column_info:
raise Exception('Missing parent column. Found the following columns: {0}'.format(str(column_info)))
if 'child' not in column_info:
raise Exception('Missing child column. Found the following columns: {0}'.format(str(column_info)))
if 'makeup_volume' not in column_info:
raise Exception(
'Missing makeup_volume column. Found the following columns: {0}'.format(str(column_info)))
            # order the df columns (parent, child, makeup_volume)
# this enables using itertuples instead of iterrows
df = df[['parent', 'child', 'makeup_volume']]
            # Iterate over the data and build the hierarchy using the add_node method
for r in df.itertuples():
                # Tuple is formed as (index, parent, child, makeup_volume)
self.add_node(r[1], r[2], r[3])
def clear(self):
self.clear_hierarchy()
def clear_hierarchy(self):
"""Clears the main and alternate hierarchies
:returns: None
:rtype: None
"""
self.h_direct = {}
self.h_children = {}
node = Node(None, 'root')
self.h_direct['root'] = node
def _get_preprocessed_main_format(self, node_id, left=0, volume_multiplier=1, indent=0):
"""Generates a highly optimized reporting format for export of main hierarchy
:param node_id: current node key
:type node_id: str or unicode
:param left: current left counter
:type left: int
        :param volume_multiplier: cumulative makeup-volume multiplier inherited from ancestors
        :type volume_multiplier: int or float
:returns: list of parent child records
:rtype: list
"""
final = []
# If this recursed event doesn't have any records return the same value for left and right
right = left
children = self.get_children(node_id)
for c in children:
makeup_volume = children[c]
effective_volume = makeup_volume * volume_multiplier
# Get the child records recursively
sub_right, sub_children = self._get_preprocessed_main_format(c.id, left + 1, effective_volume, indent + 1)
# Now figure out the right side number based on how many elements are below
right = sub_right + 1
if len(sub_children) > 0:
is_leaf = False
else:
is_leaf = True
temp = [str(node_id), str(c.id), makeup_volume, effective_volume, is_leaf, left, right, indent]
final.append(temp)
if is_leaf is False:
final += sub_children
return (right, final)
def get_frame(self, table, top_node='root'):
"""Generates a highly optimized reporting format for export
        :param table: table name (not used by this method)
        :type table: str or unicode
:param top_node: node key to start export at
:type top_node: str or unicode
:returns: None
:rtype: None
"""
headers = ['parent', 'child', 'makeup_volume', 'effective_makeup_volume', 'leaf', 'left', 'right', 'indent']
right, pc_list = self._get_preprocessed_main_format(top_node)
df = DataFrame(pc_list, columns=headers)
return df
def get_pretty_frame(self, table, top_node='root'):
indent = ' '
template = '{0}{1} x {2} ({3})'
right, pc_list = self._get_preprocessed_main_format(top_node)
final = []
for p in pc_list:
temp = []
parent = p[0]
child = p[1]
makeup_volume = p[2]
effective_makeup_volume = p[3]
#leaf = p[4]
#left = p[5]
#right = p[6]
indent_mult = p[7]
indent_txt = indent * indent_mult
txt = template.format(indent_txt, makeup_volume, child, effective_makeup_volume)
temp.append(txt)
temp.append(parent)
temp.append(child)
temp.append(makeup_volume)
temp.append(effective_makeup_volume)
final.append(temp)
headers = ['friendly', 'parent', 'child', 'makeup_volume', 'effective_makeup_volume']
df = | DataFrame(final, columns=headers) | pandas.DataFrame |
#!/usr/bin/env python
import tweepy
import pandas as pd
import re
from keys import Keys
'''
Regex sourced from StackOverflow.
https://stackoverflow.com/a/11332580
'''
def clean(text):
no_url = re.sub(r'http\S+', '', text)
no_lines = no_url.replace('\n', ' ').replace('\r', '').replace('\"', '').replace('\'', '').replace(' ', '')
no_hash = re.sub('@[^\s]+', '', no_lines)
return no_hash.strip()
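# --- Illustrative usage (not part of the original script) ---
# Rough sketch of what clean() does to a raw tweet: the URL and the @mention
# are stripped; exact whitespace handling follows the replace chain above.
def _example_clean():
    return clean('Great thread!\n@someone check https://t.co/abc123')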
'''
This requires some manual monitoring, as it's not always clear when the Twitter
API will cut you off because of rate limits.
Current solution is to save after every handle, and then load the file if it
already exists (to continue appending to it).
This was prior to discovering `wait_on_rate_limit`.
'''
def download_tweets(handles, recover=False):
keys = Keys()
auth = tweepy.OAuthHandler(keys.consumer_token, keys.consumer_secret)
auth.set_access_token(keys.access_token, keys.access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True)
text = []
if recover:
text = pd.read_csv('tweets.csv', header=None, squeeze=True).values.tolist()
for handle in handles:
print(f'Downloading tweets for {handle}...')
try:
tweets = tweepy.Cursor(api.user_timeline, id=handle, tweet_mode='extended').items()
        except tweepy.TweepError as e:
            print(e)
            continue
for tweet in tweets:
if hasattr(tweet, 'retweeted_status'):
try:
text.append(clean(tweet.retweeted_status.extended_tweet['full_text']))
except AttributeError:
text.append(clean(tweet.retweeted_status.full_text))
else:
try:
text.append(clean(tweet.extended_tweet['full_text']))
except AttributeError:
text.append(clean(tweet.full_text))
print(f'Saving tweets for {handle}...')
| pd.DataFrame(text) | pandas.DataFrame |
from pcpca import CPCA, PCPCA
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.decomposition import PCA
import sys
from scipy.stats import ttest_ind
sys.path.append("../../../clvm")
from clvm import CLVM
DATA_PATH = "../../../data/mouse_protein_expression/clean/Data_Cortex_Nuclear.csv"
N_COMPONENTS = 2
if __name__ == "__main__":
# Read in data
data = pd.read_csv(DATA_PATH)
data = data.fillna(0)
# Get names of proteins
protein_names = data.columns.values[1:78]
data.Genotype[data.Genotype == "Control"] = "Non-DS"
data.Genotype[data.Genotype == "Ts65Dn"] = "DS"
# Background
Y_df = data[
(data.Behavior == "C/S")
& (data.Genotype == "Non-DS")
& (data.Treatment == "Saline")
]
Y = Y_df[protein_names].values
Y -= Y.mean(0)
Y /= Y.std(0)
Y = Y.T
# Foreground
X_df = data[(data.Behavior == "S/C") & (data.Treatment == "Saline")]
# X_df = pd.concat([X_df.iloc[:177, :], X_df.iloc[180:, :]], axis=0)
X = X_df[protein_names].values
X -= X.mean(0)
X /= X.std(0)
X = X.T
n, m = X.shape[1], Y.shape[1]
import matplotlib
font = {"size": 30}
matplotlib.rc("font", **font)
matplotlib.rcParams["text.usetex"] = True
gamma_range_cpca = list(np.linspace(0, 400, 40))
gamma_range_pcpca = list(np.linspace(0, 0.99, 40))
# gamma_range_pcpca = [0, 0.5, 0.9]
# print(X[:5, :])
# import ipdb; ipdb.set_trace()
cluster_scores_cpca = []
cpca_gamma_plot_list = []
for ii, gamma in enumerate(gamma_range_cpca):
cpca = CPCA(gamma=n / m * gamma, n_components=N_COMPONENTS)
X_reduced, Y_reduced = cpca.fit_transform(X, Y)
X_reduced = (X_reduced.T / X_reduced.T.std(0)).T
try:
kmeans = KMeans(n_clusters=2, random_state=0).fit(X_reduced.T)
except:
cpca_fail_gamma = gamma
break
cpca_gamma_plot_list.append(gamma)
true_labels = pd.factorize(X_df.Genotype)[0]
cluster_score = silhouette_score(X=X_reduced.T, labels=true_labels)
print("gamma'={}, cluster score={}".format(gamma, cluster_score))
cluster_scores_cpca.append(cluster_score)
cluster_scores_pcpca = []
pcpca_gamma_plot_list = []
for ii, gamma in enumerate(gamma_range_pcpca):
# if gamma == 0.9:
# import ipdb; ipdb.set_trace()
pcpca = PCPCA(gamma=n / m * gamma, n_components=N_COMPONENTS)
X_reduced, Y_reduced = pcpca.fit_transform(X, Y)
if pcpca.sigma2_mle <= 0:
pcpca_fail_gamma = gamma
break
X_reduced = (X_reduced.T / X_reduced.T.std(0)).T
kmeans = KMeans(n_clusters=2, random_state=0).fit(X_reduced.T)
pcpca_gamma_plot_list.append(gamma)
true_labels = pd.factorize(X_df.Genotype)[0]
cluster_score = silhouette_score(X=X_reduced.T, labels=true_labels)
print("gamma'=*{}, cluster score={}".format(gamma, cluster_score))
cluster_scores_pcpca.append(cluster_score)
## Fit CLVM
# clvm = CLVM(
# data_dim=X.shape[0],
# n_bg=m,
# n_fg=n,
# latent_dim_shared=N_COMPONENTS,
# latent_dim_fg=N_COMPONENTS,
# )
# clvm.init_model()
# clvm.fit_model(Y, X, n_iters=10000)
# zy = clvm.qzy_mean.numpy().T
# zx = clvm.qzx_mean.numpy().T
# tx = clvm.qtx_mean.numpy().T
# clvm_cluster_score = silhouette_score(X=tx, labels=true_labels)
plt.figure(figsize=(38, 7))
plt.subplot(151)
plt.plot(cpca_gamma_plot_list, cluster_scores_cpca, "-o", linewidth=2)
plt.title("CPCA")
plt.ylim([0, 1])
plt.xlim([0, cpca_gamma_plot_list[-1] + 40])
plt.axvline(cpca_fail_gamma, color="black", linestyle="--")
plt.axhline(np.max(cluster_scores_cpca), color="red", linestyle="--")
# plt.axhline(clvm_cluster_score, color="blue", linestyle="--", label="CLVM")
plt.xlabel(r"$\gamma^\prime$")
plt.ylabel("Silhouette score")
plt.legend()
plt.subplot(152)
plt.plot(pcpca_gamma_plot_list, cluster_scores_pcpca, "-o", linewidth=2)
plt.title("PCPCA")
plt.ylim([0, 1])
plt.xlim([0, pcpca_gamma_plot_list[-1] + 0.1])
plt.axvline(pcpca_fail_gamma, color="black", linestyle="--")
plt.axhline(np.max(cluster_scores_pcpca), color="red", linestyle="--")
# plt.axhline(clvm_cluster_score, color="blue", linestyle="--", label="CLVM")
plt.xlabel(r"$\gamma^\prime$")
plt.ylabel("Silhouette score")
plt.legend()
plt.subplot(153)
cpca = CPCA(gamma=n / m * cpca_gamma_plot_list[-1], n_components=N_COMPONENTS)
X_reduced, Y_reduced = cpca.fit_transform(X, Y)
plt.title(r"CPCA, $\gamma^\prime$={}".format(round(cpca_gamma_plot_list[-1], 2)))
X_reduced_df = pd.DataFrame(X_reduced.T, columns=["PCPC1", "PCPC2"])
# [str(x) for x in kmeans.labels_]
X_reduced_df["Genotype"] = X_df.Genotype.values
Y_reduced_df = pd.DataFrame(Y_reduced.T, columns=["PCPC1", "PCPC2"])
Y_reduced_df["Genotype"] = ["Background" for _ in range(Y_reduced_df.shape[0])]
results_df = pd.concat([X_reduced_df, Y_reduced_df], axis=0)
results_df[["PCPC1", "PCPC2"]] = results_df[["PCPC1", "PCPC2"]] / results_df[
["PCPC1", "PCPC2"]
].std(0)
g = sns.scatterplot(
data=results_df,
x="PCPC1",
y="PCPC2",
hue="Genotype",
palette=["green", "orange", "gray"],
)
g.legend_.remove()
plt.xlabel("CPC1")
plt.ylabel("CPC2")
ax = plt.gca()
handles, labels = ax.get_legend_handles_labels()
# ax.legend(handles=handles[1:], labels=labels[1:])
plt.subplot(154)
pcpca = PCPCA(gamma=n / m * pcpca_gamma_plot_list[-1], n_components=N_COMPONENTS)
X_reduced, Y_reduced = pcpca.fit_transform(X, Y)
plt.title(r"PCPCA, $\gamma^\prime$={}".format(round(pcpca_gamma_plot_list[-1], 2)))
X_reduced_df = pd.DataFrame(X_reduced.T, columns=["PCPC1", "PCPC2"])
X_reduced_df["Genotype"] = X_df.Genotype.values
Y_reduced_df = | pd.DataFrame(Y_reduced.T, columns=["PCPC1", "PCPC2"]) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import scipy.io as sio
import pandas as pd
import os
def mat2df(mat_file, var=None, filepath=None):
var_in = var
if isinstance(var, str):
var = [var]
elif var is None:
if isinstance(mat_file, dict):
var = mat_file.keys()
v_names = []
# mat_file is a file path and var_list is a list of strings corresponding to structure field names
if isinstance(mat_file, str):
if os.path.isfile(mat_file):
return mat2df(sio.loadmat(mat_file, simplify_cells=True), var, filepath=mat_file)
elif os.path.isdir(mat_file):
df_list = []
for file in os.listdir(mat_file):
                df_list.append(mat2df(os.path.join(mat_file, file), var))
return pd.concat(df_list, axis=1).squeeze()
else:
            print(mat_file + " is not a valid file path")
return
elif isinstance(mat_file, dict):
mat = mat_file
if any("__" in i for i in list(mat)) or any("readme" in i for i in list(mat)):
for i in list(mat):
if "__" not in i and "readme" not in i:
return mat2df(mat[i], var_in, filepath)
raise ValueError("no variable stored in {file}".format(file=filepath))
elif any(i in mat.keys() for i in var) or any("." in i for i in var):
df_list = []
for i in var:
if "." in i:
(left, right) = i.split(".", 1)
if left in mat.keys():
df_list.append(mat2df(mat[left], right, filepath))
elif i in mat.keys():
for v_name in list(set(var).intersection(mat.keys())):
v_names.append(v_name)
try:
df_list.append(pd.DataFrame(mat).filter(v_names).reset_index(drop=True)) # end
except ValueError as e:
print("warning:", e)
for cols in [mat[v_name] for v_name in v_names]:
if isinstance(cols,
dict): # if all values of dict are scalar, then an index must be provided
if all(np.isscalar(i) for i in cols.values()):
df_list.append(pd.DataFrame(cols, index=[0]))
else:
df_list.append(pd.DataFrame(cols).reset_index(drop=True))
else:
df_list.append(pd.DataFrame(cols).reset_index(drop=True))
return | pd.concat(df_list, axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
from datetime import timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import (Timedelta,
period_range, Period, PeriodIndex,
_np_version_under1p10)
import pandas.core.indexes.period as period
class TestPeriodIndexArithmetic(object):
def test_pi_add_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = pi + offs
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
unanchored = np.array([pd.offsets.Hour(n=1),
pd.offsets.Minute(n=-2)])
with pytest.raises(period.IncompatibleFrequency):
pi + unanchored
with pytest.raises(TypeError):
unanchored + pi
@pytest.mark.xfail(reason='GH#18824 radd doesnt implement this case')
def test_pi_radd_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = offs + pi
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
| tm.assert_index_equal(result, expected) | pandas.util.testing.assert_index_equal |
from model import resnet
from model import densenet_BC
from model import vgg
import losses
import time
import data as dataset
import crl_utils
import metrics
import utils
import train
import data
import argparse
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch.optim.lr_scheduler import MultiStepLR
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description='Confidence Aware Learning')
parser.add_argument('--epochs', default=300, type=int, help='Total number of epochs to run')
parser.add_argument('--batch_size', default=128, type=int, help='original : 128 Batch size for training')
parser.add_argument('--data', default='cifar10', type=str, help='Dataset name to use [cifar10, cifar100, svhn]')
parser.add_argument('--model', default='res', type=str, help='Models name to use [res, dense, vgg]')
parser.add_argument('--loss', default='CE', type=str, help='Loss to use [CE, CRL, Focal, MS, Contrastive, Triplet, NPair, Avg]')
parser.add_argument('--cal', default='Default', type=str, help='Calculate Correctness, Confidence')
parser.add_argument('--rank_target', default='softmax', type=str, help='Rank_target name to use [softmax, margin, entropy]')
parser.add_argument('--rank_weight', default=0.0, type=float, help='Rank loss weight')
parser.add_argument('--lr', default = 1e-4, type =float,help = 'Learning rate setting')
parser.add_argument('--weight-decay', default = 1e-4, type =float, help = 'Weight decay setting')
parser.add_argument('--lr-decay-step', default = 10, type =int, help = 'Learning decay step setting')
parser.add_argument('--lr-decay-gamma', default = 0.5, type =float, help = 'Learning decay gamma setting')
parser.add_argument('--data_path', default='/mnt/hdd0/jiizero/', type=str, help='Dataset directory')
parser.add_argument('--save_path', default='./test/', type=str, help='Savefiles directory')
parser.add_argument('--gpu', default='0', type=str, help='GPU id to use')
parser.add_argument('--print-freq', '-p', default=10, type=int, metavar='N', help='print frequency (default: 10)')
parser.add_argument('--sort', action='store_true', help='sample sort-> hard sample and easy sample')
parser.add_argument('--sort_mode', type=float, default=0, help='sample sort-> 0: acc/conf 1: conf')
parser.add_argument('--valid', action='store_true', help='is_use_validset')
parser.add_argument('--calibrate', action='store_true', help='is_use_validset')
parser.add_argument('--b', type=float, default=None, help='Flood level')
parser.add_argument('--ts', type=float, default=None, help='Temperature Scaling')
parser.add_argument('--mixup', type=float, default=None, help='Mixup with alpha')
parser.add_argument('--rot', action='store_true', help='RotNet')
parser.add_argument('--ji_conf', action='store_true', help='b*(1.5-conf)')
parser.add_argument('--minus_1_conf', action='store_true', help='b*(1/conf)')
parser.add_argument('--ji_acc_conf', action='store_true', help='b*(acc/conf)')
parser.add_argument('--ji_wj', type=float, default=0, help='ce + |soft-acc|')
parser.add_argument('--mode', type=float, default=0, help='batch : 0 (default), sample : 1')
args = parser.parse_args()
def main():
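    # Trains an image classifier (ResNet-110 / DenseNet-BC / VGG-16) on
    # CIFAR-10/100 or SVHN, optionally with flooding (--b) and/or the CRL
    # ranking loss (--rank_weight), then logs calibration metrics (ECE, NLL,
    # E99, over-confidence counts) per epoch and saves the corresponding
    # plots/CSVs under file_name.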
file_name = "./flood_graph/150_250/128/500/ji_sort/1_conf/sample-wised/default/{}/".format(args.b)
start = time.time()
# set GPU ID
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
cudnn.benchmark = True
# check save path
save_path = file_name
# save_path = args.save_path
if not os.path.exists(save_path):
os.makedirs(save_path)
# make dataloader
if args.valid == True:
train_loader, valid_loader, test_loader, test_onehot, test_label = dataset.get_valid_loader(args.data, args.data_path,
args.batch_size)
else:
train_loader, train_onehot, train_label, test_loader, test_onehot, test_label = dataset.get_loader(args.data, args.data_path, args.batch_size)
# set num_class
if args.data == 'cifar100':
num_class = 100
else:
num_class = 10
# set num_classes
model_dict = {
"num_classes": num_class,
}
# set model
if args.model == 'res':
model = resnet.resnet110(**model_dict).cuda()
elif args.model == 'dense':
model = densenet_BC.DenseNet3(depth=100,
num_classes=num_class,
growth_rate=12,
reduction=0.5,
bottleneck=True,
dropRate=0.0).cuda()
elif args.model == 'vgg':
model = vgg.vgg16(**model_dict).cuda()
# set criterion
if args.loss == 'MS':
cls_criterion = losses.MultiSimilarityLoss().cuda()
elif args.loss == 'Contrastive':
cls_criterion = losses.ContrastiveLoss().cuda()
elif args.loss == 'Triplet':
cls_criterion = losses.TripletLoss().cuda()
elif args.loss == 'NPair':
cls_criterion = losses.NPairLoss().cuda()
elif args.loss == 'Focal':
cls_criterion = losses.FocalLoss(gamma=3.0).cuda()
else:
if args.mode == 0:
cls_criterion = nn.CrossEntropyLoss().cuda()
else:
cls_criterion = nn.CrossEntropyLoss(reduction="none").cuda()
ranking_criterion = nn.MarginRankingLoss(margin=0.0).cuda()
# set optimizer (default:sgd)
optimizer = optim.SGD(model.parameters(),
lr=0.1,
momentum=0.9,
weight_decay=5e-4,
# weight_decay=0.0001,
nesterov=False)
# optimizer = optim.SGD(model.parameters(),
# lr=float(args.lr),
# momentum=0.9,
# weight_decay=args.weight_decay,
# nesterov=False)
# set scheduler
# scheduler = MultiStepLR(optimizer,
# milestones=[500, 750],
# gamma=0.1)
scheduler = MultiStepLR(optimizer,
milestones=[150, 250],
gamma=0.1)
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_decay_step, gamma=args.lr_decay_gamma)
# make logger
train_logger = utils.Logger(os.path.join(save_path, 'train.log'))
result_logger = utils.Logger(os.path.join(save_path, 'result.log'))
# make History Class
correctness_history = crl_utils.History(len(train_loader.dataset))
## define matrix
    if args.data == 'cifar10' or args.data == 'cifar100':
matrix_idx_confidence = [[_] for _ in range(50000)]
matrix_idx_iscorrect = [[_] for _ in range(50000)]
else:
matrix_idx_confidence = [[_] for _ in range(73257)]
matrix_idx_iscorrect = [[_] for _ in range(73257)]
# write csv
#'''
import csv
f = open('{}/logs_{}_{}.txt'.format(file_name, args.b, args.epochs),'w', newline='')
f.write("location = {}\n\n".format(file_name)+str(args))
f0 = open('{}/Test_confidence_{}_{}.csv'.format(file_name, args.b, args.epochs), 'w', newline='')
# f0 = open('./baseline_graph/150_250/128/500/Test_confidence_{}_{}.csv'.format(args.b, args.epochs), 'w', newline='')
# f0 = open('./CRL_graph/150_250/Test_confidence_{}_{}.csv'.format(args.b, args.epochs), 'w', newline='')
wr_conf_test = csv.writer(f0)
header = [_ for _ in range(args.epochs + 1)]
header[0] = 'Epoch'
wr_conf_test.writerows([header])
f1 = open('{}/Train_confidence_{}_{}.csv'.format(file_name, args.b, args.epochs), 'w', newline='')
# f1 = open('./baseline_graph/150_250/128/500/Train_confidence_{}_{}.csv'.format(args.b, args.epochs), 'w', newline='')
# f1 = open('./CRL_graph/150_250/Train_confidence_{}_{}.csv'.format(args.b, args.epochs), 'w', newline='')
wr = csv.writer(f1)
header = [_ for _ in range(args.epochs + 1)]
header[0] = 'Epoch'
wr.writerows([header])
f2 = open('{}/Train_Flood_{}_{}_{}.csv'.format(file_name, args.data, args.b, args.epochs), 'w', newline='')
# f2 = open('./baseline_graph/150_250/128/500/Train_Base_{}_{}_{}.csv'.format(args.data, args.b, args.epochs), 'w', newline='')
# f2 = open('./CRL_graph/150_250/Train_Flood_{}_{}_{}.csv'.format(args.data, args.b, args.epochs), 'w', newline='')
wr_train = csv.writer(f2)
header = [_ for _ in range(args.epochs+1)]
header[0] = 'Epoch'
wr_train.writerows([header])
f3 = open('{}/Test_Flood_{}_{}_{}.csv'.format(file_name, args.data, args.b, args.epochs), 'w', newline='')
# f3 = open('./baseline_graph/150_250/128/500/Test_Base_{}_{}_{}.csv'.format(args.data, args.b, args.epochs), 'w', newline='')
# f3 = open('./CRL_graph/150_250/Test_Flood_{}_{}_{}.csv'.format(args.data, args.b, args.epochs), 'w', newline='')
wr_test = csv.writer(f3)
header = [_ for _ in range(args.epochs+1)]
header[0] = 'Epoch'
wr_test.writerows([header])
#'''
# start Train
best_valid_acc = 0
test_ece_report = []
test_acc_report = []
test_nll_report = []
test_over_con99_report = []
test_e99_report = []
test_cls_loss_report = []
train_ece_report = []
train_acc_report = []
train_nll_report = []
train_over_con99_report = []
train_e99_report = []
train_cls_loss_report = []
train_rank_loss_report = []
train_total_loss_report = []
for epoch in range(1, args.epochs + 1):
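        # Note: stepping the scheduler at the top of the epoch (before the
        # optimizer updates) follows the pre-1.1 PyTorch convention; newer
        # releases expect scheduler.step() to be called after optimizer.step().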
scheduler.step()
matrix_idx_confidence, matrix_idx_iscorrect, idx, iscorrect, confidence, target, cls_loss_tr, rank_loss_tr, batch_correctness, total_confidence, total_correctness = \
train.train(matrix_idx_confidence, matrix_idx_iscorrect, train_loader,
model,
wr,
cls_criterion,
ranking_criterion,
optimizer,
epoch,
correctness_history,
train_logger,
args)
if args.rank_weight != 0.0:
print("RANK ", rank_loss_tr)
total_loss_tr = cls_loss_tr + rank_loss_tr
if args.valid == True:
idx, iscorrect, confidence, target, cls_loss_val, acc = train.valid(valid_loader,
model,
cls_criterion,
ranking_criterion,
optimizer,
epoch,
correctness_history,
train_logger,
args)
if acc > best_valid_acc:
best_valid_acc = acc
print("*** Update Best Acc ***")
# save model
if epoch == args.epochs:
torch.save(model.state_dict(),
os.path.join(save_path, 'model.pth'))
print("########### Train ###########")
acc_tr, aurc_tr, eaurc_tr, aupr_tr, fpr_tr, ece_tr, nll_tr, brier_tr, E99_tr, over_99_tr, cls_loss_tr = metrics.calc_metrics(train_loader,
train_label,
train_onehot,
model,
cls_criterion, args)
if args.sort == True and epoch == 260:
#if args.sort == True:
train_loader = dataset.sort_get_loader(args.data, args.data_path,
args.batch_size, idx, np.array(target),
iscorrect, batch_correctness, total_confidence, total_correctness, np.array(confidence), epoch, args)
train_acc_report.append(acc_tr)
train_nll_report.append(nll_tr*10)
train_ece_report.append(ece_tr)
train_over_con99_report.append(over_99_tr)
train_e99_report.append(E99_tr)
train_cls_loss_report.append(cls_loss_tr)
if args.rank_weight != 0.0:
train_total_loss_report.append(total_loss_tr)
train_rank_loss_report.append(rank_loss_tr)
print("CLS ", cls_loss_tr)
# finish train
print("########### Test ###########")
# calc measure
acc_te, aurc_te, eaurc_te, aupr_te, fpr_te, ece_te, nll_te, brier_te, E99_te, over_99_te, cls_loss_te = metrics.calc_metrics(test_loader,
test_label,
test_onehot,
model,
cls_criterion, args)
test_ece_report.append(ece_te)
test_acc_report.append(acc_te)
test_nll_report.append(nll_te*10)
test_over_con99_report.append(over_99_te)
test_e99_report.append(E99_te)
test_cls_loss_report.append(cls_loss_te)
print("CLS ", cls_loss_te)
print("############################")
# for idx in matrix_idx_confidence:
# wr.writerow(idx)
#'''
# draw graph
df = pd.DataFrame()
df['epoch'] = [i for i in range(1, args.epochs + 1)]
df['test_ece'] = test_ece_report
df['train_ece'] = train_ece_report
fig_loss = plt.figure(figsize=(35, 35))
fig_loss.set_facecolor('white')
ax = fig_loss.add_subplot()
ax.plot(df['epoch'], df['test_ece'], df['epoch'], df['train_ece'], linewidth=10)
ax.legend(['Test', 'Train'], loc = 2, prop={'size': 60})
plt.title('[FL] ECE per epoch', fontsize=80)
# plt.title('[BASE] ECE per epoch', fontsize=80)
# plt.title('[CRL] ECE per epoch', fontsize=80)
plt.xlabel('Epoch', fontsize=70)
plt.ylabel('ECE', fontsize=70)
plt.ylim([0, 1])
plt.setp(ax.get_xticklabels(), fontsize=30)
plt.setp(ax.get_yticklabels(), fontsize=30)
plt.savefig('{}/{}_{}_ECE_lr_{}.png'.format(file_name, args.model, args.b, args.epochs))
# plt.savefig('./baseline_graph/150_250/128/500/{}_{}_ECE_lr_{}.png'.format(args.model, args.b, args.epochs))
# plt.savefig('./CRL_graph/150_250/{}_{}_ECE_lr_{}.png'.format(args.model, args.b, args.epochs))
df2 = pd.DataFrame()
df2['epoch'] = [i for i in range(1, args.epochs + 1)]
df2['test_acc'] = test_acc_report
df2['train_acc'] = train_acc_report
fig_acc = plt.figure(figsize=(35, 35))
fig_acc.set_facecolor('white')
ax = fig_acc.add_subplot()
ax.plot(df2['epoch'], df2['test_acc'], df2['epoch'], df2['train_acc'], linewidth=10)
ax.legend(['Test', 'Train'], loc = 2, prop={'size': 60})
plt.title('[FL] Accuracy per epoch', fontsize=80)
# plt.title('[BASE] Accuracy per epoch', fontsize=80)
# plt.title('[CRL] Accuracy per epoch', fontsize=80)
plt.xlabel('Epoch', fontsize=70)
plt.ylabel('Accuracy', fontsize=70)
plt.ylim([0, 100])
plt.setp(ax.get_xticklabels(), fontsize=30)
plt.setp(ax.get_yticklabels(), fontsize=30)
plt.savefig('{}/{}_{}_acc_lr_{}.png'.format(file_name, args.model, args.b, args.epochs))
# plt.savefig('./baseline_graph/150_250/128/500/{}_{}_acc_lr_{}.png'.format(args.model, args.b, args.epochs))
# plt.savefig('./CRL_graph/150_250/{}_{}_acc_lr_{}.png'.format(args.model, args.b, args.epochs))
df3 = pd.DataFrame()
df3['epoch'] = [i for i in range(1, args.epochs + 1)]
df3['test_nll'] = test_nll_report
df3['train_nll'] = train_nll_report
fig_acc = plt.figure(figsize=(35, 35))
fig_acc.set_facecolor('white')
ax = fig_acc.add_subplot()
ax.plot(df3['epoch'], df3['test_nll'], df3['epoch'], df3['train_nll'], linewidth=10)
ax.legend(['Test', 'Train'], loc = 2, prop={'size': 60})
plt.title('[FL] NLL per epoch', fontsize=80)
# plt.title('[BASE] NLL per epoch', fontsize=80)
# plt.title('[CRL] NLL per epoch', fontsize=80)
plt.xlabel('Epoch', fontsize=70)
plt.ylabel('NLL', fontsize=70)
plt.ylim([0, 45])
plt.setp(ax.get_xticklabels(), fontsize=30)
plt.setp(ax.get_yticklabels(), fontsize=30)
plt.savefig('{}/{}_{}_nll_lr_{}.png'.format(file_name, args.model, args.b, args.epochs))
# plt.savefig('./baseline_graph/150_250/128/500/{}_{}_nll_lr_{}.png'.format(args.model, args.b, args.epochs))
# plt.savefig('./CRL_graph/150_250/{}_{}_nll_lr_{}.png'.format(args.model, args.b, args.epochs))
df4 = pd.DataFrame()
df4['epoch'] = [i for i in range(1, args.epochs + 1)]
df4['test_over_con99'] = test_over_con99_report
df4['train_over_con99'] = train_over_con99_report
fig_acc = plt.figure(figsize=(35, 35))
fig_acc.set_facecolor('white')
ax = fig_acc.add_subplot()
ax.plot(df4['epoch'], df4['test_over_con99'], df4['epoch'], df4['train_over_con99'], linewidth=10)
ax.legend(['Test', 'Train'], loc=2, prop={'size': 60})
plt.title('[FL] Over conf99 per epoch', fontsize=80)
# plt.title('[BASE] Over conf99 per epoch', fontsize=80)
# plt.title('[CRL] Over conf99 per epoch', fontsize=80)
plt.xlabel('Epoch', fontsize=70)
plt.ylabel('Over con99', fontsize=70)
if args.data == 'cifar10' or args.data == 'cifar100':
plt.ylim([0, 50000])
else:
plt.ylim([0, 73257])
plt.setp(ax.get_xticklabels(), fontsize=30)
plt.setp(ax.get_yticklabels(), fontsize=30)
plt.savefig('{}/{}_{}_over_conf99_lr_{}.png'.format(file_name, args.model, args.b, args.epochs))
# plt.savefig('./baseline_graph/150_250/128/500/{}_{}_over_conf99_lr_{}.png'.format(args.model, args.b, args.epochs))
# plt.savefig('./CRL_graph/150_250/{}_{}_over_conf99_lr_{}.png'.format(args.model, args.b, args.epochs))
df5 = pd.DataFrame()
df5['epoch'] = [i for i in range(1, args.epochs + 1)]
df5['test_e99'] = test_e99_report
df5['train_e99'] = train_e99_report
fig_acc = plt.figure(figsize=(35, 35))
fig_acc.set_facecolor('white')
ax = fig_acc.add_subplot()
ax.plot(df5['epoch'], df5['test_e99'], df5['epoch'], df5['train_e99'], linewidth=10)
ax.legend(['Test', 'Train'], loc=2, prop={'size': 60})
plt.title('[FL] E99 per epoch', fontsize=80)
# plt.title('[BASE] E99 per epoch', fontsize=80)
# plt.title('[CRL] E99 per epoch', fontsize=80)
plt.xlabel('Epoch', fontsize=70)
plt.ylabel('E99', fontsize=70)
plt.ylim([0, 0.2])
plt.setp(ax.get_xticklabels(), fontsize=30)
plt.setp(ax.get_yticklabels(), fontsize=30)
plt.savefig('{}/{}_{}_E99_flood_lr_{}.png'.format(file_name,args.model, args.b, args.epochs))
# plt.savefig('./baseline_graph/150_250/128/500/{}_{}_E99_flood_lr_{}.png'.format(args.model, args.b, args.epochs))
# plt.savefig('./CRL_graph/150_250/{}_{}_E99_flood_lr_{}.png'.format(args.model, args.b, args.epochs))
df5 = | pd.DataFrame() | pandas.DataFrame |
import re
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf import melt as cudf_melt
from cudf.core import DataFrame
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
)
@pytest.mark.parametrize("num_id_vars", [0, 1, 2, 10])
@pytest.mark.parametrize("num_value_vars", [0, 1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 1000])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + DATETIME_TYPES)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_melt(nulls, num_id_vars, num_value_vars, num_rows, dtype):
if dtype not in ["float32", "float64"] and nulls in ["some", "all"]:
pytest.skip(msg="nulls not supported in dtype: " + dtype)
pdf = pd.DataFrame()
id_vars = []
for i in range(num_id_vars):
colname = "id" + str(i)
data = np.random.randint(0, 26, num_rows).astype(dtype)
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = np.nan
elif nulls == "all":
data[:] = np.nan
pdf[colname] = data
id_vars.append(colname)
value_vars = []
for i in range(num_value_vars):
colname = "val" + str(i)
data = np.random.randint(0, 26, num_rows).astype(dtype)
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = np.nan
elif nulls == "all":
data[:] = np.nan
pdf[colname] = data
value_vars.append(colname)
gdf = DataFrame.from_pandas(pdf)
got = cudf_melt(frame=gdf, id_vars=id_vars, value_vars=value_vars)
got_from_melt_method = gdf.melt(id_vars=id_vars, value_vars=value_vars)
expect = pd.melt(frame=pdf, id_vars=id_vars, value_vars=value_vars)
# pandas' melt makes the 'variable' column of 'object' type (string)
# cuDF's melt makes it Categorical because it doesn't support strings
expect["variable"] = expect["variable"].astype("category")
assert_eq(expect, got)
assert_eq(expect, got_from_melt_method)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 1000])
@pytest.mark.parametrize(
"dtype",
list(NUMERIC_TYPES + DATETIME_TYPES)
+ [pytest.param("str", marks=pytest.mark.xfail())],
)
@pytest.mark.parametrize("nulls", ["none", "some"])
def test_df_stack(nulls, num_cols, num_rows, dtype):
if dtype not in ["float32", "float64"] and nulls in ["some"]:
pytest.skip(msg="nulls not supported in dtype: " + dtype)
pdf = pd.DataFrame()
for i in range(num_cols):
colname = str(i)
data = np.random.randint(0, 26, num_rows).astype(dtype)
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = np.nan
pdf[colname] = data
gdf = DataFrame.from_pandas(pdf)
got = gdf.stack()
expect = pdf.stack()
if {None} == set(expect.index.names):
expect.rename_axis(
list(range(0, len(expect.index.names))), inplace=True
)
assert_eq(expect, got)
pass
@pytest.mark.parametrize("num_rows", [1, 2, 10, 1000])
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize(
"dtype", NUMERIC_TYPES + DATETIME_TYPES + ["category"]
)
@pytest.mark.parametrize("nulls", ["none", "some"])
def test_interleave_columns(nulls, num_cols, num_rows, dtype):
if dtype not in ["float32", "float64"] and nulls in ["some"]:
pytest.skip(msg="nulls not supported in dtype: " + dtype)
pdf = pd.DataFrame(dtype=dtype)
for i in range(num_cols):
colname = str(i)
data = pd.Series(np.random.randint(0, 26, num_rows)).astype(dtype)
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = np.nan
pdf[colname] = data
gdf = DataFrame.from_pandas(pdf)
if dtype == "category":
with pytest.raises(ValueError):
assert gdf.interleave_columns()
else:
got = gdf.interleave_columns()
expect = pd.Series(np.vstack(pdf.to_numpy()).reshape((-1,))).astype(
dtype
)
assert_eq(expect, got)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 1000])
@pytest.mark.parametrize("count", [1, 2, 10])
@pytest.mark.parametrize("dtype", ALL_TYPES)
@pytest.mark.parametrize("nulls", ["none", "some"])
def test_tile(nulls, num_cols, num_rows, dtype, count):
if dtype not in ["float32", "float64"] and nulls in ["some"]:
pytest.skip(msg="nulls not supported in dtype: " + dtype)
pdf = pd.DataFrame(dtype=dtype)
for i in range(num_cols):
colname = str(i)
data = pd.Series(np.random.randint(num_cols, 26, num_rows)).astype(
dtype
)
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = np.nan
pdf[colname] = data
gdf = DataFrame.from_pandas(pdf)
got = gdf.tile(count)
expect = pd.DataFrame(pd.concat([pdf] * count))
assert_eq(expect, got)
def _prepare_merge_sorted_test(
size,
nparts,
keys,
add_null=False,
na_position="last",
ascending=True,
series=False,
index=False,
):
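    """Build a cudf timeseries frame of `size` rows, sort it on `keys` (or on
    the index), and split it into `nparts` individually sorted chunks so the
    merge_sorted tests can compare against the globally sorted original."""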
if index:
df = (
cudf.datasets.timeseries()[:size]
.reset_index(drop=False)
.set_index(keys, drop=True)
)
else:
df = cudf.datasets.timeseries()[:size].reset_index(drop=False)
if add_null:
df.iloc[1, df.columns.get_loc(keys[0])] = None
chunk = int(size / nparts)
indices = [i * chunk for i in range(0, nparts)] + [size]
if index:
dfs = [
df.iloc[indices[i] : indices[i + 1]]
.copy()
.sort_index(ascending=ascending)
for i in range(nparts)
]
elif series:
df = df[keys[0]]
dfs = [
df.iloc[indices[i] : indices[i + 1]]
.copy()
.sort_values(na_position=na_position, ascending=ascending)
for i in range(nparts)
]
else:
dfs = [
df.iloc[indices[i] : indices[i + 1]]
.copy()
.sort_values(keys, na_position=na_position, ascending=ascending)
for i in range(nparts)
]
return df, dfs
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
@pytest.mark.parametrize("keys", [None, ["id"], ["name", "timestamp"]])
@pytest.mark.parametrize("nparts", [2, 10])
def test_df_merge_sorted(nparts, keys, na_position, ascending):
size = 100
keys_1 = keys or ["timestamp"]
# Null values NOT currently supported with Categorical data
# or when `ascending=False`
    add_null = keys_1[0] not in ("name",)
df, dfs = _prepare_merge_sorted_test(
size,
nparts,
keys_1,
add_null=add_null,
na_position=na_position,
ascending=ascending,
)
expect = df.sort_values(
keys_1, na_position=na_position, ascending=ascending
)
result = cudf.merge_sorted(
dfs, keys=keys, na_position=na_position, ascending=ascending
)
if keys:
expect = expect[keys]
result = result[keys]
assert expect.index.dtype == result.index.dtype
assert_eq(expect.reset_index(drop=True), result.reset_index(drop=True))
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("index", ["id", "x"])
@pytest.mark.parametrize("nparts", [2, 10])
def test_df_merge_sorted_index(nparts, index, ascending):
size = 100
df, dfs = _prepare_merge_sorted_test(
size, nparts, index, ascending=ascending, index=True
)
expect = df.sort_index(ascending=ascending)
result = cudf.merge_sorted(dfs, by_index=True, ascending=ascending)
assert_eq(expect.index, result.index)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
@pytest.mark.parametrize("keys", [None, ["name", "timestamp"]])
def test_df_merge_sorted_ignore_index(keys, na_position, ascending):
size = 100
nparts = 3
keys_1 = keys or ["timestamp"]
# Null values NOT currently supported with Categorical data
# or when `ascending=False`
    add_null = keys_1[0] not in ("name",)
df, dfs = _prepare_merge_sorted_test(
size,
nparts,
keys_1,
add_null=add_null,
na_position=na_position,
ascending=ascending,
)
expect = df.sort_values(
keys_1, na_position=na_position, ascending=ascending
)
result = cudf.merge_sorted(
dfs,
keys=keys,
na_position=na_position,
ascending=ascending,
ignore_index=True,
)
if keys:
expect = expect[keys]
result = result[keys]
assert_eq(expect.reset_index(drop=True), result)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
@pytest.mark.parametrize("key", ["id", "name", "timestamp"])
@pytest.mark.parametrize("nparts", [2, 10])
def test_series_merge_sorted(nparts, key, na_position, ascending):
size = 100
df, dfs = _prepare_merge_sorted_test(
size,
nparts,
[key],
na_position=na_position,
ascending=ascending,
series=True,
)
expect = df.sort_values(na_position=na_position, ascending=ascending)
result = cudf.merge_sorted(
dfs, na_position=na_position, ascending=ascending
)
assert_eq(expect.reset_index(drop=True), result.reset_index(drop=True))
@pytest.mark.parametrize(
"index, column, data",
[
([], [], []),
([0], [0], [0]),
([0, 0], [0, 1], [1, 2.0]),
([0, 1], [0, 0], [1, 2.0]),
([0, 1], [0, 1], [1, 2.0]),
(["a", "a", "b", "b"], ["c", "d", "c", "d"], [1, 2, 3, 4]),
(
["a", "a", "b", "b", "a"],
["c", "d", "c", "d", "e"],
[1, 2, 3, 4, 5],
),
],
)
def test_pivot_simple(index, column, data):
pdf = | pd.DataFrame({"index": index, "column": column, "data": data}) | pandas.DataFrame |
import os
import pandas as pd
import matplotlib.pyplot as plt
os.chdir(os.path.dirname(os.path.abspath(__file__)))
df = | pd.read_csv('./test.csv', header=0, encoding='utf-8') | pandas.read_csv |
import os
import operator
import unittest
import numpy as np
from pandas.core.api import DataFrame, Index, notnull
from pandas.core.datetools import bday
from pandas.core.panel import (WidePanel, LongPanelIndex, LongPanel,
group_agg, pivot)
from pandas.util.testing import (assert_panel_equal,
assert_frame_equal,
assert_series_equal,
assert_almost_equal)
import pandas.core.panel as panelm
import pandas.util.testing as common
class PanelTests(object):
def test_iter(self):
common.equalContents(list(self.panel), self.panel.items)
def test_pickle(self):
import cPickle
pickled = cPickle.dumps(self.panel)
unpickled = cPickle.loads(pickled)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_repr(self):
foo = repr(self.panel)
def test_set_values(self):
self.panel.values = np.array(self.panel.values, order='F')
assert(self.panel.values.flags.contiguous)
def _check_statistic(self, frame, name, alternative):
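        # Compare the panel-wide statistic against frame.apply(alternative)
        # computed along each of the three axes (items, major, minor).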
f = getattr(frame, name)
for i, ax in enumerate(['items', 'major', 'minor']):
result = f(axis=i)
assert_frame_equal(result, frame.apply(alternative, axis=ax))
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_statistic(self.panel, 'count', f)
def test_sum(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) == 0:
return np.NaN
else:
return nona.sum()
self._check_statistic(self.panel, 'sum', f)
def test_prod(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) == 0:
return np.NaN
else:
return np.prod(nona)
self._check_statistic(self.panel, 'prod', f)
def test_mean(self):
def f(x):
x = np.asarray(x)
return x[notnull(x)].mean()
self._check_statistic(self.panel, 'mean', f)
def test_median(self):
def f(x):
x = np.asarray(x)
return np.median(x[notnull(x)])
self._check_statistic(self.panel, 'median', f)
def test_min(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) == 0:
return np.NaN
else:
return nona.min()
self._check_statistic(self.panel, 'min', f)
def test_max(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) == 0:
return np.NaN
else:
return nona.max()
self._check_statistic(self.panel, 'max', f)
def test_var(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) < 2:
return np.NaN
else:
return nona.var(ddof=1)
self._check_statistic(self.panel, 'var', f)
def test_std(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) < 2:
return np.NaN
else:
return nona.std(ddof=1)
self._check_statistic(self.panel, 'std', f)
def test_skew(self):
return
try:
from scipy.stats import skew
except ImportError:
return
def f(x):
x = np.asarray(x)
return skew(x[notnull(x)], bias=False)
self._check_statistic(self.panel, 'skew', f)
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
class TestWidePanel(unittest.TestCase, PanelTests):
def setUp(self):
self.panel = common.makeWidePanel()
common.add_nans(self.panel)
def test_get_axis(self):
assert(self.panel._get_axis(0) is self.panel.items)
assert(self.panel._get_axis(1) is self.panel.major_axis)
assert(self.panel._get_axis(2) is self.panel.minor_axis)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major')
self.assertEqual(self.panel._get_axis_name(2), 'minor')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major')
index, columns = self.panel._get_plane_axes('minor')
index, columns = self.panel._get_plane_axes(0)
def test_arith(self):
def test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
test_op(self.panel, operator.add)
test_op(self.panel, operator.sub)
test_op(self.panel, operator.mul)
test_op(self.panel, operator.div)
test_op(self.panel, operator.pow)
test_op(self.panel, lambda x, y: y + x)
test_op(self.panel, lambda x, y: y - x)
test_op(self.panel, lambda x, y: y * x)
test_op(self.panel, lambda x, y: y / x)
test_op(self.panel, lambda x, y: y ** x)
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
def test_fromDict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A' : itema, 'B' : itemb[5:]}
wp = WidePanel.fromDict(d)
self.assert_(wp.major_axis.equals(self.panel.major_axis))
# intersect
wp = WidePanel.fromDict(d, intersect=True)
self.assert_(wp.major_axis.equals(itemb.index[5:]))
def test_keys(self):
common.equalContents(self.panel.keys(), self.panel.items)
def test_iteritems(self):
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
def test_values(self):
self.assertRaises(Exception, WidePanel, np.random.randn(5, 5, 5),
range(5), range(5), range(4))
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assert_('ItemA' not in self.panel.items)
del self.panel['ItemB']
self.assert_('ItemB' not in self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = WidePanel(values, range(3), range(3), range(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
| assert_frame_equal(panelc[1], panel[1]) | pandas.util.testing.assert_frame_equal |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import re
import os
import json
import pandas as pd
import numpy as np
import sys
# In[ ]:
def txt_to_dataframe(algoritmo, parametro):
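    """Collect result files named "<algoritmo>_<parametro>_<value>.txt" from
    the current directory, parse each "name value ... #" metric line, tag the
    row with the swept parameter value taken from the filename, and return all
    rows as a single DataFrame."""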
csv = []
for file_name in sorted(os.listdir()):
if file_name.startswith(algoritmo+"_"+parametro) and file_name.endswith(".txt"):
with open(file_name) as file:
linhas = file.readlines()
metricas = dict([re.findall("([^ ]*) *([0-9]*[.]?[0-9]*).*#",linha)[0] for linha in linhas[2:-2]])
variavel = file_name[len(algoritmo+"_"+parametro+"_"):-4]
try:
metricas[parametro] = int(variavel)
except:
metricas[parametro] = variavel
csv.append(metricas)
df = | pd.DataFrame(csv) | pandas.DataFrame |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import dataclasses
import json
import logging
import re
from collections import defaultdict, OrderedDict
from contextlib import closing
from dataclasses import dataclass, field # pylint: disable=wrong-import-order
from datetime import datetime, timedelta
from typing import (
Any,
cast,
Dict,
Hashable,
List,
NamedTuple,
Optional,
Tuple,
Type,
Union,
)
import pandas as pd
import sqlalchemy as sa
import sqlparse
from flask import escape, Markup
from flask_appbuilder import Model
from flask_babel import lazy_gettext as _
from jinja2.exceptions import TemplateError
from sqlalchemy import (
and_,
asc,
Boolean,
Column,
DateTime,
desc,
Enum,
ForeignKey,
Integer,
or_,
select,
String,
Table,
Text,
update,
)
from sqlalchemy.engine.base import Connection
from sqlalchemy.orm import backref, Query, relationship, RelationshipProperty, Session
from sqlalchemy.orm.mapper import Mapper
from sqlalchemy.schema import UniqueConstraint
from sqlalchemy.sql import column, ColumnElement, literal_column, table, text
from sqlalchemy.sql.elements import ColumnClause
from sqlalchemy.sql.expression import Label, Select, TextAsFrom, TextClause
from sqlalchemy.sql.selectable import Alias, TableClause
from sqlalchemy.types import TypeEngine
from superset import app, db, is_feature_enabled, security_manager
from superset.connectors.base.models import BaseColumn, BaseDatasource, BaseMetric
from superset.db_engine_specs.base import BaseEngineSpec, TimestampExpression
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.exceptions import (
QueryObjectValidationError,
SupersetGenericDBErrorException,
SupersetSecurityException,
)
from superset.jinja_context import (
BaseTemplateProcessor,
ExtraCache,
get_template_processor,
)
from superset.models.annotations import Annotation
from superset.models.core import Database
from superset.models.helpers import AuditMixinNullable, QueryResult
from superset.result_set import SupersetResultSet
from superset.sql_parse import ParsedQuery
from superset.typing import AdhocMetric, Metric, OrderBy, QueryObjectDict
from superset.utils import core as utils
from superset.utils.core import GenericDataType, remove_duplicates
config = app.config
metadata = Model.metadata # pylint: disable=no-member
logger = logging.getLogger(__name__)
VIRTUAL_TABLE_ALIAS = "virtual_table"
class SqlaQuery(NamedTuple):
extra_cache_keys: List[Any]
labels_expected: List[str]
prequeries: List[str]
sqla_query: Select
class QueryStringExtended(NamedTuple):
labels_expected: List[str]
prequeries: List[str]
sql: str
@dataclass
class MetadataResult:
added: List[str] = field(default_factory=list)
removed: List[str] = field(default_factory=list)
modified: List[str] = field(default_factory=list)
class AnnotationDatasource(BaseDatasource):
"""Dummy object so we can query annotations using 'Viz' objects just like
regular datasources.
"""
cache_timeout = 0
changed_on = None
type = "annotation"
column_names = [
"created_on",
"changed_on",
"id",
"start_dttm",
"end_dttm",
"layer_id",
"short_descr",
"long_descr",
"json_metadata",
"created_by_fk",
"changed_by_fk",
]
def query(self, query_obj: QueryObjectDict) -> QueryResult:
error_message = None
qry = db.session.query(Annotation)
qry = qry.filter(Annotation.layer_id == query_obj["filter"][0]["val"])
if query_obj["from_dttm"]:
qry = qry.filter(Annotation.start_dttm >= query_obj["from_dttm"])
if query_obj["to_dttm"]:
qry = qry.filter(Annotation.end_dttm <= query_obj["to_dttm"])
status = utils.QueryStatus.SUCCESS
try:
df = pd.read_sql_query(qry.statement, db.engine)
except Exception as ex: # pylint: disable=broad-except
df = | pd.DataFrame() | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
lakes = | pd.read_table('Feb2015GoranList.csv', sep=',') | pandas.read_table |
import glob
import os
import pandas as pd
from datetime import datetime
from datetime import timedelta
file_pattern = ':dir/*:year_:month.csv'
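# The :dir/:year/:month placeholders in file_pattern are substituted inside
# the loop over years and months below.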
proc_dir = 'processed/'
station_file = proc_dir + 'lcd_station_usa_west_coast.csv'
years = ['2015']
months = ['01', '02', '03', '04', '05', '06',
'07', '08', '09', '10', '11', '12']
if not os.path.isfile(station_file):
print('Please run process_noaa_stations.py first!')
exit()
stations = pd.read_csv(station_file)
if not os.path.isdir(proc_dir):
os.mkdir(proc_dir)
for year in years:
for month in months:
pattern = file_pattern.replace(':dir', '.') \
.replace(':year', year) \
.replace(':month', month)
files = glob.glob(pattern)
for file in files:
print('Processing file', file)
proc_file = file.replace('./', proc_dir)
df = | pd.read_csv(file) | pandas.read_csv |
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
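    # Factory returning an Estimates DataSet subclass parametrized by how many
    # quarters/announcements out the loader should look.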
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
    test_load_one_day()
        Tests that a pipeline run over a single day surfaces the expected
        values for every estimate column.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the previous-quarter loader returns the expected estimates
    when a single day is loaded with multiple estimate columns.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the next-quarter loader returns the expected estimates
    when a single day is loaded with multiple estimate columns.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
        Runs a Pipeline that calculates estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
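# Side sketch (illustrative): the merge(...) call in test_multiple_qtrs_requested
# above combines the per-dataset column dicts into a single Pipeline spec; it is
# presumably toolz.merge, imported earlier in this module. A self-contained
# equivalent of that dict union:
def _demo_dict_merge():
    dicts = [{"estimate1": "col_1q_out"}, {"estimate2": "col_2q_out"}]
    combined = {}
    for d in dicts:
        combined.update(d)  # later dicts win on key clashes
    return combined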
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
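# Shape note (assumption drawn from the assertions above): zipline hands
# CustomFactor.compute a 2-D window of shape (window_length, n_assets), so
# estimate[:, 0] is sid 0's trailing column and estimate[:, 1] is sid 1's.
# Toy 3-day window for two sids, rows ordered oldest -> today:
_example_estimate_window = np.array(
    [
        [np.NaN, np.NaN],
        [np.NaN, 12.0],
        [12.0, 12.0],
    ]
)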
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
    Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-15"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-02-05"),
pd.Timestamp("2015-02-05"),
],
"estimate": [110.0, 111.0] + [310.0, 311.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10,
}
)
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-07"),
cls.window_test_start_date,
pd.Timestamp("2015-01-17"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
],
"estimate": [120.0, 121.0] + [220.0, 221.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20,
}
)
concatted = pd.concat(
[sid_0_timeline, sid_10_timeline, sid_20_timeline]
).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [
sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i + 1])
] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids(),
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(
self, start_date, num_announcements_out
):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date)
- self.trading_days.get_loc(self.window_test_start_date)
+ 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = (
timelines[num_announcements_out]
.loc[today]
.reindex(trading_days[: today_idx + 1])
.values
)
timeline_start_idx = len(today_timeline) - window_len
assert_almost_equal(estimate, today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp("2015-02-10", tz="utc"),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-21"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111, pd.Timestamp("2015-01-22")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 221, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-02-09")
]
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-09"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-20"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-01-22")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 310, pd.Timestamp("2015-01-09")),
(10, 311, pd.Timestamp("2015-01-15")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-11")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-16")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-01-20"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-02-10")
]
)
return {1: oneq_next, 2: twoq_next}
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
    Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp("2015-01-14")
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-09"),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp("2015-01-20"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
],
"estimate": [130.0, 131.0, 230.0, 231.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30,
}
)
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
        # split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-15")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [140.0, 240.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40,
}
)
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-12")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [150.0, 250.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50,
}
)
return pd.concat(
[
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
]
)
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
        # split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame(
{
SID_FIELD_NAME: 0,
"ratio": (-1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100),
"effective_date": (
pd.Timestamp("2014-01-01"), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp("2015-01-07"),
# Split before Q1 event
pd.Timestamp("2015-01-09"),
# Split before Q1 event
pd.Timestamp("2015-01-13"),
# Split before Q1 event
pd.Timestamp("2015-01-15"),
# Split before Q1 event
pd.Timestamp("2015-01-18"),
# Split after Q1 event and before Q2 event
pd.Timestamp("2015-01-30"),
# Filter out - this is after our date index
pd.Timestamp("2016-01-01"),
),
}
)
sid_10_splits = pd.DataFrame(
{
SID_FIELD_NAME: 10,
"ratio": (0.2, 0.3),
"effective_date": (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp("2015-01-07"),
# Apply a single split before Q1 event.
pd.Timestamp("2015-01-20"),
),
}
)
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame(
{
SID_FIELD_NAME: 20,
"ratio": (
0.4,
0.5,
0.6,
0.7,
0.8,
0.9,
),
"effective_date": (
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
pd.Timestamp("2015-01-30"),
),
}
)
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame(
{
SID_FIELD_NAME: 30,
"ratio": (8, 9, 10, 11, 12),
"effective_date": (
# Split before the event and before the
# split-asof-date.
pd.Timestamp("2015-01-07"),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp("2015-01-09"),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
),
}
)
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame(
{
SID_FIELD_NAME: 40,
"ratio": (13, 14),
"effective_date": (
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-22"),
),
}
)
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame(
{
SID_FIELD_NAME: 50,
"ratio": (15, 16),
"effective_date": (
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
),
}
)
return pd.concat(
[
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
]
)
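# Illustrative note (an assumption about how the expected timelines below are
# built): a split with ratio r rescales previously reported per-share estimates
# by r, so an estimate observed through two later splits of 0.7 and 0.8 shows up
# as estimate * 0.7 * 0.8, i.e. the cumulative product of the applicable ratios.
def _demo_cumulative_split_ratio():
    ratios = np.array([0.7, 0.8])
    return 121.0 * ratios.prod()  # matches the 121 * 0.7 * 0.8 terms below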
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-12")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0 * 1 / 16, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-13"),
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-14"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131 * 11, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-20", "2015-01-21")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-01-29")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-30", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-01-20")),
(10, 311 * 0.3, pd.Timestamp("2015-02-05")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311 * 0.3, pd.Timestamp("2015-02-05")),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-02-10")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 240.0 * 13 * 14, pd.Timestamp("2015-02-10")),
(50, 250.0, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131 * 11 * 12, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-20", "2015-02-09")
]
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-02-10")),
(30, 131 * 11 * 12, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-02-10")),
(50, 150.0, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 1 / 4, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120 * 5 / 3, cls.window_test_start_date),
(20, 121 * 5 / 3, pd.Timestamp("2015-01-07")),
(30, 130 * 1 / 10, cls.window_test_start_date),
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")),
(40, 140, pd.Timestamp("2015-01-09")),
(50, 150.0 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-09"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 1 / 4, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120 * 5 / 3, cls.window_test_start_date),
(20, 121 * 5 / 3, pd.Timestamp("2015-01-07")),
(30, 230 * 1 / 10, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp("2015-01-10")),
(50, 250.0 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-12"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp("2015-01-10")),
(50, 250.0 * 1 / 16, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-13"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp("2015-01-10")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-14"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 5, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120 * 0.7, cls.window_test_start_date),
(20, 121 * 0.7, | pd.Timestamp("2015-01-07") | pandas.Timestamp |
import numpy as np
import random
import pandas as pd
from itertools import combinations
items_set = ['beer','burger','milk','onion','potato']
max_trn = 20
data=np.random.randint(2, size=(random.randint(1,max_trn),len(items_set)))
df = | pd.DataFrame(data) | pandas.DataFrame |
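# Illustrative continuation (assumption: the goal is Apriori-style market-basket
# analysis, which is why `combinations` is imported above). Count how often each
# pair of items co-occurs across the random transactions in `data`.
pair_counts = {}
for i, j in combinations(range(len(items_set)), 2):
    both_bought = (data[:, i] == 1) & (data[:, j] == 1)
    pair_counts[(items_set[i], items_set[j])] = int(both_bought.sum())
print(pair_counts)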
# vector operations on series with indexes.
import pandas as pd
# Change False to True for each block of code to see what it does
# Addition when indexes are the same
if True:
s1 = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
s2 = pd.Series([10, 20, 30, 40], index=['a', 'b', 'c', 'd'])
print (s1 + s2)
# Indexes have same elements in a different order
if True:
s1 = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
s2 = pd.Series([10, 20, 30, 40], index=['b', 'd', 'a', 'c'])
print (s1 + s2)
# Indexes overlap, but do not have exactly the same elements
if True:
s1 = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
s2 = | pd.Series([10, 20, 30, 40], index=['c', 'd', 'e', 'f']) | pandas.Series |
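# A hedged addition (not in the original lesson): when indexes only partially
# overlap, plain `+` produces NaN for labels missing on either side; Series.add
# with fill_value treats a missing label as 0 instead.
if True:
    s1 = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
    s2 = pd.Series([10, 20, 30, 40], index=['c', 'd', 'e', 'f'])
    print(s1.add(s2, fill_value=0))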
import pandas as pd
import numpy as np
# series
gk = pd.Series([0.33, 4.87, 5.97, 0.643], index=['merkur', 'venus', 'dunya', 'mars'], name='Kutle') # pandas Series
ps = pd.Series(np.random.randn(5)*7) # Series without an explicit index
ds = pd.Series(({'merkur':0.33,'venus':4.87})) # from a dict
ds2 = pd.Series(({'merkur':0.33,'venus':4.87}), index=['venus', 'merkur']) # dict with a different index order
# operations on series
#print(gk[:2]);print()
#print(gk[::2]);print()
#print(gk > gk['merkur']);print()
#print(gk[gk > gk['merkur']]);print()
#print(gk[:-1]);print() # sondan bir eksik
#print(gk.median());print() # median of the series
#print('Betelgeuse' in gk)
#print(gk.mean(), gk.std(), gk.mean()-gk.std()) # mean and standard deviation
#print(gk**2) # square of every value in the series
#print(gk/3.14159**3)
a = pd.Series(np.random.randn(10)*3.14159)
b = pd.Series(np.random.randn(10)*3.14159)
ab = pd.Series((a*b))
#print(ab,ab.name)
# adding data to series
ex = | pd.Series(({'alpha':1, 'beta':2})) | pandas.Series |
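# Illustrative sketch (restating the small Series so the example is
# self-contained): two common ways to add entries to an existing Series --
# label assignment and pd.concat (Series.append is deprecated in recent pandas).
ex_demo = pd.Series({'alpha': 1, 'beta': 2})
ex_demo.loc['gamma'] = 3
ex_demo = pd.concat([ex_demo, pd.Series({'delta': 4})])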
# -*- coding: utf-8 -*-
# HA - Tool version 2.1
# <NAME>
# MIT License
# 2021
import os
import re
from shutil import move as move
from sys import exit as sys_exit
import threading
from time import sleep as sleep
from datetime import datetime
import pandas as pd
import sqlalchemy
from dotenv import load_dotenv, dotenv_values
class HaTool:
_fred = threading.Thread() # thread management
_lock = threading.Lock() # thread lock
_engine = None # Database connection
_config = dotenv_values(".env") # Env vars
_raw_data_table = _config["table_raw"] # table raw uploaded
_overview_table = _config["table_overview"] # summary table
_log_table = _config["created_trips_table"] # log filename in db
_path = _config["PathToTripData"] # path to the target path for txt data
_threads = _config["process"] # number of processes
def __init__(self):
"""check if System is ready configure"""
load_dotenv()
self._config = dotenv_values(".env")
if not (os.path.isdir(self._path)):
os.makedirs(self._path)
self._login_value()
self._todo_trips = []
self._task_list = None
def _login_value(self):
"""Connection to the Database and log """
db_user = self._config["DB_USERNAME"]
db_passwd = self._config["<PASSWORD>"]
db_ip = self._config["DB_HOST"]
db_schema = self._config["DB_SCHEMA"]
# [driver]://[username][password]@[IP]/[Schema in DB]
db_uri = f'mysql+pymysql://{db_user}:{db_passwd}@{db_ip}:3306/{db_schema}'
self._engine = sqlalchemy.create_engine(db_uri) # connect to Database
try:
now = datetime.now()
data = {'username': [db_user], "time": [now.strftime("%d/%m/%Y, %H:%M:%S")], "Remote": [db_ip],
"OS": ["RPI"]}
pd.DataFrame(data).to_sql("python_log", con=self._engine, if_exists='append')
except Exception:
print("----------------\n\n Error while logging in Database\n\n----------------")
sys_exit()
def _get_last_trip(self, table, trip_id="trip_counter"):
"""return last trip on the Database"""
try:
return pd.read_sql_query(f'SELECT {trip_id} FROM {table} ORDER BY {trip_id} DESC limit 1;',
con=self._engine)
except Exception:
print(f'last trip Error \n{table} \n{trip_id}\n--------------------')
return pd.DataFrame({trip_id: [0]})
def _get_last_trip_number(self):
"""return the number of the last recorded Trip"""
try:
start_trip_number = int(self._get_last_trip(self._overview_table, "trip_number").at[0, 'trip_number'])
target_trip_number = self._get_last_trip(self._raw_data_table).at[0, 'trip_counter']
if target_trip_number == start_trip_number:
print("all uploaded")
return -1
else:
return start_trip_number
except Exception:
print("Error")
return 0
def _getMissiongSummaryTrips(self):
ids = []
try:
values = pd.read_sql_query(f'''SELECT trip_counter
FROM {self._raw_data_table}
WHERE {self._raw_data_table}.trip_counter NOT IN
(SELECT {self._overview_table}.trip_number FROM {self._overview_table})
group by trip_counter''',
con=self._engine)
for index, row in values.iterrows():
ids.append(row['trip_counter'])
except Exception:
print("Summary not founded")
values = pd.read_sql_query(f'''SELECT trip_counter FROM rawData order by trip_counter
desc limit 1''', con=self._engine)
for i in range(values['trip_counter'][0], 0, -1):
ids.append(i)
finally:
return ids
def _trip_handler(self, number_of_processes):
"""manage the Summary Calculator"""
tasks = self._task_list
# value = self._get_last_trip_number()
for i in range(number_of_processes):
self._todo_trips.append(tasks.pop())
run = True
while run:
for i in range(number_of_processes):
if self._todo_trips[i] == "next":
self._todo_trips[i] = tasks.pop()
if len(tasks) == 0:
run = False
print("everything started")
sys_exit()
def _duplicate_check(self, filename):
"""check if file exist in Database"""
try:
trip_list = pd.read_sql_query(f'SELECT filename FROM {self._log_table};', con=self._engine)
# Check if filename is registered in database
for index, row in trip_list.iterrows():
if row['filename'] == str(filename):
print("found duplicate")
return True
return False
except Exception:
print("duplicate error")
return False
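    # Note (illustrative, not part of the original tool): the row loop above
    # could be replaced by a single vectorized check, e.g.
    #   return bool((trip_list["filename"] == str(filename)).any())
    # which avoids iterating the DataFrame row by row.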
def _upload_trips_raw(self):
"""upload all txt files to DB"""
path = self._path
try: # normal
self._get_last_trip_number()
counter = pd.read_sql_query(
f"SELECT trip_counter FROM {self._raw_data_table} ORDER BY trip_counter DESC limit 1;",
con=self._engine) # get last trip number from Database
finished = int(counter.at[0, 'trip_counter']) # last trip number from Database
except Exception:
finished = 0
regex = re.compile("Trip_20[1-3][0-9]-[0-2][0-9]-[0-3][0-9]_[0-3][0-9]-[0-9][0-9]-[0-9][0-9].txt")
for file in os.listdir(path):
if regex.match(file):
values_of_file = pd.read_csv(path + file, sep='\t')
if not self._duplicate_check(file):
finished = finished + 1
else:
continue
values_of_file['trip_counter'] = pd.DataFrame(
{'trip_counter': [finished for _ in range(len(values_of_file.index))]})
values_of_file.to_sql(self._raw_data_table, con=self._engine, if_exists='append', index='counter')
if not (os.path.isdir(path + "archive/")):
os.makedirs(path + "archive/")
move(path + file, path + 'archive/') # move finished file to archive
trip_log = {'filename': [str(file)],
'Datum': [datetime.now().strftime("%d/%m/%Y, %H:%M:%S")]
}
pd.DataFrame(trip_log).to_sql(self._log_table, con=self._engine, if_exists='append')
del values_of_file
sys_exit()
@staticmethod
def _dataframe_difference(df1, df2):
"""Find rows which are different between two DataFrames."""
comparison_df = df1.merge(df2,
indicator=True,
how='outer')
return comparison_df[comparison_df['_merge'] != 'both']
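    # Minimal usage sketch of _dataframe_difference (hypothetical helper, not
    # called by the tool): rows present in only one frame come back tagged
    # 'left_only' / 'right_only' in the _merge column.
    @staticmethod
    def _demo_dataframe_difference():
        df1 = pd.DataFrame({"trip_counter": [1, 2, 3]})
        df2 = pd.DataFrame({"trip_counter": [2, 3, 4]})
        return HaTool._dataframe_difference(df1, df2)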
def _calc_summary(self, process_id):
"""gen _calc_summary trip by trip"""
try:
if self._todo_trips[process_id] == "finished":
sys_exit()
timeout = 0
while self._todo_trips[process_id] == "next":
sleep(1)
if timeout >= 12:
sys_exit()
timeout += 1
query = f"""
SELECT * FROM {self._raw_data_table}
WHERE trip_counter = {self._todo_trips[process_id]} ORDER BY time asc; """
trip_values_database = | pd.read_sql_query(query, self._engine) | pandas.read_sql_query |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_location_stress_error [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_location_stress_error&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ExerStressErr).
# +
import numpy as np
import scipy.stats as stats
import pandas as pd
import matplotlib.pyplot as plt
from arpym.statistics import simulate_unif_in_ellips
from arpym.tools.logo import add_logo
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_location_stress_error-parameters)
k_ = 400 # cardinality of stress-test set
t_ = 15 # len of the time series
j_ = 5*10 ** 2 # number of simulations
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_location_stress_error-implementation-step01): Set the stress test set for the true parameters
# +
# generate uniform on unit circle
unif, _, _ = simulate_unif_in_ellips(np.array([2, 2]), np.identity(2),
int(k_/2))
mu = unif[:, 0]
sigma2 = unif[:, 1]
# ensemble error
m = 2*np.log(mu) - 0.5*np.log(sigma2 + mu ** 2)
s2 = 1.2*np.log((sigma2 / mu ** 2) + 1)
location = np.r_[mu, m]
dispersion = np.r_[sigma2, s2]
# vector of true expectations
expectation = np.r_[mu, mu]
# -
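# Sanity check (illustrative, not in the original script): with the unscaled
# variance log(sigma2/mu**2 + 1), the lognormal mean exp(m + s2/2) recovers mu
# exactly, so the 1.2 factor above appears to inflate dispersion deliberately.
# +
s2_exact = np.log((sigma2 / mu ** 2) + 1)
assert np.allclose(np.exp(m + 0.5 * s2_exact), mu)
# -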
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_location_stress_error-implementation-step02): Generate scenarios and compute the error for each estimator
# +
m_hat = np.zeros((j_, k_))
pi_hat = np.zeros((j_, k_))
k_hat = np.zeros((j_, k_))
er_m = np.zeros(k_)
er_pi = np.zeros(k_)
er_k = np.zeros(k_)
for k in range(k_):
# generate scenarios
if k <= int(k_ / 2)-1:
# normal simulations
i_thetak = stats.norm.rvs(location[k], np.sqrt(dispersion[k]),
size=[j_, t_])
else:
# lognormal simulations
i_thetak = stats.lognorm.rvs(np.sqrt(dispersion[k]),
scale=np.exp(location[k]), size=[j_, t_])
# sample mean estimator
m_hat[:, k] = np.mean(i_thetak, axis=1) # simulations
l_m = (m_hat[:, k]-expectation[k]) ** 2 # loss
er_m[k] = np.mean(l_m) # error
# product estimator
pi_hat[:, k] = i_thetak[:, 0] * i_thetak[:, -1] # simulations
l_pi = (pi_hat[:, k]-expectation[k]) ** 2 # loss
er_pi[k] = np.mean(l_pi) # error
# constant estimator
k_hat[:, k] = 3*np.ones(j_) # simulations
l_k = (k_hat[:, k]-expectation[k]) ** 2 # loss
er_k[k] = np.mean(l_k) # error
# -
# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_location_stress_error-implementation-step03): Compute robust and ensemble error for each estimator
# +
# robust errors
er_rob_m, i_m = np.max(er_m), np.argmax(er_m)
er_rob_pi, i_pi = np.max(er_pi), np.argmax(er_pi)
er_rob_k, i_k = np.max(er_k), np.argmax(er_k)
# ensemble errors
er_ens_m = np.mean(er_m)
er_ens_pi = np.mean(er_pi)
er_ens_k = np.mean(er_k)
# -
# ## Save database
# +
output = {'j_': pd.Series(j_),
'k_': pd.Series(k_),
'm_hat': pd.Series(m_hat.reshape((j_*k_,))),
'expectation': pd.Series(expectation),
'er_rob_m': pd.Series(er_rob_m),
'er_ens_m': pd.Series(er_ens_m)}
df = | pd.DataFrame(output) | pandas.DataFrame |
from typing import List, Tuple, Dict
import pandas as pd
import numpy as np
import os
import fasttext
import json
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
from src.data.utils import emoji2text_tweet
np.random.seed(42)
def remove_quotes_from_saved_file(txt_path: str):
text = ""
with open(txt_path, "r", encoding="utf-8") as f:
for line in f:
if line[0] == "\"" and line[-2] == "\"":
line = line[1:]
line = line[:-2] + "\n"
text += line
os.remove(txt_path)
with open(txt_path, "w", encoding="utf-8") as f:
f.write(text)
def process_dataframe_to_fasttext_format(dataframe: pd.DataFrame, emoji_mapping_items: Dict[str, str]) -> pd.DataFrame:
dataframe = dataframe[['label', 'text']]
dataframe['label'] = "__label__" + dataframe['label']
dataframe = dataframe.astype(str)
dataframe['text'] = dataframe['text'].apply(lambda string: str(emoji2text_tweet(str(string), emoji_mapping_items)))
dataframe['text'] = dataframe['text'].apply(lambda string: str(string).lower())
dataframe['text'] = dataframe['text'].apply(lambda string: str(string).replace("#", ""))
dataframe['row'] = dataframe['label'] + " " + dataframe['text']
return dataframe
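# Hedged usage sketch (not from the original): the `row` column is already in
# fastText's "__label__<label> <text>" format, so it can be dumped to a plain
# text file and passed to fasttext.train_supervised. The file name is
# hypothetical; remove_quotes_from_saved_file strips the quoting pandas adds.
def train_fasttext_from_dataframe(dataframe: pd.DataFrame, txt_path: str = "fasttext_train.txt"):
    dataframe['row'].to_csv(txt_path, index=False, header=False)
    remove_quotes_from_saved_file(txt_path)
    return fasttext.train_supervised(input=txt_path)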
def get_train_val_test_dataframes(texts: List, labels: List, train_size: float, val_size: float, test_size: float) -> \
Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
assert train_size + val_size + test_size == 1.0
texts_train_val, texts_test, labels_train_val, labels_test = train_test_split(
texts, labels, test_size=test_size, random_state=42, stratify=labels)
texts_train, texts_val, labels_train, labels_val = train_test_split(
texts_train_val,
labels_train_val,
test_size=val_size / (1 - test_size),
random_state=42, stratify=labels_train_val)
train_df = pd.DataFrame(data={"text": texts_train, "label": labels_train})
val_df = pd.DataFrame(data={"text": texts_val, "label": labels_val})
test_df = pd.DataFrame(data={"text": texts_test, "label": labels_test})
return train_df, val_df, test_df
def get_dataframes_for_all_files(files_dict: dict, path_to_dataset_folder: str,
emoji_mapping_items: Dict[str, str]) -> dict:
data_for_fasttext = {}
for dataset, file_path in files_dict.items():
df = | pd.read_csv(file_path) | pandas.read_csv |