| prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90) |
|---|---|---|
import itertools
import numpy as np
import pandas as pd
from respy.config import HUGE_FLOAT
from respy.pre_processing.model_processing import process_params_and_options
from respy.shared import create_base_draws
from respy.shared import downcast_to_smallest_dtype
class StateSpace:
"""The state space of a discrete choice dynamic programming model.
Parameters
----------
params : pandas.Series or pandas.DataFrame
Contains parameters affected by optimization.
options : dict
Dictionary containing optimization independent model options.
Attributes
----------
states : numpy.ndarray
Array with shape (n_states, n_choices + 3) containing period, experiences,
lagged_choice and type information.
indexer : numpy.ndarray
Array with shape (n_periods, n_periods, n_periods, edu_max, n_choices, n_types).
covariates : numpy.ndarray
Array with shape (n_states, n_covariates) containing covariates of each state
necessary to calculate rewards.
wages : numpy.ndarray
Array with shape (n_states_in_period, n_choices) which contains zeros in places
for choices without wages.
nonpec : numpy.ndarray
Array with shape (n_states_in_period, n_choices).
continuation_values : numpy.ndarray
Array with shape (n_states, n_choices + 3) containing the emax of
each choice of the subsequent period and the simulated or interpolated maximum
of the current period.
emax_value_functions : numpy.ndarray
Array with shape (n_states, 1) containing the expected maximum of
choice-specific value functions.
"""
def __init__(self, params, options):
params, optim_paras, options = process_params_and_options(params, options)
self.base_draws_sol = create_base_draws(
(options["n_periods"], options["solution_draws"], len(options["choices"])),
options["solution_seed"],
)
states_df, self.indexer = _create_state_space(options)
_states_df = states_df.copy()
_states_df.lagged_choice = _states_df.lagged_choice.cat.codes
_states_df = _states_df.apply(downcast_to_smallest_dtype)
self.states = _states_df.to_numpy()
base_covariates_df = create_base_covariates(states_df, options["covariates"])
base_covariates_df = base_covariates_df.apply(downcast_to_smallest_dtype)
self.covariates = _create_choice_covariates(
base_covariates_df, states_df, params, options
)
self.wages, self.nonpec = _create_reward_components(
self.states[:, -1], self.covariates, optim_paras, options
)
self.is_inadmissible = _create_is_inadmissible_indicator(states_df, options)
self._create_slices_by_periods(options["n_periods"])
def update_systematic_rewards(self, optim_paras, options):
"""Update wages and non-pecuniary rewards.
During the estimation, the rewards need to be updated according to the new
parameters whereas the covariates stay the same.
"""
self.wages, self.nonpec = _create_reward_components(
self.states[:, -1], self.covariates, optim_paras, options
)
def get_attribute_from_period(self, attr, period):
"""Get an attribute of the state space sliced to a given period.
Parameters
----------
attr : str
Attribute name, e.g. ``"states"`` to retrieve ``self.states``.
period : int
Attribute is retrieved from this period.
"""
if attr == "covariates":
raise AttributeError("Attribute covariates cannot be retrieved by periods.")
else:
pass
try:
attribute = getattr(self, attr)
except AttributeError as e:
raise AttributeError(f"StateSpace has no attribute {attr}.").with_traceback(
e.__traceback__
)
try:
indices = self.slices_by_periods[period]
except IndexError as e:
raise IndexError(f"StateSpace has no period {period}.").with_traceback(
e.__traceback__
)
return attribute[indices]
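# NOTE: Hypothetical usage, assuming ``state_space = StateSpace(params, options)``
# was built from a valid model specification:
#     wages_in_period_0 = state_space.get_attribute_from_period("wages", 0)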
def _create_slices_by_periods(self, n_periods):
"""Create slices to index all attributes in a given period.
It is important that the returned objects are not fancy indices. Fancy indexing
results in copies of the array which decrease performance and increase memory usage.
"""
self.slices_by_periods = []
for i in range(n_periods):
idx_start, idx_end = np.where(self.states[:, 0] == i)[0][[0, -1]]
self.slices_by_periods.append(slice(idx_start, idx_end + 1))
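# NOTE: A minimal, hypothetical illustration (not part of respy) of the point above:
# slicing a NumPy array returns a view, whereas fancy indexing materializes a copy,
# which costs memory and time for large state spaces.
def _demo_slice_versus_fancy_index():
    arr = np.arange(12).reshape(6, 2)
    assert np.shares_memory(arr, arr[1:4])  # slice -> view, no copy
    assert not np.shares_memory(arr, arr[[1, 2, 3]])  # fancy index -> copy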
def _create_state_space(options):
"""Create the state space.
The state space of the model consists of all feasible combinations of the period,
experiences, lagged choices and types.
Creating the state space involves two steps. First, the core state space is created
which abstracts from levels of initial experiences and instead uses the minimum
initial experience per choice.
Secondly, the state space is adjusted by all combinations of initial experiences and
also filtered, excluding invalid states.
Notes
-----
Here are some details on the implementation.
- In the process of creating this function, we came up with several different ideas.
Basically, there are two fringe cases for finding all valid states in the state space.
First, all combinations of state attributes are created. Then, only valid states
are selected. The problem with this approach is that the state space is extremely
sparse. The number of combinations created by using ``itertools.product`` or
``np.meshgrid`` is much higher than the number of valid states. Because of that,
we ran into memory or runtime problems which seemed unsolvable.
The second approach is more similar to the actual process where states are created
by incrementing experiences from period to period. In an extreme case, a function
mimics an agent in one period and recursively creates updates of itself in future
periods. Using this approach, we ran into the Python recursion limit and runtime
problems, but it might be feasible.
These two approaches frame the search for a solution to this problem, in which
filtering is either applied after creating a massive number of candidate states or
before creating any states. A practical solution must take into account that some
restrictions on the state space are more important than others and should be applied
earlier. Others can be delayed.
As a compromise, we built on the former approach in
:func:`~respy.tests._former_code._create_state_space_kw94` which loops over
choices and possible experience values. Thus, it incorporates some fundamental
restrictions like time limits and needs less filtering.
- The former implementation,
:func:`~respy.tests._former_code._create_state_space_kw94`, had four hard-coded
choices and a loop for every choice with experience accumulation. Thus, this
function is useless if the model requires more or fewer choices. For each
number of choices with and without experience, a new function had to be
programmed. The following approach uses the same loops over choices with
experiences, but they are dynamically created by the recursive function
:func:`_create_core_state_space_per_period`.
- There are characteristics of the state space which are independent of all other
state space attributes, such as types (and, to a large extent, lagged choices). These attributes
only duplicate the existing state space and can be taken into account in a later
stage of the process.
See also
--------
_create_core_state_space
_create_core_state_space_per_period
_filter_core_state_space
_add_initial_experiences_to_core_state_space
_create_state_space_indexer
"""
df = _create_core_state_space(options)
df = _add_lagged_choice_to_core_state_space(df, options)
df = _filter_core_state_space(df, options)
df = _add_initial_experiences_to_core_state_space(df, options)
df = _add_observables_to_state_space(df, options)
df = _add_types_to_state_space(df, options["n_types"])
df = df.sort_values("period").reset_index(drop=True)
indexer = _create_state_space_indexer(df, options)
df.lagged_choice = pd.Categorical(df.lagged_choice, categories=options["choices"])
return df, indexer
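# NOTE: Hypothetical back-of-the-envelope check (not part of respy) of the sparsity
# argument in the notes above: enumerating every candidate with ``itertools.product``
# vastly overshoots the number of feasible states in which total experience cannot
# exceed the number of elapsed periods.
def _demo_state_space_sparsity(n_periods=20, n_choices_w_exp=3):
    candidates = itertools.product(range(n_periods), repeat=n_choices_w_exp)
    n_feasible = sum(1 for exp in candidates if sum(exp) < n_periods)
    n_candidates = n_periods ** n_choices_w_exp
    return n_feasible, n_candidates  # (1540, 8000) for the default arguments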
def _create_core_state_space(options):
"""Create the core state space.
The core state space abstracts from initial experiences and instead spans the maximum
range between the minimum initial experience and the maximum experience per choice. The
combinations of initial experiences are applied later in
:func:`_add_initial_experiences_to_core_state_space`.
See also
--------
_create_core_state_space_per_period
"""
minimal_initial_experience = np.array(
[
np.min(options["choices"][choice]["start"])
for choice in options["choices_w_exp"]
],
dtype=np.uint8,
)
maximum_exp = np.array(
[options["choices"][choice]["max"] for choice in options["choices_w_exp"]],
dtype=np.uint8,
)
additional_exp = maximum_exp - minimal_initial_experience
exp_cols = [f"exp_{choice}" for choice in options["choices_w_exp"]]
container = []
for period in np.arange(options["n_periods"], dtype=np.uint8):
data = _create_core_state_space_per_period(
period,
additional_exp,
options,
np.zeros(len(options["choices_w_exp"]), dtype=np.uint8),
)
df_ = pd.DataFrame.from_records(data, columns=exp_cols)
df_.insert(0, "period", period)
container.append(df_)
df = pd.concat(container, axis="rows", sort=False)
return df
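# NOTE: Hypothetical usage sketch; the ``options`` layout below is inferred from the
# keys accessed above and is not a complete respy options dictionary.
def _demo_core_state_space():
    options = {
        "n_periods": 3,
        "choices_w_exp": ["a"],
        "choices": {"a": {"start": [0], "max": 2}},
    }
    return _create_core_state_space(options)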
def _create_core_state_space_per_period(
period, additional_exp, options, experiences, pos=0
):
"""Create core state space per period.
First, this function returns a state combined with all possible lagged choices and
types.
Secondly, if there exists a choice with experience in ``additional_exp[pos]``, loop
over all admissible experiences, update the state and pass it to the same function,
but moving to the next choice which accumulates experience.
Parameters
----------
period : int
Number of period.
additional_exp : numpy.ndarray
Array with shape (n_choices_w_exp,) containing integers representing the
additional experience per choice which is admissible. This is the difference
between the maximum experience and minimum of initial experience per choice.
experiences : numpy.ndarray
Array with shape (n_choices_w_exp,) which contains the current experience of the state.
pos : int, default 0
Index for current choice with experience. If index is valid for array
``experiences``, then loop over all admissible experience levels of this choice.
Otherwise, ``experiences[pos]`` would lead to an :exc:`IndexError`.
"""
# Return experiences combined with lagged choices and types.
yield experiences
# Check if there is an additional choice left to start another loop.
if pos < experiences.shape[0]:
# Upper bound of additional experience is given by the remaining time or the
# maximum experience which can be accumulated in experience[pos].
remaining_time = period - experiences.sum()
max_experience = additional_exp[pos]
# +1 is necessary so that the remaining time or max_experience is exhausted.
for i in np.arange(min(remaining_time, max_experience) + 1, dtype=np.uint8):
# Update experiences and call the same function with the next choice.
updated_experiences = experiences.copy()
updated_experiences[pos] += i
yield from _create_core_state_space_per_period(
period, additional_exp, options, updated_experiences, pos + 1
)
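# NOTE: Hypothetical usage sketch (not part of respy): collect the experience vectors
# generated for one period. ``options`` is only passed through by the generator, so
# ``None`` suffices for illustration.
def _demo_core_states_per_period(period=2, additional_exp=(2, 2)):
    start = np.zeros(len(additional_exp), dtype=np.uint8)
    return list(
        _create_core_state_space_per_period(
            period, np.array(additional_exp, dtype=np.uint8), None, start
        )
    )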
def _add_lagged_choice_to_core_state_space(df, options):
container = []
for choice in options["choices"]:
df_ = df.copy()
df_["lagged_choice"] = choice
container.append(df_)
df = pd.concat(container, axis="rows", sort=False)
return df
def _filter_core_state_space(df, options):
"""Applies filters to the core state space.
Sometimes, we want to apply filters to a group of choices. Thus, use the following
shortcuts.
- ``i`` is replaced with every choice with experience.
- ``j`` is replaced with every choice without experience.
- ``k`` is replaced with every choice with a wage.
Parameters
----------
df : pandas.DataFrame
Contains the core state space.
options : dict
Contains model options and the filters to reduce the core state space.
"""
for definition in options["core_state_space_filters"]:
# If "{i}" is in definition, loop over choices with experiences.
if "{i}" in definition:
for i in options["choices_w_exp"]:
df = df.loc[~df.eval(definition.format(i=i))]
# If "{j}" is in definition, loop over choices without experiences.
elif "{j}" in definition:
for j in options["choices_wo_exp"]:
df = df.loc[~df.eval(definition.format(j=j))]
# If "{k}" is in definition, loop over choices with wage.
elif "{k}" in definition:
for k in options["choices_w_wage"]:
df = df.loc[~df.eval(definition.format(k=k))]
else:
df = df.loc[~df.eval(definition)]
return df
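# NOTE: Hypothetical filter definition (not taken from a real respy model spec) to
# illustrate how the ``{i}`` placeholder is expanded and applied via ``df.eval``.
def _demo_filter_expansion():
    df = pd.DataFrame({"period": [0, 1, 2], "exp_a": [0, 2, 1]})
    definition = "exp_{i} > period"  # experience can never exceed the elapsed periods
    return df.loc[~df.eval(definition.format(i="a"))]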
def _add_initial_experiences_to_core_state_space(df, options):
"""Add initial experiences to core state space.
As the core state space abstracts from differences in initial experiences, this
function loops through all combinations of initial experiences and adds them to the
existing experiences. After that, we need to check whether the maximum
experience constraint is still respected.
"""
# Create combinations of starting values
initial_experiences_combinations = itertools.product(
*[options["choices"][choice]["start"] for choice in options["choices_w_exp"]]
)
maximum_exp = np.array(
[options["choices"][choice]["max"] for choice in options["choices_w_exp"]]
)
exp_cols = df.filter(like="exp_").columns.tolist()
container = []
for initial_exp in initial_experiences_combinations:
df_ = df.copy()
# Add initial experiences.
df_[exp_cols] += initial_exp
# Check that max_experience is still fulfilled.
df_ = df_.loc[df_[exp_cols].le(maximum_exp).all(axis="columns")].copy()
container.append(df_)
df = | pd.concat(container, axis="rows", sort=False) | pandas.concat |
import numpy as np
import pandas as pd
from pandas import DataFrame, MultiIndex, Index, Series, isnull
from pandas.compat import lrange
from pandas.util.testing import assert_frame_equal, assert_series_equal
from .common import MixIn
class TestNth(MixIn):
def test_first_last_nth(self):
# tests for first / last / nth
grouped = self.df.groupby('A')
first = grouped.first()
expected = self.df.loc[[1, 0], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
nth = grouped.nth(0)
assert_frame_equal(nth, expected)
last = grouped.last()
expected = self.df.loc[[5, 7], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
assert_frame_equal(last, expected)
nth = grouped.nth(-1)
assert_frame_equal(nth, expected)
nth = grouped.nth(1)
expected = self.df.loc[[2, 3], ['B', 'C', 'D']].copy()
expected.index = Index(['foo', 'bar'], name='A')
expected = expected.sort_index()
assert_frame_equal(nth, expected)
# it works!
grouped['B'].first()
grouped['B'].last()
grouped['B'].nth(0)
self.df.loc[self.df['A'] == 'foo', 'B'] = np.nan
assert isnull(grouped['B'].first()['foo'])
assert isnull(grouped['B'].last()['foo'])
assert isnull(grouped['B'].nth(0)['foo'])
# v0.14.0 whatsnew
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
result = g.first()
expected = df.iloc[[1, 2]].set_index('A')
assert_frame_equal(result, expected)
expected = df.iloc[[1, 2]].set_index('A')
result = g.nth(0, dropna='any')
assert_frame_equal(result, expected)
def test_first_last_nth_dtypes(self):
df = self.df_mixed_floats.copy()
df['E'] = True
df['F'] = 1
# tests for first / last / nth
grouped = df.groupby('A')
first = grouped.first()
expected = df.loc[[1, 0], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
last = grouped.last()
expected = df.loc[[5, 7], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(last, expected)
nth = grouped.nth(1)
expected = df.loc[[3, 2], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(nth, expected)
# GH 2763, first/last shifting dtypes
idx = lrange(10)
idx.append(9)
s = Series(data=lrange(11), index=idx, name='IntCol')
assert s.dtype == 'int64'
f = s.groupby(level=0).first()
assert f.dtype == 'int64'
def test_nth(self):
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
assert_frame_equal(g.nth(0), df.iloc[[0, 2]].set_index('A'))
assert_frame_equal(g.nth(1), df.iloc[[1]].set_index('A'))
assert_frame_equal(g.nth(2), df.loc[[]].set_index('A'))
assert_frame_equal(g.nth(-1), df.iloc[[1, 2]].set_index('A'))
assert_frame_equal(g.nth(-2), df.iloc[[0]].set_index('A'))
assert_frame_equal(g.nth(-3), df.loc[[]].set_index('A'))
assert_series_equal(g.B.nth(0), df.set_index('A').B.iloc[[0, 2]])
assert_series_equal(g.B.nth(1), df.set_index('A').B.iloc[[1]])
assert_frame_equal(g[['B']].nth(0),
df.loc[[0, 2], ['A', 'B']].set_index('A'))
exp = df.set_index('A')
assert_frame_equal(g.nth(0, dropna='any'), exp.iloc[[1, 2]])
assert_frame_equal(g.nth(-1, dropna='any'), exp.iloc[[1, 2]])
exp['B'] = np.nan
assert_frame_equal(g.nth(7, dropna='any'), exp.iloc[[1, 2]])
assert_frame_equal(g.nth(2, dropna='any'), exp.iloc[[1, 2]])
# out of bounds, regression from 0.13.1
# GH 6621
df = DataFrame({'color': {0: 'green',
1: 'green',
2: 'red',
3: 'red',
4: 'red'},
'food': {0: 'ham',
1: 'eggs',
2: 'eggs',
3: 'ham',
4: 'pork'},
'two': {0: 1.5456590000000001,
1: -0.070345000000000005,
2: -2.4004539999999999,
3: 0.46206000000000003,
4: 0.52350799999999997},
'one': {0: 0.56573799999999996,
1: -0.9742360000000001,
2: 1.033801,
3: -0.78543499999999999,
4: 0.70422799999999997}}).set_index(['color',
'food'])
result = df.groupby(level=0, as_index=False).nth(2)
expected = df.iloc[[-1]]
assert_frame_equal(result, expected)
result = df.groupby(level=0, as_index=False).nth(3)
expected = df.loc[[]]
assert_frame_equal(result, expected)
# GH 7559
# from the vbench
df = DataFrame(np.random.randint(1, 10, (100, 2)), dtype='int64')
s = df[1]
g = df[0]
expected = s.groupby(g).first()
expected2 = s.groupby(g).apply(lambda x: x.iloc[0])
assert_series_equal(expected2, expected, check_names=False)
assert expected.name, 0
assert expected.name == 1
# validate first
v = s[g == 1].iloc[0]
assert expected.iloc[0] == v
assert expected2.iloc[0] == v
# this is NOT the same as .first (as sorted is default!)
# as it keeps the order in the series (and not the group order)
# related GH 7287
expected = s.groupby(g, sort=False).first()
result = s.groupby(g, sort=False).nth(0, dropna='all')
assert_series_equal(result, expected)
# doc example
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
result = g.B.nth(0, dropna=True)
expected = g.B.first()
assert_series_equal(result, expected)
# test multiple nth values
df = DataFrame([[1, np.nan], [1, 3], [1, 4], [5, 6], [5, 7]],
columns=['A', 'B'])
g = df.groupby('A')
assert_frame_equal(g.nth(0), df.iloc[[0, 3]].set_index('A'))
assert_frame_equal(g.nth([0]), df.iloc[[0, 3]].set_index('A'))
assert_frame_equal(g.nth([0, 1]), df.iloc[[0, 1, 3, 4]].set_index('A'))
assert_frame_equal(
g.nth([0, -1]), df.iloc[[0, 2, 3, 4]].set_index('A'))
assert_frame_equal(
g.nth([0, 1, 2]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))
assert_frame_equal(
g.nth([0, 1, -1]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))
assert_frame_equal(g.nth([2]), df.iloc[[2]].set_index('A'))
assert_frame_equal(g.nth([3, 4]), df.loc[[]].set_index('A'))
business_dates = pd.date_range(start='4/1/2014', end='6/30/2014',
freq='B')
df = DataFrame(1, index=business_dates, columns=['a', 'b'])
# get the first, fourth and last two business days for each month
key = (df.index.year, df.index.month)
result = df.groupby(key, as_index=False).nth([0, 3, -2, -1])
expected_dates = pd.to_datetime(
['2014/4/1', '2014/4/4', '2014/4/29', '2014/4/30', '2014/5/1',
'2014/5/6', '2014/5/29', '2014/5/30', '2014/6/2', '2014/6/5',
'2014/6/27', '2014/6/30'])
expected = DataFrame(1, columns=['a', 'b'], index=expected_dates)
| assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
#!/usr/bin/env python3
import os
import pandas as pd
import csv
import json
import argparse
def aggregate_parameter_sets(huc_list_path,calibration_stat_folder,summary_file, mannings_json):
outfolder = os.path.dirname(summary_file)
aggregate_output_dir = os.path.join(outfolder, 'aggregate_metrics')
if not os.path.exists(aggregate_output_dir):
os.makedirs(aggregate_output_dir)
mannings_summary_table = pd.DataFrame(columns = ['metric', 'value', 'stream_order', 'mannings_n', 'huc', 'interval'])
with open(huc_list_path) as f:
huc_list = [huc.rstrip() for huc in f]
for huc in huc_list:
branch_dir = os.path.join('data','test_cases',str(huc) + '_ble','performance_archive','development_versions', calibration_stat_folder)
for stream_order in os.listdir(branch_dir):
stream_order_dir = os.path.join(branch_dir, stream_order)
for mannings_value in os.listdir(stream_order_dir):
mannings_value_dir = os.path.join(stream_order_dir, mannings_value)
for flood_recurrence in os.listdir(mannings_value_dir):
flood_recurrence_dir = os.path.join(mannings_value_dir, flood_recurrence)
total_area_stats = pd.read_csv(os.path.join(flood_recurrence_dir, 'total_area_stats.csv'), index_col=0)
total_area_stats = total_area_stats.loc[['true_positives_count', 'true_negatives_count', 'false_positives_count', 'false_negatives_count','masked_count', 'cell_area_m2', 'CSI'],:]
total_area_stats = total_area_stats.reset_index()
total_area_stats_table = | pd.DataFrame({'metric': total_area_stats.iloc[:,0], 'value': total_area_stats.iloc[:,1], 'stream_order': stream_order, 'mannings_n': mannings_value, 'huc': huc, 'interval': flood_recurrence}) | pandas.DataFrame |
from functools import partial
from itertools import repeat, chain
import re
from scanpy import datasets
import numpy as np
from numpy.core.fromnumeric import trace
import pandas as pd
import pytest
from anndata import AnnData
from scipy import sparse
import scanpy as sc
TRANSPOSE_PARAMS = pytest.mark.parametrize(
"dim,transform,func",
[
(
"obs",
lambda x: x,
sc.get.obs_df,
),
(
"var",
lambda x: x.T,
sc.get.var_df,
),
],
ids=["obs_df", "var_df"],
)
@pytest.fixture
def adata():
"""
adata.X is np.ones((2, 2))
adata.layers['double'] is sparse np.ones((2,2)) * 2 to also test sparse matrices
"""
return AnnData(
X=np.ones((2, 2)),
obs=pd.DataFrame(
{"obs1": [0, 1], "obs2": ["a", "b"]}, index=["cell1", "cell2"]
),
var=pd.DataFrame(
{"gene_symbols": ["genesymbol1", "genesymbol2"]}, index=["gene1", "gene2"]
),
layers={"double": sparse.csr_matrix(np.ones((2, 2)), dtype=int) * 2},
dtype=int,
)
########################
# obs_df, var_df tests #
########################
def test_obs_df(adata):
adata.obsm["eye"] = np.eye(2, dtype=int)
adata.obsm["sparse"] = sparse.csr_matrix(np.eye(2), dtype='float64')
# make raw with different genes than adata
adata.raw = AnnData(
X=np.array([[1, 2, 3], [2, 4, 6]]),
var=pd.DataFrame(
{"gene_symbols": ["raw1", "raw2", 'raw3']},
index=["gene2", "gene3", "gene4"],
),
dtype='float64',
)
pd.testing.assert_frame_equal(
sc.get.obs_df(
adata, keys=["gene2", "obs1"], obsm_keys=[("eye", 0), ("sparse", 1)]
),
pd.DataFrame(
{"gene2": [1, 1], "obs1": [0, 1], "eye-0": [1, 0], "sparse-1": [0.0, 1.0]},
index=adata.obs_names,
),
)
pd.testing.assert_frame_equal(
sc.get.obs_df(
adata,
keys=["genesymbol2", "obs1"],
obsm_keys=[("eye", 0), ("sparse", 1)],
gene_symbols="gene_symbols",
),
pd.DataFrame(
{
"genesymbol2": [1, 1],
"obs1": [0, 1],
"eye-0": [1, 0],
"sparse-1": [0.0, 1.0],
},
index=adata.obs_names,
),
)
pd.testing.assert_frame_equal(
sc.get.obs_df(adata, keys=["gene2", "obs1"], layer="double"),
pd.DataFrame({"gene2": [2, 2], "obs1": [0, 1]}, index=adata.obs_names),
)
pd.testing.assert_frame_equal(
sc.get.obs_df(
adata,
keys=["raw2", "raw3", "obs1"],
gene_symbols="gene_symbols",
use_raw=True,
),
pd.DataFrame(
{"raw2": [2.0, 4.0], "raw3": [3.0, 6.0], "obs1": [0, 1]},
index=adata.obs_names,
),
)
# test only obs
pd.testing.assert_frame_equal(
sc.get.obs_df(adata, keys=["obs1", "obs2"]),
pd.DataFrame({"obs1": [0, 1], "obs2": ["a", "b"]}, index=["cell1", "cell2"]),
)
# test only var
pd.testing.assert_frame_equal(
sc.get.obs_df(adata, keys=["gene1", "gene2"]),
pd.DataFrame({"gene1": [1, 1], "gene2": [1, 1]}, index=adata.obs_names),
)
pd.testing.assert_frame_equal(
sc.get.obs_df(adata, keys=["gene1", "gene2"]),
pd.DataFrame({"gene1": [1, 1], "gene2": [1, 1]}, index=adata.obs_names),
)
# test handling of duplicated keys (in this case repeated gene names)
pd.testing.assert_frame_equal(
sc.get.obs_df(adata, keys=["gene1", "gene2", "gene1", "gene1"]),
pd.DataFrame(
{"gene1": [1, 1], "gene2": [1, 1]},
index=adata.obs_names,
)[["gene1", "gene2", "gene1", "gene1"]],
)
badkeys = ["badkey1", "badkey2"]
with pytest.raises(KeyError) as badkey_err:
sc.get.obs_df(adata, keys=badkeys)
with pytest.raises(AssertionError):
sc.get.obs_df(adata, keys=["gene1"], use_raw=True, layer="double")
assert all(badkey_err.match(k) for k in badkeys)
# test non unique index
adata = sc.AnnData(
np.arange(16).reshape(4, 4),
obs=pd.DataFrame(index=["a", "a", "b", "c"]),
var=pd.DataFrame(index=[f"gene{i}" for i in range(4)]),
)
df = sc.get.obs_df(adata, ["gene1"])
pd.testing.assert_index_equal(df.index, adata.obs_names)
def test_repeated_gene_symbols():
"""
Gene symbols column allows repeats, but we can't unambiguously get data for these values.
"""
gene_symbols = [f"symbol_{i}" for i in ["a", "b", "b", "c"]]
var_names = pd.Index([f"id_{i}" for i in ["a", "b.1", "b.2", "c"]])
adata = sc.AnnData(
np.arange(3 * 4).reshape((3, 4)),
var=pd.DataFrame({"gene_symbols": gene_symbols}, index=var_names),
)
with pytest.raises(KeyError, match="symbol_b"):
sc.get.obs_df(adata, ["symbol_b"], gene_symbols="gene_symbols")
expected = pd.DataFrame(
np.arange(3 * 4).reshape((3, 4))[:, [0, 3]].astype(np.float32),
index=adata.obs_names,
columns=["symbol_a", "symbol_c"],
)
result = sc.get.obs_df(adata, ["symbol_a", "symbol_c"], gene_symbols="gene_symbols")
pd.testing.assert_frame_equal(expected, result)
def test_backed_vs_memory():
"compares backed vs. memory"
from pathlib import Path
# get location test h5ad file in datasets
HERE = Path(sc.__file__).parent
adata_file = HERE / "datasets/10x_pbmc68k_reduced.h5ad"
adata_backed = sc.read(adata_file, backed='r')
adata = sc.read_h5ad(adata_file)
# use non-sequential list of genes
genes = list(adata.var_names[20::-2])
obs_names = ['bulk_labels', 'n_genes']
pd.testing.assert_frame_equal(
sc.get.obs_df(adata, keys=genes + obs_names),
sc.get.obs_df(adata_backed, keys=genes + obs_names),
)
# use non-sequential list of cell indices
cell_indices = list(adata.obs_names[30::-2])
pd.testing.assert_frame_equal(
sc.get.var_df(adata, keys=cell_indices + ["highly_variable"]),
sc.get.var_df(adata_backed, keys=cell_indices + ["highly_variable"]),
)
def test_column_content():
"uses a larger dataset to test column order and content"
adata = sc.datasets.pbmc68k_reduced()
# test that columns content is correct for obs_df
query = ['CST3', 'NKG7', 'GNLY', 'louvain', 'n_counts', 'n_genes']
df = sc.get.obs_df(adata, query)
for col in query:
assert col in df
np.testing.assert_array_equal(query, df.columns)
np.testing.assert_array_equal(df[col].values, adata.obs_vector(col))
# test that columns content is correct for var_df
cell_ids = list(adata.obs.sample(5).index)
query = cell_ids + ['highly_variable', 'dispersions_norm', 'dispersions']
df = sc.get.var_df(adata, query)
np.testing.assert_array_equal(query, df.columns)
for col in query:
np.testing.assert_array_equal(df[col].values, adata.var_vector(col))
def test_var_df(adata):
adata.varm["eye"] = np.eye(2, dtype=int)
adata.varm["sparse"] = sparse.csr_matrix(np.eye(2), dtype='float64')
pd.testing.assert_frame_equal(
sc.get.var_df(
adata,
keys=["cell2", "gene_symbols"],
varm_keys=[("eye", 0), ("sparse", 1)],
),
pd.DataFrame(
{
"cell2": [1, 1],
"gene_symbols": ["genesymbol1", "genesymbol2"],
"eye-0": [1, 0],
"sparse-1": [0.0, 1.0],
},
index=adata.var_names,
),
)
pd.testing.assert_frame_equal(
sc.get.var_df(adata, keys=["cell1", "gene_symbols"], layer="double"),
pd.DataFrame(
{"cell1": [2, 2], "gene_symbols": ["genesymbol1", "genesymbol2"]},
index=adata.var_names,
),
)
# test only cells
pd.testing.assert_frame_equal(
sc.get.var_df(adata, keys=["cell1", "cell2"]),
pd.DataFrame(
{"cell1": [1, 1], "cell2": [1, 1]},
index=adata.var_names,
),
)
# test only var columns
pd.testing.assert_frame_equal(
sc.get.var_df(adata, keys=["gene_symbols"]),
pd.DataFrame(
{"gene_symbols": ["genesymbol1", "genesymbol2"]},
index=adata.var_names,
),
)
# test handling of duplicated keys (in this case repeated cell names)
pd.testing.assert_frame_equal(
sc.get.var_df(adata, keys=["cell1", "cell2", "cell2", "cell1"]),
pd.DataFrame(
{"cell1": [1, 1], "cell2": [1, 1]},
index=adata.var_names,
)[["cell1", "cell2", "cell2", "cell1"]],
)
badkeys = ["badkey1", "badkey2"]
with pytest.raises(KeyError) as badkey_err:
sc.get.var_df(adata, keys=badkeys)
assert all(badkey_err.match(k) for k in badkeys)
@TRANSPOSE_PARAMS
def test_just_mapping_keys(dim, transform, func):
# https://github.com/theislab/scanpy/issues/1634
# Test for error where just passing obsm_keys, but not keys, would cause error.
mapping_attr = f"{dim}m"
kwargs = {f"{mapping_attr}_keys": [("array", 0), ("array", 1)]}
adata = transform(
sc.AnnData(
X=np.zeros((5, 5)),
obsm={
"array": np.arange(10).reshape((5, 2)),
},
)
)
expected = pd.DataFrame(
np.arange(10).reshape((5, 2)),
index=getattr(adata, f"{dim}_names"),
columns=["array-0", "array-1"],
)
result = func(adata, **kwargs)
pd.testing.assert_frame_equal(expected, result)
##################################
# Test errors for obs_df, var_df #
##################################
def test_non_unique_cols_value_error():
M, N = 5, 3
adata = sc.AnnData(
X=np.zeros((M, N)),
obs=pd.DataFrame(
np.arange(M * 2).reshape((M, 2)),
columns=["repeated_col", "repeated_col"],
index=[f"cell_{i}" for i in range(M)],
),
var=pd.DataFrame(
index=[f"gene_{i}" for i in range(N)],
),
)
with pytest.raises(ValueError):
sc.get.obs_df(adata, ["repeated_col"])
def test_non_unique_var_index_value_error():
adata = sc.AnnData(
X=np.ones((2, 3)),
obs=pd.DataFrame(index=["cell-0", "cell-1"]),
var=pd.DataFrame(index=["gene-0", "gene-0", "gene-1"]),
)
with pytest.raises(ValueError):
sc.get.obs_df(adata, ["gene-0"])
def test_keys_in_both_obs_and_var_index_value_error():
M, N = 5, 3
adata = sc.AnnData(
X=np.zeros((M, N)),
obs=pd.DataFrame(
np.arange(M),
columns=["var_id"],
index=[f"cell_{i}" for i in range(M)],
),
var=pd.DataFrame(
index=["var_id"] + [f"gene_{i}" for i in range(N - 1)],
),
)
with pytest.raises(KeyError, match="var_id"):
sc.get.obs_df(adata, ["var_id"])
@TRANSPOSE_PARAMS
def test_repeated_cols(dim, transform, func):
adata = transform(
sc.AnnData(
np.ones((5, 10)),
obs=pd.DataFrame(
np.ones((5, 2)), columns=["a_column_name", "a_column_name"]
),
var=pd.DataFrame(index=[f"gene-{i}" for i in range(10)]),
)
)
# (?s) is inline re.DOTALL
with pytest.raises(ValueError, match=rf"(?s)^adata\.{dim}.*a_column_name.*$"):
func(adata, ["gene_5"])
@TRANSPOSE_PARAMS
def test_repeated_index_vals(dim, transform, func):
# This one could be reverted, see:
# https://github.com/theislab/scanpy/pull/1583#issuecomment-770641710
alt_dim = ["obs", "var"][dim == "obs"]
adata = transform(
sc.AnnData(
np.ones((5, 10)),
var=pd.DataFrame(
index=["repeated_id"] * 2 + [f"gene-{i}" for i in range(8)]
),
)
)
with pytest.raises(
ValueError,
match=rf"(?s)adata\.{alt_dim}_names.*{alt_dim}_names_make_unique",
):
func(adata, "gene_5")
@pytest.fixture(
params=[
"obs_df",
"var_df",
"obs_df:use_raw",
"obs_df:gene_symbols",
"obs_df:gene_symbols,use_raw",
]
)
def shared_key_adata(request):
kind = request.param
adata = sc.AnnData(
np.arange(50).reshape((5, 10)),
obs=pd.DataFrame(np.zeros((5, 1)), columns=["var_id"]),
var=pd.DataFrame(index=["var_id"] + [f"gene_{i}" for i in range(1, 10)]),
)
if kind == "obs_df":
return (
adata,
sc.get.obs_df,
r"'var_id'.* adata\.obs .* adata.var_names",
)
elif kind == "var_df":
return (
adata.T,
sc.get.var_df,
r"'var_id'.* adata\.var .* adata.obs_names",
)
elif kind == "obs_df:use_raw":
adata.raw = adata
adata.var_names = [f"gene_{i}" for i in range(10)]
return (
adata,
partial(sc.get.obs_df, use_raw=True),
r"'var_id'.* adata\.obs .* adata\.raw\.var_names",
)
elif kind == "obs_df:gene_symbols":
adata.var["gene_symbols"] = adata.var_names
adata.var_names = [f"gene_{i}" for i in range(10)]
return (
adata,
partial(sc.get.obs_df, gene_symbols="gene_symbols"),
r"'var_id'.* adata\.obs .* adata\.var\['gene_symbols'\]",
)
elif kind == "obs_df:gene_symbols,use_raw":
base = adata.copy()
adata.var["gene_symbols"] = adata.var_names
adata.var_names = [f"gene_{i}" for i in range(10)]
base.raw = adata
return (
base,
partial(
sc.get.obs_df,
gene_symbols="gene_symbols",
use_raw=True,
),
r"'var_id'.* adata\.obs .* adata\.raw\.var\['gene_symbols'\]",
)
else:
assert False
def test_shared_key_errors(shared_key_adata):
adata, func, regex = shared_key_adata
# This should error
with pytest.raises(KeyError, match=regex):
func(adata, keys=["var_id"])
# This shouldn't error
_ = func(adata, keys=["gene_2"])
##############################
# rank_genes_groups_df tests #
##############################
def test_rank_genes_groups_df():
a = np.zeros((20, 3))
a[:10, 0] = 5
adata = AnnData(
a,
obs=pd.DataFrame(
{"celltype": list(chain(repeat("a", 10), repeat("b", 10)))},
index=[f"cell{i}" for i in range(a.shape[0])],
),
var=pd.DataFrame(index=[f"gene{i}" for i in range(a.shape[1])]),
)
sc.tl.rank_genes_groups(adata, groupby="celltype", method="wilcoxon", pts=True)
dedf = sc.get.rank_genes_groups_df(adata, "a")
assert dedf["pvals"].value_counts()[1.0] == 2
assert sc.get.rank_genes_groups_df(adata, "a", log2fc_max=0.1).shape[0] == 2
assert sc.get.rank_genes_groups_df(adata, "a", log2fc_min=0.1).shape[0] == 1
assert sc.get.rank_genes_groups_df(adata, "a", pval_cutoff=0.9).shape[0] == 1
del adata.uns["rank_genes_groups"]
sc.tl.rank_genes_groups(
adata,
groupby="celltype",
method="wilcoxon",
key_added="different_key",
pts=True,
)
with pytest.raises(KeyError):
sc.get.rank_genes_groups_df(adata, "a")
dedf2 = sc.get.rank_genes_groups_df(adata, "a", key="different_key")
| pd.testing.assert_frame_equal(dedf, dedf2) | pandas.testing.assert_frame_equal |
import os
import base64
from io import BytesIO
from zipfile import ZipFile
from typing import List, Dict
from pathlib import Path
import pandas as pd
import requests
from fake_useragent import UserAgent
from bio_embeddings.embed.prottrans_bert_bfd_embedder import ProtTransBertBFDEmbedder
import torch
class BertModelManager:
"""
Manager class responsible for downloading and unzipping the
BERT pre-trained model for protein embedding
"""
BERT_URL = base64.b64decode(
"aHR0cDovL21haW50ZW5hbmNlLmRhbGxhZ28udXMvcHVibGljL2Vt"
"YmVkZGluZ3MvZW1iZWRkaW5nX21vZGVscy9iZXJ0L2JlcnQuemlw"
)
STATUS_CODE_200 = 200
def __init__(self, model_dir: str = "bert_model"):
self.model_dir = model_dir
if not os.path.exists(model_dir):
os.mkdir(self.model_dir)
@staticmethod
def _get_headers() -> Dict:
"""
Return a header dict with a random User-Agent for the request,
to avoid being blocked by the server
"""
ua = UserAgent()
ua.update()
return {"User-Agent": ua.random}
def download_model(self) -> Path:
"""
Download the BERT pre-trained model and unzip it into the model directory.
This procedure should be executed once and the result
loaded by a BertEmbedding class instance
"""
path = Path(self.model_dir) / "bert"
# If model directory exists then return it immediately
if os.path.exists(path):
print("[DEBUG] BERT model exists")
return path
else:
print("[DEBUG] BERT model is downloading now")
headers = self._get_headers()
with requests.get(self.BERT_URL, headers=headers) as response:
assert response.status_code == self.STATUS_CODE_200
with ZipFile(BytesIO(response.content)) as zip_file:
zip_file.extractall(self.model_dir)
return path
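# NOTE: Hypothetical end-to-end usage sketch; the sequences, names and model directory
# are illustrative only, and downloading the model requires network access.
def _demo_phage_embedding(model_dir: str = "bert_model"):
    model_path = BertModelManager(model_dir=model_dir).download_model()
    embedder = BertEmbedding(model_dir=str(model_path))
    proteins = pd.DataFrame(
        {"sequence": ["MKTAYIAKQR", "GAVLKVLTTG"], "name": ["phage_x", "phage_x"]}
    )
    return embedder.transform(proteins, bacteriophage_level=True)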
class BertEmbedding:
"""
Embedding class responsible for loading the BERT pre-trained model for proteins
and vectorizing either a single protein or a set of proteins which represents a
single bacteriophage
In the case of a set of proteins, the vectorization returns an averaged numeric vector
"""
CPU = "cpu"
FEATURE_SPACE = 1024
SUPPORTED_COLUMNS = ["sequence", "class"]
SUPPORTED_COLUMNS_AVG = ["sequence", "name"]
def __init__(self, model_dir: str, cuda_device: int = None):
"""
If you have access to a GPU with CUDA support, the embedding is computed
on the graphics card. If not, CPU and RAM are consumed
"""
self.model_dir = model_dir
if not os.path.exists(self.model_dir):
raise Exception("BERT model wasn't downloaded yet")
self.embedder = ProtTransBertBFDEmbedder(model_directory=self.model_dir)
self.cuda_device = cuda_device
# Select GPU card (if you have more than one)
if self.cuda_device and torch.cuda.is_available():
self.device = f"cuda:{self.cuda_device}"
else:
self.device = self.CPU
def _set_column_names(self) -> None:
"""
Set a list with embedding column names
"""
self.columns = [f"BERT_{index}" for index in range(self.FEATURE_SPACE)]
def _get_vectors(self, df: pd.DataFrame, bacteriophage_level: bool = False) -> List:
"""
Return the embedding result, represented by one 1024-value vector per protein or a single averaged 1024-value vector
"""
with torch.no_grad():
vectors = []
for protein in df.itertuples():
embedding = self.embedder.embed(protein.sequence)
protein_vector = self.embedder.reduce_per_protein(embedding)
vectors.append(protein_vector)
if bacteriophage_level:
print("[DEBUG] Protein vectors are averaging to form a bacteriophage")
vectors = [
torch.tensor(vectors, device=self.device).mean(dim=0).tolist()
]
return vectors
def transform(
self, df: pd.DataFrame, bacteriophage_level: bool = False
) -> pd.DataFrame:
"""
Execute BERT embedding on a DataFrame with two supported sets of columns:
- "sequence" and "class"
- "sequence" and "name"
The first case is expected for single protein vectorization
The second case is expected for set of proteins which represent
single bacteriophage
"""
if bacteriophage_level:
# "sequence" and "name" columns are expected
assert self.SUPPORTED_COLUMNS_AVG == list(
df[self.SUPPORTED_COLUMNS_AVG].columns
)
else:
# "sequence" and "class" columns are expected
assert self.SUPPORTED_COLUMNS == list(df[self.SUPPORTED_COLUMNS].columns)
data = self._get_vectors(df, bacteriophage_level)
self._set_column_names()
result_df = | pd.DataFrame(data=data, columns=self.columns) | pandas.DataFrame |
import pickle
from io import BytesIO
from unittest.mock import Mock, patch
import numpy as np
import pandas as pd
from rdt.transformers import (
CategoricalTransformer, LabelEncodingTransformer, OneHotEncodingTransformer)
def test_categorical_numerical_nans():
"""Ensure CategoricalTransformer works on numerical + nan only columns."""
data = pd.Series([1, 2, float('nan'), np.nan])
transformer = CategoricalTransformer()
transformer.fit(data)
transformed = transformer.transform(data)
reverse = transformer.reverse_transform(transformed)
pd.testing.assert_series_equal(reverse, data)
def test_categoricaltransformer_pickle_nans():
"""Ensure that CategoricalTransformer can be pickled and loaded with nan value."""
# setup
data = pd.Series([1, 2, float('nan'), np.nan])
transformer = CategoricalTransformer()
transformer.fit(data)
transformed = transformer.transform(data)
# create pickle file on memory
bytes_io = BytesIO()
pickle.dump(transformer, bytes_io)
# rewind
bytes_io.seek(0)
# run
pickled_transformer = pickle.load(bytes_io)
# assert
pickle_transformed = pickled_transformer.transform(data)
np.testing.assert_array_equal(pickle_transformed, transformed)
def test_categoricaltransformer_strings():
"""Test the CategoricalTransformer on string data.
Ensure that the CategoricalTransformer can fit, transform, and reverse
transform on string data. Expect that the reverse transformed data
is the same as the input.
Input:
- 4 rows of string data
Output:
- The reverse transformed data
"""
# setup
data = pd.Series(['a', 'b', 'a', 'c'])
transformer = CategoricalTransformer()
# run
reverse = transformer.reverse_transform(transformer.fit_transform(data))
# assert
pd.testing.assert_series_equal(data, reverse)
def test_categoricaltransformer_strings_2_categories():
"""Test the CategoricalTransformer on string data.
Ensure that the CategoricalTransformer can fit, transform, and reverse
transform on string data, when there are 2 categories of strings with
the same value counts. Expect that the reverse transformed data is the
same as the input.
Input:
- 4 rows of string data
Output:
- The reverse transformed data
"""
# setup
data = pd.Series(['a', 'b', 'a', 'b'])
transformer = CategoricalTransformer()
reverse = transformer.reverse_transform(transformer.fit_transform(data))
# assert
pd.testing.assert_series_equal(data, reverse)
def test_categoricaltransformer_integers():
"""Test the CategoricalTransformer on integer data.
Ensure that the CategoricalTransformer can fit, transform, and reverse
transform on integer data. Expect that the reverse transformed data is the
same as the input.
Input:
- 4 rows of int data
Output:
- The reverse transformed data
"""
# setup
data = pd.Series([1, 2, 3, 2])
transformer = CategoricalTransformer()
# run
reverse = transformer.reverse_transform(transformer.fit_transform(data))
# assert
pd.testing.assert_series_equal(data, reverse)
def test_categoricaltransformer_bool():
"""Test the CategoricalTransformer on boolean data.
Ensure that the CategoricalTransformer can fit, transform, and reverse
transform on boolean data. Expect that the reverse transformed data is the
same as the input.
Input:
- 4 rows of bool data
Output:
- The reverse transformed data
"""
# setup
data = pd.Series([True, False, True, False])
transformer = CategoricalTransformer()
# run
reverse = transformer.reverse_transform(transformer.fit_transform(data))
# assert
pd.testing.assert_series_equal(data, reverse)
def test_categoricaltransformer_mixed():
"""Test the CategoricalTransformer on mixed type data.
Ensure that the CategoricalTransformer can fit, transform, and reverse
transform on mixed type data. Expect that the reverse transformed data is
the same as the input.
Input:
- 4 rows of mixed data
Output:
- The reverse transformed data
"""
# setup
data = pd.Series([True, 'a', 1, None])
transformer = CategoricalTransformer()
# run
reverse = transformer.reverse_transform(transformer.fit_transform(data))
# assert
pd.testing.assert_series_equal(data, reverse)
@patch('psutil.virtual_memory')
def test_categoricaltransformer_mixed_low_virtual_memory(psutil_mock):
"""Test the CategoricalTransformer on mixed type data with low virtual memory.
Ensure that the CategoricalTransformer can fit, transform, and reverse
transform on mixed type data, when there is low virtual memory. Expect that the
reverse transformed data is the same as the input.
Input:
- 4 rows of mixed data
Output:
- The reverse transformed data
"""
# setup
data = pd.Series([True, 'a', 1, None])
transformer = CategoricalTransformer()
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# run
reverse = transformer.reverse_transform(transformer.fit_transform(data))
# assert
pd.testing.assert_series_equal(data, reverse)
@patch('psutil.virtual_memory')
def test_categoricaltransformer_mixed_more_rows(psutil_mock):
"""Test the CategoricalTransformer on mixed type data with low virtual memory.
Ensure that the CategoricalTransformer can fit, transform, and reverse
transform on mixed type data, when there is low virtual memory and a larger
number of rows. Expect that the reverse transformed data is the same as the input.
Input:
- 4 rows of mixed data
Output:
- The reverse transformed data
"""
# setup
data = pd.Series([True, 'a', 1, None])
transform_data = pd.Series(['a', 1, None, 'a', True, 1])
transformer = CategoricalTransformer()
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# run
transformer.fit(data)
transformed = transformer.transform(transform_data)
reverse = transformer.reverse_transform(transformed)
# assert
pd.testing.assert_series_equal(transform_data, reverse)
def test_one_hot_numerical_nans():
"""Ensure OneHotEncodingTransformer works on numerical + nan only columns."""
data = pd.Series([1, 2, float('nan'), np.nan])
transformer = OneHotEncodingTransformer()
transformer.fit(data)
transformed = transformer.transform(data)
reverse = transformer.reverse_transform(transformed)
pd.testing.assert_series_equal(reverse, data)
def test_label_numerical_2d_array():
"""Ensure LabelEncodingTransformer works on numerical + nan only columns."""
data = | pd.Series([1, 2, 3, 4]) | pandas.Series |
import pandas as pd
#importing all the data from CSV files
master_df = pd.read_csv('People.csv', usecols=['playerID', 'nameFirst', 'nameLast', 'bats', 'throws', 'debut', 'finalGame'])
fielding_df = pd.read_csv('Fielding.csv',usecols=['playerID','yearID','stint','teamID','lgID','POS','G','GS','InnOuts','PO','A','E','DP'])
batting_df = pd.read_csv('Batting.csv')
awards_df = | pd.read_csv('AwardsPlayers.csv', usecols=['playerID','awardID','yearID']) | pandas.read_csv |
'''
'''
import os, glob
try:
from icecube import dataclasses, icetray, dataio
from icecube import genie_icetray
except ModuleNotFoundError:
# Not running in IceTray
pass
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
import sqlalchemy
import time
from multiprocessing import Pool
import pickle
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument("-config", "--config", type=str, required=True)
def contains_retro(frame):
try:
frame['L7_reconstructed_zenith']
return True
except:
return False
def build_standard_extraction():
standard_truths = {'energy': 'MCInIcePrimary.energy',
'position_x': 'MCInIcePrimary.pos.x',
'position_y': 'MCInIcePrimary.pos.y',
'position_z': 'MCInIcePrimary.pos.z',
'azimuth': 'MCInIcePrimary.dir.azimuth',
'zenith': 'MCInIcePrimary.dir.zenith',
'pid': 'MCInIcePrimary.pdg_encoding',
'event_time': 'event_time',
'sim_type': 'sim_type',
'interaction_type': 'interaction_type',
'elasticity': 'elasticity',
'RunID': 'RunID',
'SubrunID': 'SubrunID',
'EventID': 'EventID',
'SubEventID': 'SubEventID'}
return standard_truths
def case_handle_this(frame, sim_type):
if sim_type != 'noise':
MCInIcePrimary = frame['MCInIcePrimary']
else:
MCInIcePrimary = None
if sim_type != 'muongun' and sim_type != 'noise':
interaction_type = frame["I3MCWeightDict"]["InteractionType"]
elasticity = frame['I3GENIEResultDict']['y']
else:
interaction_type = -1
elasticity = -1
return MCInIcePrimary, interaction_type, elasticity
def is_montecarlo(frame):
mc = True
try:
frame['MCInIcePrimary']
except:
mc = False
return mc
def build_blank_extraction():
## Please note that if the simulation type is pure noise or real that these values will be appended to the truth table
blank_extraction = {'energy_log10': '-1',
'position_x': '-1',
'position_y': '-1',
'position_z': '-1',
'azimuth': '-1',
'zenith': '-1',
'pid': '-1',
'event_time': 'event_time',
'sim_type': 'sim_type',
'interaction_type': '-1',
'elasticity': '-1',
'RunID': 'RunID',
'SubrunID': 'SubrunID',
'EventID': 'EventID',
'SubEventID': 'SubEventID'}
return blank_extraction
def build_retro_extraction(is_mc):
retro_extraction = {'azimuth_retro': 'frame["L7_reconstructed_azimuth"].value',
'time_retro': 'frame["L7_reconstructed_time"].value',
'energy_retro': 'frame["L7_reconstructed_total_energy"].value',
'position_x_retro': 'frame["L7_reconstructed_vertex_x"].value',
'position_y_retro': 'frame["L7_reconstructed_vertex_y"].value',
'position_z_retro': 'frame["L7_reconstructed_vertex_z"].value',
'zenith_retro': 'frame["L7_reconstructed_zenith"].value',
'azimuth_sigma': 'frame["L7_retro_crs_prefit__azimuth_sigma_tot"].value',
'position_x_sigma': 'frame["L7_retro_crs_prefit__x_sigma_tot"].value',
'position_y_sigma': 'frame["L7_retro_crs_prefit__y_sigma_tot"].value',
'position_z_sigma': 'frame["L7_retro_crs_prefit__z_sigma_tot"].value',
'time_sigma': 'frame["L7_retro_crs_prefit__time_sigma_tot"].value',
'zenith_sigma': 'frame["L7_retro_crs_prefit__zenith_sigma_tot"].value',
'energy_sigma': 'frame["L7_retro_crs_prefit__energy_sigma_tot"].value',
'cascade_energy_retro': 'frame["L7_reconstructed_cascade_energy"].value',
'track_energy_retro': 'frame["L7_reconstructed_track_energy"].value',
'track_length_retro': 'frame["L7_reconstructed_track_length"].value',
'lvl7_probnu': 'frame["L7_MuonClassifier_FullSky_ProbNu"].value',
'lvl4_probnu': 'frame["L4_MuonClassifier_Data_ProbNu"].value',
'lvl7_prob_track': 'frame["L7_PIDClassifier_FullSky_ProbTrack"].value'}
if is_mc:
retro_extraction['osc_weight'] = 'frame["I3MCWeightDict"]["weight"]'
return retro_extraction
def extract_retro(frame):
is_mc = is_montecarlo(frame)
retro = {}
if contains_retro(frame):
retro_extraction = build_retro_extraction(is_mc)
for retro_variable in retro_extraction.keys():
retro[retro_variable] = eval(retro_extraction[retro_variable])
return retro
def extract_truth(frame, input_file, extract_these_truths = None):
if extract_these_truths == None:
extract_these_truths = build_standard_extraction()
is_mc = is_montecarlo(frame)
sim_type = find_simulation_type(is_mc,input_file)
event_time = frame['I3EventHeader'].start_time.utc_daq_time
RunID, SubrunID, EventID, SubEventID = extract_event_ids(frame)
if is_mc:
MCInIcePrimary, interaction_type, elasticity = case_handle_this(frame, sim_type)
if MCInIcePrimary != None:
## is not noise
truth = {}
for truth_variable in extract_these_truths.keys():
truth[truth_variable] = eval(extract_these_truths[truth_variable])
else:
## is real data or noise
blank_extraction = build_blank_extraction()
truth = {}
for truth_variable in blank_extraction.keys():
truth[truth_variable] = eval(blank_extraction[truth_variable])
return truth
def extract_features(frame, key, gcd_dict,calibration):
charge = []
time = []
width = []
area = []
rqe = []
x = []
y = []
z = []
if key in frame.keys():
data = frame[key]
try:
om_keys = data.keys()
except:
try:
if "I3Calibration" in frame.keys():
data = frame[key].apply(frame)
om_keys = data.keys()
else:
frame["I3Calibration"] = calibration
data = frame[key].apply(frame)
om_keys = data.keys()
except:
data = dataclasses.I3RecoPulseSeriesMap.from_frame(frame,key)
om_keys = data.keys()
for om_key in om_keys:
pulses = data[om_key]
for pulse in pulses:
charge.append(pulse.charge)
time.append(pulse.time)
width.append(pulse.width)
area.append(gcd_dict[om_key].area)
rqe.append(frame["I3Calibration"].dom_cal[om_key].relative_dom_eff)
x.append(gcd_dict[om_key].position.x)
y.append(gcd_dict[om_key].position.y)
z.append(gcd_dict[om_key].position.z)
features = {'charge': charge,
'dom_time': time,
'dom_x': x,
'dom_y': y,
'dom_z': z,
'width' : width,
'pmt_area': area,
'rde': rqe}
return features
def find_simulation_type(mc, input_file):
if mc == False:
sim_type = 'data'
else:
sim_type = 'lol'
if 'muon' in input_file:
sim_type = 'muongun'
if 'corsika' in input_file:
sim_type = 'corsika'
if 'genie' in input_file:
sim_type = 'genie'
if 'noise' in input_file:
sim_type = 'noise'
if sim_type == 'lol':
print('SIM TYPE NOT FOUND!')
return sim_type
def load_geospatial_data(gcd_path):
gcd_file = dataio.I3File(gcd_path)
g_frame = gcd_file.pop_frame(icetray.I3Frame.Geometry)
om_geom_dict = g_frame["I3Geometry"].omgeo
calibration = gcd_file.pop_frame(icetray.I3Frame.Calibration)["I3Calibration"]
return om_geom_dict, calibration
def is_empty(features):
if features['dom_x'] != None:
return False
else:
return True
def extract_event_ids(frame):
RunID = frame['I3EventHeader'].run_id
SubrunID = frame['I3EventHeader'].sub_run_id
EventID = frame['I3EventHeader'].event_id
SubEventID = frame['I3EventHeader'].sub_event_id
return RunID, SubrunID, EventID, SubEventID
def apply_event_no(extraction, event_no_list, event_counter):
out = pd.DataFrame(extraction.values()).T
out.columns = extraction.keys()
out['event_no'] = event_no_list[event_counter]
return out
def check_for_new_columns(columns, biggest_columns):
if len(columns) > len(biggest_columns):
return columns
else:
return biggest_columns
def write_dicts(settings):
input_files,id,gcd_files,outdir , max_dict_size,event_no_list, pulse_map_keys,custom_truth, db_name,verbose = settings
# Useful bits
event_counter = 0
feature_big = {}
truth_big = pd.DataFrame()
retro_big = pd.DataFrame()
file_counter = 0
output_count = 0
gcd_count = 0
for u in range(len(input_files)):
input_file = input_files[u]
gcd_dict, calibration = load_geospatial_data(gcd_files[u])
i3_file = dataio.I3File(input_file, "r")
if verbose > 0:
print('Worker %s Reading %s'%(id,input_file.split('/')[-1]))
print(input_file)
sys.stdout.flush()
gcd_count +=1
while i3_file.more() :
try:
frame = i3_file.pop_physics()
except:
frame = False
if frame :
pulse_maps = {}
for pulse_map_key in pulse_map_keys:
pulse_maps[pulse_map_key] = extract_features(frame,pulse_map_key, gcd_dict,calibration)
truths = extract_truth(frame, input_file, custom_truth)
truth = apply_event_no(truths, event_no_list, event_counter)
retros = extract_retro(frame)
if len(retros)>0:
retro = apply_event_no(retros, event_no_list, event_counter)
for pulse_map_key in pulse_map_keys:
if not is_empty(pulse_maps[pulse_map_key]) :
pulse_maps[pulse_map_key] = apply_event_no(pulse_maps[pulse_map_key], event_no_list, event_counter)
event_counter += 1
if len(feature_big) == 0:
feature_big = pulse_maps
else:
for pulse_map_key in pulse_map_keys:
feature_big[pulse_map_key] = feature_big[pulse_map_key].append(pulse_maps[pulse_map_key],ignore_index = True, sort = True)
truth_big = truth_big.append(truth, ignore_index = True, sort = True)
if len(retros)>0 :
retro_big = retro_big.append(retro, ignore_index = True, sort = True)
if len(truth_big) >= max_dict_size:
print('saving')
engine = sqlalchemy.create_engine('sqlite:///'+outdir + '/%s/tmp/worker-%s-%s.db'%(db_name,id,output_count))
truth_big.to_sql('truth',engine,index= False, if_exists = 'append')
if len(retro_big)> 0:
retro_big.to_sql('RetroReco',engine,index= False, if_exists = 'append')
for pulse_map_key in pulse_map_keys:
feature_big[pulse_map_key].to_sql(pulse_map_key,engine,index= False, if_exists = 'append')
engine.dispose()
feature_big = {} #pd.DataFrame()
truth_big = pd.DataFrame()
retro_big = pd.DataFrame()
output_count +=1
file_counter +=1
if verbose > 0:
print('Worker %s has finished %s/%s I3 files.'%(id, file_counter, len(input_files)))
if (len(feature_big) > 0):
print('saving eof')
engine = sqlalchemy.create_engine('sqlite:///'+outdir + '/%s/tmp/worker-%s-%s.db'%(db_name,id,output_count))
truth_big.to_sql('truth',engine,index= False, if_exists = 'append')
if len(retro_big)> 0:
retro_big.to_sql('RetroReco',engine,index= False, if_exists = 'append')
for pulse_map_key in pulse_map_keys:
feature_big[pulse_map_key].to_sql(pulse_map_key,engine,index= False, if_exists = 'append')
engine.dispose()
feature_big = {}
truth_big = pd.DataFrame()
retro_big = | pd.DataFrame() | pandas.DataFrame |
import h5py
import glob
import os.path
import numpy as np
import pandas as pd
import argparse
import glob
import pdb
import qtl_fdr_utilities
#writeToOneFile=True; compressed = False; overWrite=True; minimalPValue = 1; minimalFeaturePValue = 1; topMode = False; debugMode = False
#QTL_Dir = "./"; OutputDir = "./"; featureGroupFile="./featureGrouping.txt"
def minimal_qtl_processing(QTL_Dir, OutputDir, featureGroupFile, cis_mode, writeToOneFile=True, compressed = False, overWrite=True, minimalPValue = 1, minimalFeaturePValue = 1, topMode = False, topGroupMode = False , debugMode = False):
qtl_results_file='qtl_results_'
snp_metadata_file='snp_metadata_'
feature_metadata_file='feature_metadata_'
if topMode:
output_file='top_qtl_results_'
elif topGroupMode :
output_file='top_group_qtl_results_'
else:
output_file='qtl_results_'
if os.path.isfile(featureGroupFile):
feature_grouping_df = pd.read_csv(featureGroupFile,sep='\t')
else :
print("Error: feature grouping file doesn't excist.")
return(-1)
h5FilesToProcess = (glob.glob(QTL_Dir+"/qtl_*.h5"))
#iterate over h5files
#print(h5FilesToProcess)
#print(os.path.dirname(h5FilesToProcess[1]))
for file in h5FilesToProcess :
print(file)
partTmp = os.path.basename(file).replace(qtl_results_file,"").replace(".h5","")
if(debugMode):
print(partTmp)
if(writeToOneFile):
outputFile = OutputDir+output_file+"all.txt"
else:
outputFile = OutputDir+output_file+partTmp+".txt"
#print(outputFile)
if(((os.path.isfile(outputFile) or os.path.isfile(outputFile+".gz")) and not overWrite) and not writeToOneFile):
#print("Skipping: "+partTmp)
continue
#else :
#print('Processing: '+partTmp)
#print(partTmp)
if not os.path.isfile(QTL_Dir+"/"+snp_metadata_file+partTmp+".txt"):
print("Skipping: " +partTmp + " not all necessary files are present.")
continue
if not os.path.isfile(QTL_Dir+"/"+feature_metadata_file+partTmp+".txt"):
print("Skipping: " +partTmp + " not all necessary files are present.")
continue
try :
#print(QTL_Dir+"/"+feature_metadata_file+partTmp+".txt")
#print(QTL_Dir+"/"+snp_metadata_file+partTmp+".txt")
ffea= pd.read_table(QTL_Dir+"/"+feature_metadata_file+partTmp+".txt", sep='\t')
fsnp= pd.read_table(QTL_Dir+"/"+snp_metadata_file+partTmp+".txt", sep='\t')
except:
print("Issue in features or snp annotation.\n Skipping: "+partTmp)
continue
ffea = ffea.rename(index=str, columns={"chromosome": "feature_chromosome", "start": "feature_start", "end": "feature_end"})
fsnp = fsnp.rename(index=str, columns={"chromosome": "snp_chromosome", "position": "snp_position"})
#pdb.set_trace()
frez=h5py.File(file,'r')
frezkeys= np.array([k.replace('_i_','') for k in list(frez.keys())])
data={}
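        # For each reported feature, the per-SNP result arrays stored in the HDF5 file
        # are gathered key by key below and then flattened with np.hstack, so that every
        # row of the resulting `data` frame corresponds to one feature/SNP test.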
for key in ['feature_id','snp_id','p_value','beta','beta_se','empirical_feature_p_value']:
data[key]=np.zeros(len(np.unique(list(frezkeys))),dtype='object')+np.nan
for ifea,report_feature in enumerate(np.unique(list(frezkeys))):
for key in ['snp_id','p_value','beta','beta_se','empirical_feature_p_value']:
temp = np.array(frez[report_feature][key])
data[key][ifea]=np.hstack(temp).astype('U')
data['feature_id'][ifea]=np.hstack(np.repeat(report_feature,len(frez[report_feature][key])))
#pdb.set_trace()
for key in data.keys():
data[key]=np.hstack(data[key])
data=pd.DataFrame(data)
data = pd.merge(data, feature_grouping_df, on='feature_id', how='left')
#print(data.head())
data = pd.merge(data, ffea, on='feature_id', how='left')
#print(data.head())
if(len(glob.glob(QTL_Dir+'snp_qc_metrics_naContaining_feature_*.txt'))>0):
            ## Here we could use the output of glob to do this check more quickly.
temp2 = pd.DataFrame(columns=data.columns)
for key in frezkeys:
if os.path.isfile(QTL_Dir+"/snp_qc_metrics_naContaining_feature_"+key+".txt"):
fsnp_rel = pd.read_table(QTL_Dir+"/snp_qc_metrics_naContaining_feature_"+key+".txt", sep='\t')
temp_t = data.loc[data["feature_id"]==key]
fsnp_t = fsnp.loc[:,["snp_id","snp_chromosome","snp_position","assessed_allele"]]
fsnp_t = pd.merge(fsnp_t, fsnp_rel, on='snp_id', how='right')
temp_t = pd.merge(temp_t, fsnp_t, on='snp_id', how='left')
temp2 = temp2.append(temp_t,sort=False)
else:
temp_t = data.loc[data["feature_id"]==key]
                    temp_t = pd.merge(temp_t, fsnp, on='snp_id', how='left')
# coding: utf-8
# ***Visualization(Exploratory data analysis) - Phase 1 ***
# * ***Major questions to answer(A/B Testing):***
# 1. Does the installment amount affect loan status ?
# 2. Does the installment grade affect loan status ?
# 3. Which grade has highest default rate ?
# 4. Does annual income/home-ownership affect default rate ?
# 5. Which state has highest default rate ?
# * ***Text Analysis - Phase 2 ***
# 6. Are people with a certain employee title taking up more loans compared to others ?
# 7. Does a specific purpose affect loan status ?
# * ***Model Building - Phase 3***
# 8. Trying various models and comparing them
# ***Visualization(Exploratory data analysis) - Phase 1 ***
# In[50]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
# Importing the libraries
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import os
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
# Reading the dataset
data = pd.read_csv("../input/loan.csv")
data_1 = pd.DataFrame(data) # Creating a copy
# Checking the dataset
data.head()
data.tail()
data.describe()
data = data.iloc[:,2:-30].values
# In[51]:
# Setting the target vector
status = data[:,14]
unique_labels = np.unique(status, return_counts = True)
# print(unique_labels)
plt.figure()
plt.bar(unique_labels[0],unique_labels[1])
plt.xlabel('Type of label')
plt.ylabel('Frequency')
plt.title('Status categories')
plt.show()
category = unique_labels[0]
frequency = unique_labels[1]
category_count = np.vstack((category,frequency))
category_list = np.array(category_count.T).tolist()
category_list_1 = pd.DataFrame(category_list)
print(category_list_1)
# Let us consider only 2 major categories "Charged off" and "Fully Paid". A few reasons to do this:
# 1. To convert it into a binary classification problem, and to analyze in detail the effect of important variables on the loan status.
# 2. A lot of observations show status "Current", so we do not know whether they will end up as "Charged Off", "Fully Paid" or "Default".
# 3. The observations for "Default" are too few compared to "Fully Paid" or "Charged Off" to thoroughly investigate the observations with loan status "Default".
# 4. The remaining categories of "loan status" are not of prime importance for this analysis.
#
# In[52]:
category_one_data = data_1[data_1.loan_status == "Fully Paid"]
category_two_data = data_1[data_1.loan_status == "Charged Off"]
new_data = np.vstack((category_one_data,category_two_data))
# new_data_copy = pd.DataFrame(new_data)
new_data = new_data[:,2:-30]
new_data_df = pd.DataFrame(new_data)
# **Exploratory Data Analysis**
# 1. Variable under inspection: Installment amount
# Whether there is any trend with respect to the installment amount.
# For e.g.: does a higher installment amount mean a higher number of "Charged Off" observations ?
#
# In[53]:
# Creating bins for various installment amounts
installment_amt = new_data[:,5]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size,1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
installment_groups = (np.array(np.unique(binned_installment_amt, return_counts = True))).T
# A bar plot to figure out the distribution of installment amount
plt.figure()
plt.bar(installment_groups[:,0],installment_groups[:,1])
plt.xlabel('Installment_amt_grp')
plt.ylabel('Frequency')
plt.title('Distribution of Installment amount categories')
plt.show()
# Appending the installment_groups to status
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new)) # 0's = Fully Paid, 1's = Charged Off
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt,status_labels)))
status_installment_groups.columns = ['Installment_amt_grp','status_labels']
# Looking for a trend in the defaulted observations
Charged_off = status_installment_groups[status_installment_groups.status_labels == 1]
temp_1 = Charged_off.iloc[:,0].values
plot_var_1 = np.array(np.unique(temp_1, return_counts = True))
plot_var_1 = plot_var_1[:,:-1]
plot_var_11 = plot_var_1.T # Eliminating the 10th group, since it has only one reading
# Looking for a trend in the successful observations
Fully_paid = status_installment_groups[status_installment_groups.status_labels == 0]
temp_2 = Fully_paid.iloc[:,0].values
plot_var_2 = np.array(np.unique(temp_2, return_counts = True))
plot_var_22 = plot_var_2.T
# Concatenating the two variables
plot_var_stack = np.hstack((plot_var_11,plot_var_22))
plot_var_stack = pd.DataFrame(plot_var_stack)
plot_var_stack = plot_var_stack.drop(plot_var_stack.columns[2], axis=1)
plot_var_stack.columns = ['Installment_amt_grp','Charged Off','Fully Paid']
# Percent stacked
# From raw value to percentage
totals = [i+j for i,j in zip(plot_var_stack['Charged Off'], plot_var_stack['Fully Paid'])]
C_Off = [i / j * 100 for i,j in zip(plot_var_stack['Charged Off'], totals)]
mean_C_Off = np.mean(C_Off)
F_Paid = [i / j * 100 for i,j in zip(plot_var_stack['Fully Paid'], totals)]
plot_var_stack = np.array(plot_var_stack)
group_number = plot_var_stack[:,0]
p1 = plt.bar(group_number, C_Off, color='#7f6d5f', edgecolor='white', width=0.5)
p2 = plt.bar(group_number, F_Paid, bottom=C_Off, color='#557f2d', edgecolor='white', width=0.5)
#Axes.axhline(y=mean_C_Off)
plt.xlabel('Installment_amt_grp')
plt.ylabel('Percent loan status')
plt.title('Installment amount categories')
plt.legend((p1, p2), ('Charged Off', 'Fully Paid'), loc = 'upper right')
plt.show()
# Though we can observe a slight variation in the "% Charged Off" values, overall we can say that the installment amount does not seem to affect the loan status.
#
# 2) Variable under inspection: Grade.
# Whether the grade affects the loan status ?
# In[54]:
installment_grade = new_data[:,6]
# print(np.unique(installment_grade, return_counts = True))
installment_grade_list = np.array(np.unique(installment_grade, return_counts = True))
installment_grade_df = pd.DataFrame(installment_grade_list.T)
print(installment_grade_df)
# Distribution of Installment grade
plt.figure()
plt.bar(installment_grade_df[0],installment_grade_df[1])
plt.xlabel('Installment_grade')
plt.ylabel('Frequency')
plt.title('Distribution of Installment grade categories')
plt.show()
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade,status_labels)))
status_installment_grade.columns = ['Installment_grade','status_labels']
# Looking for a trend in the defaulted observations
Charged_off_grade = status_installment_grade[status_installment_grade.status_labels == 1]
temp_11 = Charged_off_grade.iloc[:,0].values
plot_var_grade = np.array(np.unique(temp_11, return_counts = True))
plot_var_grade_11 = plot_var_grade.T
# Looking for a trend in the successful observations
Fully_Paid_grade = status_installment_grade[status_installment_grade.status_labels == 0]
temp_22 = Fully_Paid_grade.iloc[:,0].values
plot_var_grade_2 = np.array(np.unique(temp_22, return_counts = True))
plot_var_grade_22 = plot_var_grade_2.T
# Concatenating the two variables
plot_var_stack_1 = np.hstack((plot_var_grade_11,plot_var_grade_22))
plot_var_stack_1 = pd.DataFrame(plot_var_stack_1)
plot_var_stack_1 = plot_var_stack_1.drop(plot_var_stack_1.columns[2], axis=1)
plot_var_stack_1.columns = ['Installment_grade_grp','Charged Off','Fully Paid']
# Percent stacked
# From raw value to percentage
totals = [i+j for i,j in zip(plot_var_stack_1['Charged Off'], plot_var_stack_1['Fully Paid'])]
C_Off = [i / j * 100 for i,j in zip(plot_var_stack_1['Charged Off'], totals)]
mean_C_Off = np.mean(C_Off)
F_Paid = [i / j * 100 for i,j in zip(plot_var_stack_1['Fully Paid'], totals)]
# plot_var_stack_1 = np.array(plot_var_stack_1)
group_number = plot_var_stack_1['Installment_grade_grp']
p1 = plt.bar(group_number, C_Off, color='#7f6d5f', edgecolor='white', width=0.5)
p2 = plt.bar(group_number, F_Paid, bottom=C_Off, color='#557f2d', edgecolor='white', width=0.5)
#Axes.axhline(y=mean_C_Off)
plt.xlabel('Installment_grade')
plt.ylabel('Percent loan status')
plt.title('Installment grade categories')
plt.legend((p1, p2), ('Charged Off', 'Fully Paid'), loc = 'upper right')
plt.show()
# 1. The grade does seem to affect the default rate: the higher the grade, the higher the percentage of "Charged Off" loans.
# 2. Also, from the plot we can conclude that Grade G has the highest "% Charged Off".
# 3. To further investigate this we need to know what the grade refers to: does it represent the risk factor in lending the money ?
# If yes, then the results make sense: the higher the grade, the higher the risk factor.
# 4. Also, from the distribution plot we can see that only a handful of loans are being lent to people classified in "Grade G". They should be more cautious in their approach to lending money to customers classified in the higher grades.
#
# 3) Variable under inspection: Home Status
# In[55]:
home_status = new_data_df[10]
# print(np.unique(home_status, return_counts = True))
home_status_list = np.array(np.unique(home_status, return_counts = True))
home_status_df = pd.DataFrame(home_status_list.T)
print(home_status_df)
# Distribution of Home Status
plt.figure()
plt.bar(home_status_df[0],home_status_df[1])
plt.xlabel('Home Status')
plt.ylabel('Frequency')
plt.title('Home Status categories')
plt.show()
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status,status_labels)))
status_home_status.columns = ['Home Status','status_labels']
# Looking for a trend in the defaulted observations
Charged_off_home_status = status_home_status[status_home_status.status_labels == 1]
temp_41 = Charged_off_home_status.iloc[:,0].values
plot_var_home_status = np.array(np.unique(temp_41, return_counts = True))
plot_var_home_status_44 = pd.DataFrame(plot_var_home_status.T)
# Looking for a trend in the successful observations
Fully_Paid_home_status = status_home_status[status_home_status.status_labels == 0]
temp_42 = Fully_Paid_home_status.iloc[:,0].values
plot_var_home_status_2 = np.array(np.unique(temp_42, return_counts = True))
plot_var_home_status_55 = pd.DataFrame(plot_var_home_status_2.T)
plot_var_home_status_55 = plot_var_home_status_55.drop(0) # Eliminating the home status = "ANY", since it has only one reading
# Concatenating the two variables
plot_var_stack_3 = np.hstack((plot_var_home_status_44,plot_var_home_status_55))
plot_var_stack_3 = pd.DataFrame(plot_var_stack_3)
plot_var_stack_3 = plot_var_stack_3.drop(plot_var_stack_3.columns[2], axis=1)
plot_var_stack_3.columns = ['Home Status','Charged Off','Fully Paid']
# Percent stacked
# From raw value to percentage
totals = [i+j for i,j in zip(plot_var_stack_3['Charged Off'], plot_var_stack_3['Fully Paid'])]
C_Off = [i / j * 100 for i,j in zip(plot_var_stack_3['Charged Off'], totals)]
mean_C_Off = np.mean(C_Off)
F_Paid = [i / j * 100 for i,j in zip(plot_var_stack_3['Fully Paid'], totals)]
#plot_var_stack_3 = np.array(plot_var_stack_3)
group_number = plot_var_stack_3['Home Status']
p1 = plt.bar(group_number, C_Off, color='#7f6d5f', edgecolor='white', width=0.5)
p2 = plt.bar(group_number, F_Paid, bottom=C_Off, color='#557f2d', edgecolor='white', width=0.5)
#Axes.axhline(y=mean_C_Off)
plt.xlabel('Home Status')
plt.ylabel('Percent loan status')
plt.title('Home Status categories')
plt.legend((p1, p2), ('Charged Off', 'Fully Paid'), loc = 'upper right')
plt.show()
# From the stacked percentage plot, we can observe that the feature "Home Status" has no potential effect on our target variable "loan status"
# 4) Variable under inspection: Annual Income
#
# To investigate this variable I bin the annual income using four cut points (USD 40,000 / 70,000 / 100,000 / 150,000), which gives five groups:
# 1. People earning less than USD 40,000.
# 2. People earning between USD 40,000 and USD 70,000.
# 3. People earning between USD 70,000 and USD 100,000.
# 4. People earning between USD 100,000 and USD 150,000.
# 5. People earning more than USD 150,000.
#
#
# In[56]:
## Now checking the effect of annual income on loan status
# Creating bins for various income amounts
annual_income = new_data[:,11]
#bins_2 = np.linspace(annual_income.min(), annual_income.max(), 3)
bins_2 = np.array([40000,70000,100000,150000])
annual_income = annual_income.astype(float).reshape(annual_income.size,1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
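# Quick illustration of what np.digitize does with these cut points (the incomes are
# made-up example values, not taken from the data): with bins_2 = [40000, 70000, 100000, 150000],
# an income of 35,000 falls in group 0, 55,000 in group 1, 85,000 in group 2,
# 120,000 in group 3 and 200,000 in group 4, e.g.
#   np.digitize(np.array([[35000.], [55000.], [85000.], [120000.], [200000.]]), bins_2)
#   -> array([[0], [1], [2], [3], [4]])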
annual_groups = (np.array(np.unique(binned_annual_income, return_counts = True))).T
# A bar plot to figure out the distribution of income amount
plt.figure()
plt.bar(annual_groups[:,0],annual_groups[:,1])
plt.xlabel('Annual income amount group')
plt.ylabel('Frequency')
plt.title('Annual income amount categories')
plt.legend(loc="upper right")
plt.show()
# Appending the income_groups to status
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income,status_labels)))
status_annual_groups.columns = ['Annual_income_grp','status_labels']
# Looking for a trend in the defaulted observations
Charged_off_annual_income = status_annual_groups[status_annual_groups.status_labels == 1]
temp_51 = Charged_off_annual_income.iloc[:,0].values
plot_var_annual_income = np.array(np.unique(temp_51, return_counts = True))
plot_var_annual_income_66 = pd.DataFrame(plot_var_annual_income.T)
# Looking for a trend in the successful observations
Fully_Paid_annual_income = status_annual_groups[status_annual_groups.status_labels == 0]
temp_52 = Fully_Paid_annual_income.iloc[:,0].values
plot_var_annual_income_2 = np.array(np.unique(temp_52, return_counts = True))
plot_var_annual_income_77 = pd.DataFrame(plot_var_annual_income_2.T)
#plot_var_annual_income_55 = plot_var_home_status_55.drop(0) # Eliminating the home status = "any", since as only one reading
# Concatenating the two variables
plot_var_stack_4 = np.hstack((plot_var_annual_income_66,plot_var_annual_income_77))
plot_var_stack_4 = pd.DataFrame(plot_var_stack_4)
plot_var_stack_4 = plot_var_stack_4.drop(plot_var_stack_4.columns[2], axis=1)
plot_var_stack_4.columns = ['Annual Income Group','Charged Off','Fully Paid']
# Percent stacked
# From raw value to percentage
totals = [i+j for i,j in zip(plot_var_stack_4['Charged Off'], plot_var_stack_4['Fully Paid'])]
C_Off = [i / j * 100 for i,j in zip(plot_var_stack_4['Charged Off'], totals)]
mean_C_Off = np.mean(C_Off)
F_Paid = [i / j * 100 for i,j in zip(plot_var_stack_4['Fully Paid'], totals)]
#plot_var_stack_4 = np.array(plot_var_stack_4)
group_number = plot_var_stack_4['Annual Income Group']
p1 = plt.bar(group_number, C_Off, color='#7f6d5f', edgecolor='white', width=0.5)
p2 = plt.bar(group_number, F_Paid, bottom=C_Off, color='#557f2d', edgecolor='white', width=0.5)
#Axes.axhline(y=mean_C_Off)
plt.xlabel('Annual income amount group')
plt.ylabel('Percent loan status')
plt.title('Annual income amount categories')
plt.legend((p1, p2), ('Charged Off', 'Fully Paid'), loc = 'upper right')
plt.show()
# We can observe a slight downward trend, which suggests that people with higher income are less likely to get "Charged Off".
#
# 5) Variable under inspection: State
# An important question here would be to check whether the state affects the loan status. Also, to find out which state has highest "% Charged Off".
# In[57]:
# Separating the variable under investigation
state = new_data_df[21]
#print(np.unique(state, return_counts = True))
state_list = np.array(np.unique(state, return_counts = True))
state_df = pd.DataFrame(state_list.T)
print(state_df)
# Distribution of State
plt.figure()
plt.bar(state_df[0],state_df[1])
plt.xlabel('State')
plt.ylabel('Frequency')
plt.title('State')
plt.show()
state = pd.DataFrame(state)
status_state = pd.DataFrame(np.hstack((state,status_labels)))
status_state.columns = ['State','status_labels']
# Looking for a trend in the defaulted observations
Charged_off_state = status_state[status_state.status_labels == 1]
temp_61 = Charged_off_state.iloc[:,0].values
plot_var_state = np.array(np.unique(temp_61, return_counts = True))
plot_var_state_88 = pd.DataFrame(plot_var_state.T)
# Looking for a trend in the successful observations
Fully_Paid_state = status_state[status_state.status_labels == 0]
temp_62 = Fully_Paid_state.iloc[:,0].values
plot_var_state_2 = np.array(np.unique(temp_62, return_counts = True))
plot_var_state_99 = pd.DataFrame(plot_var_state_2.T)
# * We know the US has only 50 states, but we have a list of 51. On investigation we can see that DC is included even though it isn't a state.
# * We also notice that it's present in both cases, the charged off as well as the fully paid observations.
# * So I decide to simply eliminate DC from the list (keep this in mind).
# * Also, states like ME and ND have no people with "Charged Off" observations, so we will just take them off the list as well and check for any trends in the state variable.
#
# In[58]:
plot_var_state_88 = plot_var_state_88.drop(7)
plot_var_state_99 = plot_var_state_99.drop([7,21,28]) # Dropping DC, ME and ND so both lists cover the same states
# Concatenating the two variables
plot_var_stack_5 = np.hstack((plot_var_state_88,plot_var_state_99))
plot_var_stack_5 = pd.DataFrame(plot_var_stack_5)
plot_var_stack_5 = plot_var_stack_5.drop(plot_var_stack_5.columns[2], axis=1)
plot_var_stack_5.columns = ['state','Charged Off','Fully Paid']
# Percent stacked
# From raw value to percentage
totals = [i+j for i,j in zip(plot_var_stack_5['Charged Off'], plot_var_stack_5['Fully Paid'])]
C_Off = [i / j * 100 for i,j in zip(plot_var_stack_5['Charged Off'], totals)]
mean_C_Off = np.mean(C_Off)
F_Paid = [i / j * 100 for i,j in zip(plot_var_stack_5['Fully Paid'], totals)]
#plot_var_stack_5 = np.array(plot_var_stack_5)
group_number = plot_var_stack_5['state']
p1 = plt.bar(group_number, C_Off, color='#7f6d5f', edgecolor='white', width=0.5)
p2 = plt.bar(group_number, F_Paid, bottom=C_Off, color='#557f2d', edgecolor='white', width=0.5)
plt.xlabel('State')
plt.ylabel('Percent loan status')
plt.title('State')
plt.legend((p1, p2), ('Charged Off', 'Fully Paid'), loc = 'upper right')
plt.show()
###### Sort in order and print top 5 states with max default % ########
# Concatenating C_Off and state
C_Off = pd.DataFrame(C_Off)
temp_plot = np.hstack((plot_var_stack_5, C_Off))
temp_plot = pd.DataFrame(temp_plot)
""" MCH API ver 0.1
Author: <NAME>
License: CC-BY-SA 4.0
2020 Mexico
"""
import os
from flask import Flask, jsonify, json, Response
from flask_restful import Api, Resource, reqparse, abort
from flask_mysqldb import MySQL
import pandas as pd
import numpy as np
import json
from os.path import abspath, dirname, join
app = Flask(__name__)
# Mysql connection
app.config['MYSQL_HOST'] = os.getenv('MCH_DB_HOST')
app.config['MYSQL_USER'] = os.getenv('MCH_DB_USER')
app.config['MYSQL_PASSWORD'] = os.getenv('MCH_DB_PASSWORD')
app.config['MYSQL_DB'] = os.getenv('MCH_DB_NAME')
app.config['MYSQL_PORT'] = int(os.getenv('MCH_DB_PORT'))
app.config['SECRET_KEY'] = os.getenv("APP_SECRET")
mysql = MySQL(app)
api = Api(app)
# dataframe for stations table
stnmdata = pd.DataFrame()
# read MCH language definition from mch.dbn
filemch = open('mch.dbn', 'r')
filemch.readline() # odbc connector
filemch.readline() # mysql5
filemch.readline() # interface language
mchlang = filemch.readline() # database language
# read fields and tables names definition file
deftbfl = pd.read_csv('MCHtablasycampos.def', sep = "\t", names = ['sec','type', 'id_sec', 'esp', 'eng', 'fra', '4', 'comment'], encoding='utf_8')
# new dataframe for the specific language
ltbfl = pd.DataFrame()
# looking up the specific fields and tables for the language
if int(mchlang) == 1:
ltbfl = deftbfl[['id_sec','esp']]
ltbfl.set_index('id_sec')
if int(mchlang) == 2:
ltbfl = deftbfl[['id_sec','eng']]
ltbfl.set_index('id_sec')
if int(mchlang) == 3:
ltbfl = deftbfl[['id_sec','fra']]
ltbfl.set_index('id_sec')
def deg_to_dms(deg):
d = int(deg)
md = abs(deg - d) * 60
m = int(md)
sd = (md - m) * 60
return [d, m, sd]
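# Quick illustration of deg_to_dms (the coordinate is a made-up example value):
#   deg_to_dms(-99.1332) -> [-99, 7, 59.52]  (seconds approximate)
# i.e. integer degrees, integer minutes and decimal seconds; these parts are later
# joined into the 'DD°MM´SS' strings stored in the station catalogue.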
class stations(Resource):
def get(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstaciones']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstacion']
strqry='select * from ' +stntable.iloc[0,1] +' order by ' +stnfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
stnmdata = pd.DataFrame(data=dataqry,columns=['Station','StationName','StationName2','TimeZone','Longitude','Latitude','Altitude','Longitude2','Latitude2','DMSlongitude','DMSLatitude','Statee','RegManagmt','Catchment','Subcatchment',
'OperatnlRegion','HydroReg','RH(2)','Municipality','CodeB','CodeG','CodeCB','CodePB','CodeE','CodeCL','CodeHG','CodePG','CodeNw','Code1','Code2','Code3','MaxOrdStrgLvl','MaxOrdStrgVol',
'MaxExtStrgLvl','MaxExtStrgVol','SpillwayLevel','SpillwayStorage','FreeSpillwayLevel','FreeSpillwayStorage','DeadStrgLevel','DeadStrgCapac','UsableStorageCapLev','UsableStorage','HoldingStorage',
'Key1fil','Key2fil','Key3fil','CritLevelSta','MinLevelSta','MaxLevelSta','CritFlow','MinDischarge','MaxDischarge','Stream','Distance','Infrastructure','Type','Usee'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(stations, "/API/stations")
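# Client-side sketch (illustration only, not part of the service): assuming the API is
# served locally on the default Flask port, the station catalogue could be fetched with
# the `requests` package, e.g.
#   import requests
#   stations = requests.get('http://localhost:5000/API/stations').json()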
qry_station_req_arg = reqparse.RequestParser()
pars = qry_station_req_arg.add_argument("stn_id",type=str,help="Station ID",required=True)
class qry_station(Resource):
def get(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstaciones']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstacion']
parser = reqparse.RequestParser()
parser.add_argument('stn_id')
args = parser.parse_args()
stn_id = args.get('stn_id')
strqry='select * from ' +stntable.iloc[0,1] +' where ' +stnfield.iloc[0,1] +'="'+ stn_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
qrystation = qry.fetchall()
rcount=qry.rowcount
qry.close
if rcount > 0:
stnmdata = pd.DataFrame(data=qrystation,columns=['Station','StationName','StationName2','TimeZone','Longitude','Latitude','Altitude','Longitude2','Latitude2','DMSlongitude','DMSLatitude','Statee','RegManagmt','Catchment','Subcatchment',
'OperatnlRegion','HydroReg','RH','Municipality','CodeB','CodeG','CodeCB','CodePB','CodeE','CodeCL','CodeHG','CodePG','CodeNw','Code1','Code2','Code3','MaxOrdStrgLvl','MaxOrdStrgVol',
'MaxExtStrgLvl','MaxExtStrgVol','SpillwayLevel','SpillwayStorage','FreeSpillwayLevel','FreeSpillwayStorage','DeadStrgLevel','DeadStrgCapac','UsableStorageCapLev','UsableStorage','HoldingStorage',
'Key1fil','Key2fil','Key3fil','CritLevelSta','MinLevelSta','MaxLevelSta','CritFlow','MinDischarge','MaxDischarge','Stream','Distance','Infrastructure','Type','Usee'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="Station not found...")
#abort_if_stn_not_exist("stn_id")
return parsed
def post(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstaciones']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstacion']
parser = reqparse.RequestParser()
parser.add_argument('file')
parser.add_argument('stn_id')
parser.add_argument('stn_name')
parser.add_argument('stn_name2')
parser.add_argument('t_zone')
parser.add_argument('long')
parser.add_argument('lat')
parser.add_argument('alt')
parser.add_argument('state_id')
parser.add_argument('reg_m')
parser.add_argument('catchm')
parser.add_argument('s_cat')
parser.add_argument('o_reg')
parser.add_argument('hydro_r')
parser.add_argument('rh')
parser.add_argument('mun_id')
parser.add_argument('mosl')
parser.add_argument('mosv')
parser.add_argument('mesl')
parser.add_argument('mesv')
parser.add_argument('s_level')
parser.add_argument('s_stor')
parser.add_argument('fs_level')
parser.add_argument('fs_stor')
parser.add_argument('ds_level')
parser.add_argument('ds_cap')
parser.add_argument('us_capl')
parser.add_argument('ustor')
parser.add_argument('hstor')
parser.add_argument('crl_s')
parser.add_argument('mnl_s')
parser.add_argument('mxl_s')
parser.add_argument('cr_f')
parser.add_argument('mn_dis')
parser.add_argument('mx_dis')
parser.add_argument('stream')
parser.add_argument('dist')
parser.add_argument('infr')
parser.add_argument('type')
parser.add_argument('use')
args = parser.parse_args()
# retrieve parameters
jfile = args.get('file')
stn_id = args.get('stn_id')
stn_name = args.get('stn_name')
stn_name2 = args.get('stn_name2')
t_zone = args.get('t_zone')
long2 = args.get('long')
lat2 = args.get('lat')
alt = args.get('alt')
state_id = args.get('state_id')
reg_m = args.get('reg_m')
catchm = args.get('catchm')
s_cat = args.get('s_cat')
o_reg = args.get('o_reg')
hydro_r = args.get('hydro_r')
rh = args.get('rh')
mun_id = args.get('mun_id')
mosl = args.get('mosl')
mosv = args.get('mosv')
mesl = args.get('mesl')
mesv = args.get('mesv')
s_level = args.get('s_level')
s_stor = args.get('s_stor')
fs_level = args.get('fs_level')
fs_stor = args.get('fs_stor')
ds_level = args.get('ds_level')
ds_cap = args.get('ds_cap')
us_capl = args.get('us_capl')
ustor = args.get('ustor')
hstor = args.get('hstor')
crl_s = args.get('crl_s')
mnl_s = args.get('mnl_s')
mxl_s = args.get('mxl_s')
cr_f = args.get('cr_f')
mn_dis = args.get('mn_dis')
mx_dis = args.get('mx_dis')
stream = args.get('stream')
dist = args.get('dist')
infr = args.get('infr')
typee = args.get('type')
usee = args.get('use')
# check if input is at file
if jfile in (None, ''):
Latitude=deg_to_dms(float(lat2))
Longitude=deg_to_dms(float(long2))
slong2=str(Longitude[0])+'°'+str(Longitude[1]) +'´' +str(Longitude[2])
slat2=str(Latitude[0])+'°'+str(Latitude[1]) +'´' +str(Latitude[2])
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +str(stn_id) +'","' +str(stn_name) +'","' +str(stn_name2) +'","' +str(t_zone) +'","' + str(long2)
+ '","' +str(lat2) +'","' +str(alt) +'","' +str(long2) +'","' +str(lat2) +'","' +slong2 +'","' +slat2 +'","' +str(state_id) +'","' +str(reg_m)
+ '","' +str(catchm) +'","' +str(s_cat) +'","' +str(o_reg) +'","' +str(hydro_r) +'","' +str(rh) +'","' +str(mun_id) +'","","","","","","","","","","","","","' + str(mosl)
+ '","' +str(mosv) +'","' +str(mesl) +'","' +str(mesv) +'","' +str(s_level) +'","' +str(s_stor) +'","' +str(fs_level) +'","' + str(fs_stor)
+ '","' +str(ds_level) +'","' +str(ds_cap) +'","' +str(us_capl) +'","' +str(ustor) +'","' +str(hstor) +'","","","","' +str(crl_s) +'","' + str(mnl_s)
+ '","' +str(mxl_s) +'","' +str(cr_f) +'","' +str(mn_dis) +'","' +str(mx_dis) +'","' +str(stream) +'","' +str(dist) +'","' +str(infr) +'","' + str(typee)
+ '","' +str(usee) +'")')
qry.execute(strqry)
else:
f=open(jfile,'r')
filej = f.read()
f.close()
jdata = json.loads(filej)
data = pd.DataFrame(jdata)
fields = data.columns.tolist()
tdata=len(data.index)
rows=list(range(0,tdata))
if int(tdata) > 1:
for n in rows:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'","' +data.iloc[int(n),2] +'","' +data.iloc[int(n),3] +'","' + data.iloc[int(n),4]
+ '","' +data.iloc[int(n),5] +'","' +str(data.iloc[int(n),6]) +'","' +str(data.iloc[int(n),7]) +'","' +str(data.iloc[int(n),8]) +'","' +data.iloc[int(n),9] +'","' +data.iloc[int(n),10] +'","' +data.iloc[int(n),11]
+ '","' +data.iloc[int(n),12] + '","' +data.iloc[int(n),13] +'","' +data.iloc[int(n),14] +'","' +data.iloc[int(n),15] +'","' +data.iloc[int(n),16] +'","' +data.iloc[int(n),17] +'","' +data.iloc[int(n),18]
+ '","' +data.iloc[int(n),19] +'","' +data.iloc[int(n),20] +'","' +data.iloc[int(n),21] +'","' +data.iloc[int(n),22] +'","' +data.iloc[int(n),23] +'","' +data.iloc[int(n),24] +'","' +data.iloc[int(n),25]
+ '","' +data.iloc[int(n),26] + '","' +data.iloc[int(n),27] +'","' +data.iloc[int(n),28] +'","' +data.iloc[int(n),29] +'","' +data.iloc[int(n),30] +'","' +data.iloc[int(n),31]
+ '","' +data.iloc[int(n),32] +'","' +data.iloc[int(n),33] +'","' +data.iloc[int(n),34] +'","' +data.iloc[int(n),35] +'","' +data.iloc[int(n),36] +'","' +data.iloc[int(n),37] +'","' + data.iloc[int(n),38]
+ '","' +data.iloc[int(n),39] +'","' +data.iloc[int(n),40] +'","' +data.iloc[int(n),41] +'","' +data.iloc[int(n),42] +'","' +data.iloc[int(n),43] +'","' +data.iloc[int(n),44] +'","' +data.iloc[int(n),45]
+ '","' +data.iloc[int(n),46] +'","' +data.iloc[int(n),47] +'","' + data.iloc[int(n),48] +'","' +data.iloc[int(n),49] +'","' +data.iloc[int(n),50] +'","' +data.iloc[int(n),51] +'","' +data.iloc[int(n),52]
+ '","' +data.iloc[int(n),53] +'","' +data.iloc[int(n),54] +'","' +data.iloc[int(n),55] +'","' +data.iloc[int(n),56] +'","' +data.iloc[int(n),57] +'")')
qry.execute(strqry)
else:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[0,0] +'","' +data.iloc[0,1] +'","' +data.iloc[0,2] +'","' +data.iloc[0,3] +'","' + data.iloc[0,4]
+ '","' +data.iloc[0,5] +'","' +str(data.iloc[0,6]) +'","' +str(data.iloc[0,7]) +'","' +str(data.iloc[0,8]) +'","' +data.iloc[0,9] +'","' +data.iloc[0,10] +'","' +data.iloc[0,11]
+ '","' +data.iloc[0,12] + '","' +data.iloc[0,13] +'","' +data.iloc[0,14] +'","' +data.iloc[0,15] +'","' +data.iloc[0,16] +'","' +data.iloc[0,17] +'","' +data.iloc[0,18]
+ '","' +data.iloc[0,19] +'","' +data.iloc[0,20] +'","' +data.iloc[0,21] +'","' +data.iloc[0,22] +'","' +data.iloc[0,23] +'","' +data.iloc[0,24] +'","' +data.iloc[0,25]
+ '","' +data.iloc[0,26] + '","' +data.iloc[0,27] +'","' +data.iloc[0,28] +'","' +data.iloc[0,29] +'","' +data.iloc[0,30] +'","' +data.iloc[0,31]
+ '","' +data.iloc[0,32] +'","' +data.iloc[0,33] +'","' +data.iloc[0,34] +'","' +data.iloc[0,35] +'","' +data.iloc[0,36] +'","' +data.iloc[0,37] +'","' + data.iloc[0,38]
+ '","' +data.iloc[0,39] +'","' +data.iloc[0,40] +'","' +data.iloc[0,41] +'","' +data.iloc[0,42] +'","' +data.iloc[0,43] +'","' +data.iloc[0,44] +'","' +data.iloc[0,45]
+ '","' +data.iloc[0,46] +'","' +data.iloc[0,47] +'","' + data.iloc[0,48] +'","' +data.iloc[0,49] +'","' +data.iloc[0,50] +'","' +data.iloc[0,51] +'","' +data.iloc[0,52]
+ '","' +data.iloc[0,53] +'","' +data.iloc[0,54] +'","' +data.iloc[0,55] +'","' +data.iloc[0,56] +'","' +data.iloc[0,57] +'")')
qry.execute(strqry)
return 'Station stored',201
def delete(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstaciones']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstacion']
parser = reqparse.RequestParser()
parser.add_argument('stn_id')
args = parser.parse_args()
stn_id = args.get('stn_id')
strqry='delete from ' +stntable.iloc[0,1] +' where ' +stnfield.iloc[0,1] +'="'+ stn_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
return 'Station deleted',204
api.add_resource(qry_station, "/API/stations/qry_station")
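# Client-side sketch (illustration only): the station id 'XYZ' below is a hypothetical
# example value, and a local server on the default Flask port is assumed.
#   import requests
#   base = 'http://localhost:5000/API/stations/qry_station'
#   one = requests.get(base, params={'stn_id': 'XYZ'}).json()   # fetch a single station
#   requests.delete(base, params={'stn_id': 'XYZ'})             # remove that station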
class stngroups(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntGruposestac']
nfield = ltbfl[ltbfl['id_sec'] == 'ncGrupoEstac']
strqry='select distinct(' +nfield.iloc[0,1] +') from ' +ntable.iloc[0,1] +' order by ' +nfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
stnmdata = pd.DataFrame(data=dataqry,columns=['Stngroup'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(stngroups, "/API/stngroups")
class qry_stngroup(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntGruposestac']
nfield = ltbfl[ltbfl['id_sec'] == 'ncGrupoEstac']
parser = reqparse.RequestParser()
parser.add_argument('stngp_id')
args = parser.parse_args()
stngp_id = args.get('stngp_id')
strqry='select * from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ stngp_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
if rcount > 0:
stnmdata = pd.DataFrame(data=dataqry,columns=['Stngroup','Secuen','Station'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="Stationgroup not found...")
return parsed
def post(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntGruposestac']
nfield = ltbfl[ltbfl['id_sec'] == 'ncGrupoEstac']
parser = reqparse.RequestParser()
parser.add_argument('file')
        args = parser.parse_args()
        jfile = args.get('file')
        f=open(jfile,'r')
        filej = f.read()
        f.close()
        jdata = json.loads(filej)
        data = pd.DataFrame(jdata)
        tdata=len(data.index)
        rows=list(range(0,tdata))
        for n in rows:
            strqry = ('insert ignore into ' +ntable.iloc[0,1] +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'","' +data.iloc[int(n),2] +'")')
            qry.execute(strqry)
return 'Stationgroup stored',201
def delete(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntGruposestac']
nfield = ltbfl[ltbfl['id_sec'] == 'ncGrupoEstac']
parser = reqparse.RequestParser()
parser.add_argument('stngp_id')
args = parser.parse_args()
stngp_id = args.get('stngp_id')
strqry='delete from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ stngp_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
return 'Stationgroup deleted',204
api.add_resource(qry_stngroup, "/API/stngroups/qry_stngroup")
class variables(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntVariables']
nfield = ltbfl[ltbfl['id_sec'] == 'ncVariable']
strqry='select distinct(' +nfield.iloc[0,1] +') from ' +ntable.iloc[0,1] +' order by ' +nfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
stnmdata = pd.DataFrame(data=dataqry,columns=['Variable'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(variables, "/API/variables")
class qry_variable(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntVariables']
nfield = ltbfl[ltbfl['id_sec'] == 'ncVariable']
parser = reqparse.RequestParser()
parser.add_argument('var_id')
args = parser.parse_args()
var_id = args.get('var_id')
strqry='select * from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ var_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
if rcount > 0:
stnmdata = pd.DataFrame(data=dataqry,columns=['Variable','VariabAbbrev','VariabDescrn','TableName','Unit','TypeDDorDE','CumulType','NbrDecimal','CalcbyGrp','CalcDTaD'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="Variable not found...")
return parsed
api.add_resource(qry_variable, "/API/variables/qry_variable")
class states(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntEstados']
nfield = ltbfl[ltbfl['id_sec'] == 'ncEstado']
strqry='select * from ' +ntable.iloc[0,1] +' order by ' +nfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
stnmdata = pd.DataFrame(data=dataqry,columns=['Statee','State2','Statename'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(states, "/API/states")
class qry_state(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntEstados']
nfield = ltbfl[ltbfl['id_sec'] == 'ncEstado']
parser = reqparse.RequestParser()
parser.add_argument('state_id')
args = parser.parse_args()
state_id = args.get('state_id')
strqry='select * from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ state_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
if rcount > 0:
stnmdata = pd.DataFrame(data=dataqry,columns=['Statee','State2','Statename'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="State not found...")
return parsed
def post(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstados']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstado']
parser = reqparse.RequestParser()
parser.add_argument('file')
parser.add_argument('state_id')
parser.add_argument('state_2')
parser.add_argument('state_name')
args = parser.parse_args()
# retrieve parameters
jfile = args.get('file')
state_id = args.get('state_id')
state_2 = args.get('state_2')
state_name = args.get('state_name')
# check if input is at file
if jfile in (None, ''):
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +str(state_id) +'","' +str(state_2) +'","' +str(state_name) +'")')
qry.execute(strqry)
else:
f=open(jfile,'r')
filej = f.read()
f.close()
jdata = json.loads(filej)
data = pd.DataFrame(jdata)
fields = data.columns.tolist()
tdata=len(data.index)
rows=list(range(0,tdata))
if int(tdata) > 1:
for n in rows:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'","' +data.iloc[int(n),2] +'")')
qry.execute(strqry)
else:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[0,0] +'","' +data.iloc[0,1] +'","' +data.iloc[0,2] +'")')
qry.execute(strqry)
return 'State stored',201
def delete(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntEstados']
nfield = ltbfl[ltbfl['id_sec'] == 'ncEstado']
parser = reqparse.RequestParser()
parser.add_argument('state_id')
args = parser.parse_args()
        state_id = args.get('state_id')
strqry='delete from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ state_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
return 'State deleted',204
api.add_resource(qry_state, "/API/states/qry_state")
class municipalities(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntMunicipios']
nfield = ltbfl[ltbfl['id_sec'] == 'ncMunicipio']
strqry='select * from ' +ntable.iloc[0,1] +' order by ' +nfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
stnmdata = pd.DataFrame(data=dataqry,columns=['Municipality','Municipality2','MunicipalityName'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(municipalities, "/API/municipalities")
class qry_municipality(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntMunicipios']
nfield = ltbfl[ltbfl['id_sec'] == 'ncMunicipio']
parser = reqparse.RequestParser()
parser.add_argument('mun_id')
args = parser.parse_args()
mun_id = args.get('mun_id')
strqry='select * from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ mun_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
if rcount > 0:
stnmdata = pd.DataFrame(data=dataqry,columns=['Municipality','Municipality2','MunicipalityName'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="Municipality not found...")
return parsed
def post(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntMunicipios']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncMunicipio']
parser = reqparse.RequestParser()
parser.add_argument('file')
parser.add_argument('mun_id')
parser.add_argument('mun_2')
parser.add_argument('mun_name')
args = parser.parse_args()
# retrieve parameters
jfile = args.get('file')
mun_id = args.get('mun_id')
mun_2 = args.get('mun_2')
mun_name = args.get('mun_name')
# check if input is at file
if jfile in (None, ''):
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +str(mun_id) +'","' +str(mun_2) +'","' +str(mun_name) +'")')
qry.execute(strqry)
else:
f=open(jfile,'r')
filej = f.read()
f.close()
jdata = json.loads(filej)
data = pd.DataFrame(jdata)
fields = data.columns.tolist()
tdata=len(data.index)
rows=list(range(0,tdata))
if int(tdata) > 1:
for n in rows:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'","' +data.iloc[int(n),2] +'")')
qry.execute(strqry)
else:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[0,0] +'","' +data.iloc[0,1] +'","' +data.iloc[0,2] +'")')
qry.execute(strqry)
return 'Municipality stored',201
def delete(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntMunicipios']
nfield = ltbfl[ltbfl['id_sec'] == 'ncMunicipio']
parser = reqparse.RequestParser()
parser.add_argument('mun_id')
args = parser.parse_args()
        mun_id = args.get('mun_id')
strqry='delete from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ mun_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
return 'Municipality deleted',204
api.add_resource(qry_municipality, "/API/municipalities/qry_municipality")
class hydroregions(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntRegionhidr']
nfield = ltbfl[ltbfl['id_sec'] == 'ncReghidr']
strqry='select * from ' +ntable.iloc[0,1] +' order by ' +nfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
stnmdata = pd.DataFrame(data=dataqry,columns=['Hydroreg','Hydroreg2','HydrRegionName'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(hydroregions, "/API/hydroregions")
class qry_hydroregion(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntRegionhidr']
nfield = ltbfl[ltbfl['id_sec'] == 'ncReghidr']
parser = reqparse.RequestParser()
parser.add_argument('hr_id')
args = parser.parse_args()
hr_id = args.get('hr_id')
strqry='select * from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ hr_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
if rcount > 0:
stnmdata = pd.DataFrame(data=dataqry,columns=['Hydroreg','Hydroreg2','HydrRegionName'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="Hydro Region not found...")
return parsed
def post(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntRegionhidr']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncReghidr']
parser = reqparse.RequestParser()
parser.add_argument('file')
parser.add_argument('hr_id')
parser.add_argument('hr_2')
parser.add_argument('hr_name')
args = parser.parse_args()
# retrieve parameters
jfile = args.get('file')
hr_id = args.get('hr_id')
hr_2 = args.get('hr_2')
hr_name = args.get('hr_name')
# check if input is at file
if jfile in (None, ''):
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +str(hr_id) +'","' +str(hr_2) +'","' +str(hr_name) +'")')
qry.execute(strqry)
else:
f=open(jfile,'r')
filej = f.read()
f.close()
jdata = json.loads(filej)
data = pd.DataFrame(jdata)
fields = data.columns.tolist()
tdata=len(data.index)
rows=list(range(0,tdata))
if int(tdata) > 1:
for n in rows:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'","' +data.iloc[int(n),2] +'")')
qry.execute(strqry)
else:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[0,0] +'","' +data.iloc[0,1] +'","' +data.iloc[0,2] +'")')
qry.execute(strqry)
return 'Hydrological Region stored',201
def delete(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntRegionhidr']
nfield = ltbfl[ltbfl['id_sec'] == 'ncReghidr']
parser = reqparse.RequestParser()
parser.add_argument('hr_id')
args = parser.parse_args()
hr_id = args.get('hr_id')
strqry='delete from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ hr_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
return 'Hydrological Region deleted',204
api.add_resource(qry_hydroregion, "/API/hydroregions/qry_hydroregion")
class catchments(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntCuencas']
nfield = ltbfl[ltbfl['id_sec'] == 'ncCuenca']
strqry='select * from ' +ntable.iloc[0,1] +' order by ' +nfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
stnmdata = pd.DataFrame(data=dataqry,columns=['Catchment','Catchment2','CatchmentName'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(catchments, "/API/catchments")
class qry_catchment(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntCuencas']
nfield = ltbfl[ltbfl['id_sec'] == 'ncCuenca']
parser = reqparse.RequestParser()
parser.add_argument('cat_id')
args = parser.parse_args()
cat_id = args.get('cat_id')
strqry='select * from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ cat_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
if rcount > 0:
stnmdata = pd.DataFrame(data=dataqry,columns=['Catchment','Catchment2','CatchmentName'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="Catchment not found...")
return parsed
def post(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntCuencas']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncCuenca']
parser = reqparse.RequestParser()
parser.add_argument('file')
parser.add_argument('cat_id')
parser.add_argument('cat_2')
parser.add_argument('cat_name')
args = parser.parse_args()
# retrieve parameters
jfile = args.get('file')
cat_id = args.get('cat_id')
cat_2 = args.get('cat_2')
cat_name = args.get('cat_name')
# check if input is at file
if jfile in (None, ''):
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +str(cat_id) +'","' +str(cat_2) +'","' +str(cat_name) +'")')
qry.execute(strqry)
else:
f=open(jfile,'r')
filej = f.read()
f.close()
jdata = json.loads(filej)
data = pd.DataFrame(jdata)
fields = data.columns.tolist()
tdata=len(data.index)
rows=list(range(0,tdata))
if int(tdata) > 1:
for n in rows:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'","' +data.iloc[int(n),2] +'")')
qry.execute(strqry)
else:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[0,0] +'","' +data.iloc[0,1] +'","' +data.iloc[0,2] +'")')
qry.execute(strqry)
return 'Catchment stored',201
def delete(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntCuencas']
nfield = ltbfl[ltbfl['id_sec'] == 'ncCuenca']
parser = reqparse.RequestParser()
parser.add_argument('cat_id')
args = parser.parse_args()
cat_id = args.get('cat_id')
strqry='delete from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ cat_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
return 'Catchment deleted',204
api.add_resource(qry_catchment, "/API/catchments/qry_catchment")
class subcatchments(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntSubcuencas']
nfield = ltbfl[ltbfl['id_sec'] == 'ncSubcuenca']
strqry='select * from ' +ntable.iloc[0,1] +' order by ' +nfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
stnmdata = pd.DataFrame(data=dataqry,columns=['Subcatchment','Subcatchment2','SubCatchmentName'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(subcatchments, "/API/subcatchments")
class qry_subcatchment(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntSubcuencas']
nfield = ltbfl[ltbfl['id_sec'] == 'ncSubcuenca']
parser = reqparse.RequestParser()
parser.add_argument('scat_id')
args = parser.parse_args()
scat_id = args.get('scat_id')
strqry='select * from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ scat_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
if rcount > 0:
stnmdata = pd.DataFrame(data=dataqry,columns=['Subcatchment','Subcatchment2','SubCatchmentName'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="Subcatchment not found...")
return parsed
def post(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntSubcuencas']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncSubcuenca']
parser = reqparse.RequestParser()
parser.add_argument('file')
parser.add_argument('scat_id')
parser.add_argument('scat_2')
parser.add_argument('scat_name')
args = parser.parse_args()
# retrieve parameters
jfile = args.get('file')
scat_id = args.get('scat_id')
scat_2 = args.get('scat_2')
scat_name = args.get('scat_name')
# check if input is at file
if jfile in (None, ''):
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +str(scat_id) +'","' +str(scat_2) +'","' +str(scat_name) +'")')
qry.execute(strqry)
else:
f=open(jfile,'r')
filej = f.read()
f.close()
jdata = json.loads(filej)
data = pd.DataFrame(jdata)
fields = data.columns.tolist()
tdata=len(data.index)
rows=list(range(0,tdata))
if int(tdata) > 1:
for n in rows:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'","' +data.iloc[int(n),2] +'")')
qry.execute(strqry)
else:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[0,0] +'","' +data.iloc[0,1] +'","' +data.iloc[0,2] +'")')
qry.execute(strqry)
return 'Subcatchment stored',201
def delete(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntSubcuencas']
nfield = ltbfl[ltbfl['id_sec'] == 'ncSubcuenca']
parser = reqparse.RequestParser()
parser.add_argument('scat_id')
args = parser.parse_args()
scat_id = args.get('scat_id')
strqry='delete from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ scat_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
return 'Subcatchment deleted',204
api.add_resource(qry_subcatchment, "/API/subcatchments/qry_subcatchment")
class units(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntUnidades']
nfield = ltbfl[ltbfl['id_sec'] == 'ncUnidad']
strqry='select * from ' +ntable.iloc[0,1] +' order by ' +nfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
stnmdata = pd.DataFrame(data=dataqry,columns=['Unit','UnitDescription'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(units, "/API/units")
class qry_unit(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntUnidades']
nfield = ltbfl[ltbfl['id_sec'] == 'ncUnidad']
parser = reqparse.RequestParser()
parser.add_argument('unit_id')
args = parser.parse_args()
unit_id = args.get('unit_id')
strqry='select * from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ unit_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
if rcount > 0:
stnmdata = pd.DataFrame(data=dataqry,columns=['Unit','UnitDescription'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="Unit not found...")
return parsed
def post(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntUnidades']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncUnidad']
parser = reqparse.RequestParser()
parser.add_argument('file')
parser.add_argument('unit_id')
parser.add_argument('unit_desc')
args = parser.parse_args()
# retrieve parameters
jfile = args.get('file')
unit_id = args.get('unit_id')
unit_desc = args.get('unit_desc')
# check if input is at file
if jfile in (None, ''):
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +str(unit_id) +'","' +str(unit_desc) +'")')
qry.execute(strqry)
else:
f=open(jfile,'r')
filej = f.read()
f.close()
jdata = json.loads(filej)
data = pd.DataFrame(jdata)
fields = data.columns.tolist()
tdata=len(data.index)
rows=list(range(0,tdata))
if int(tdata) > 1:
for n in rows:
                    strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'")')
qry.execute(strqry)
else:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[0,0] +'","' +data.iloc[0,1] +'")')
qry.execute(strqry)
return 'Unit stored',201
def delete(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntUnidades']
nfield = ltbfl[ltbfl['id_sec'] == 'ncUnidad']
parser = reqparse.RequestParser()
parser.add_argument('unit_id')
args = parser.parse_args()
unit_id = args.get('unit_id')
strqry='delete from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ unit_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
return 'Unit deleted',204
api.add_resource(qry_unit, "/API/units/qry_unit")
class dailydata(Resource):
def get(self):
qry = mysql.connection.cursor()
vartable = ltbfl[ltbfl['id_sec'] == 'ntVariables']
varfield = ltbfl[ltbfl['id_sec'] == 'ncVariable']
vtfield = ltbfl[ltbfl['id_sec'] == 'ncTipoDDoDE']
vnfield = ltbfl[ltbfl['id_sec'] == 'ncNombreTabla']
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstaciones']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstacion']
datefield = ltbfl[ltbfl['id_sec'] == 'ncFecha']
valuefield = ltbfl[ltbfl['id_sec'] == 'ncValor']
maxvalfield = ltbfl[ltbfl['id_sec'] == 'ncValorMax']
minvalfield = ltbfl[ltbfl['id_sec'] == 'ncValorMin']
maxdatefield = ltbfl[ltbfl['id_sec'] == 'ncFechaHoraMax']
mindatefield = ltbfl[ltbfl['id_sec'] == 'ncFechaHoraMin']
parser = reqparse.RequestParser()
parser.add_argument('stn_id')
parser.add_argument('var_id')
parser.add_argument('date_ini')
parser.add_argument('date_end')
parser.add_argument('datee')
args = parser.parse_args()
stn_id = args.get('stn_id')
var_id = args.get('var_id')
date_ini = args.get('date_ini')
date_end = args.get('date_end')
datee = args.get('datee')
# query variable table for tablename
strqry='select * from ' +vartable.iloc[0,1] +' where ' +varfield.iloc[0,1] +'="'+ var_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
datavar = qry.fetchall()
rcount=qry.rowcount
extreme = False
if rcount > 0:
variable = pd.DataFrame(data=datavar, dtype="string")
tablename = variable.iloc[0,5] +variable.iloc[0,3]
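            # The physical table name is built from the variables catalogue: the DD/DE
            # code (column TypeDDorDE) is prefixed to the per-variable table suffix
            # (column TableName); 'DE' marks the daily-extremes tables handled below.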
if variable.iloc[0,5] == 'DE':
extreme = True
else:
abort(404, message="Variable not found...")
qry.close
# check if it is a date or a period
if datee in (None, ''):
if extreme == True:
strqry='select ' +stnfield.iloc[0,1] + ',' +datefield.iloc[0,1] + ',' +valuefield.iloc[0,1] + ',' +maxdatefield.iloc[0,1] + ',' +maxvalfield.iloc[0,1] + ',' +mindatefield.iloc[0,1] + ',' +minvalfield.iloc[0,1] +' from ' + tablename +' where ' + stnfield.iloc[0,1] +'="'+ str(stn_id) +'" and ' + datefield.iloc[0,1] +'>="' + str(date_ini) +'" and ' + datefield.iloc[0,1] +'<="' + str(date_end) +'"'
else:
strqry='select ' +stnfield.iloc[0,1] + ',' +datefield.iloc[0,1] + ',' +valuefield.iloc[0,1] +' from ' + tablename +' where ' + stnfield.iloc[0,1] +'="'+ str(stn_id) +'" and ' + datefield.iloc[0,1] +'>="' + str(date_ini) +'" and ' + datefield.iloc[0,1] +'<="' + str(date_end) +'"'
else:
if extreme == True:
strqry='select ' +stnfield.iloc[0,1] + ',' +datefield.iloc[0,1] + ',' +valuefield.iloc[0,1] + ',' +maxdatefield.iloc[0,1] + ',' +maxvalfield.iloc[0,1] + ',' +mindatefield.iloc[0,1] + ',' +minvalfield.iloc[0,1] +' from ' + tablename +' where ' + stnfield.iloc[0,1] +'="'+ str(stn_id) +'" and ' + datefield.iloc[0,1] +'="' + str(datee) +'"'
else:
strqry='select ' +stnfield.iloc[0,1] + ',' +datefield.iloc[0,1] + ',' +valuefield.iloc[0,1] +' from ' + tablename +' where ' + stnfield.iloc[0,1] +'="'+ str(stn_id) +'" and ' + datefield.iloc[0,1] +'="' + str(datee) +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
        qry.close()
if rcount > 0:
if extreme == True:
ddata = pd.DataFrame(data=dataqry,columns=['Station','Date','Value','MaxValDate','MaxValue','MinValDate','MinValue'])
else:
ddata = pd.DataFrame(data=dataqry,columns=['Station','Date','Value'])
jsondata = ddata.to_json(orient="records",date_format='iso', date_unit='s')
parsed = json.loads(jsondata)
else:
abort(404, message="There is no data...")
return parsed
def post(self):
qry = mysql.connection.cursor()
vartable = ltbfl[ltbfl['id_sec'] == 'ntVariables']
varfield = ltbfl[ltbfl['id_sec'] == 'ncVariable']
vtfield = ltbfl[ltbfl['id_sec'] == 'ncTipoDDoDE']
vnfield = ltbfl[ltbfl['id_sec'] == 'ncNombreTabla']
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstaciones']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstacion']
datefield = ltbfl[ltbfl['id_sec'] == 'ncFecha']
valuefield = ltbfl[ltbfl['id_sec'] == 'ncValor']
maxvalfield = ltbfl[ltbfl['id_sec'] == 'ncValorMax']
minvalfield = ltbfl[ltbfl['id_sec'] == 'ncValorMin']
maxdatefield = ltbfl[ltbfl['id_sec'] == 'ncFechaHoraMax']
mindatefield = ltbfl[ltbfl['id_sec'] == 'ncFechaHoraMin']
parser = reqparse.RequestParser()
parser.add_argument('file')
parser.add_argument('stn_id')
parser.add_argument('var_id')
parser.add_argument('datee')
parser.add_argument('value')
parser.add_argument('maxvaldate')
parser.add_argument('maxvalue')
parser.add_argument('minvaldate')
parser.add_argument('minvalue')
args = parser.parse_args()
jfile = args.get('file')
stn_id = args.get('stn_id')
var_id = args.get('var_id')
datee = args.get('datee')
value = args.get('value')
maxvaldate = args.get('maxvaldate')
maxvalue = args.get('maxvalue')
minvaldate = args.get('minvaldate')
minvalue = args.get('minvalue')
# query variable table for tablename
strqry='select * from ' +vartable.iloc[0,1] +' where ' +varfield.iloc[0,1] +'="'+ var_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
datavar = qry.fetchall()
rcount=qry.rowcount
extreme = False
if rcount > 0:
variable = pd.DataFrame(data=datavar, dtype="string")
tablename = variable.iloc[0,5] +variable.iloc[0,3]
if (variable.iloc[0,5] == 'DE' or variable.iloc[0,5] == 'de'):
extreme = True
else:
abort(404, message="Variable not found...")
        # cursor stays open; it is reused for the insert statements below
        # verify if input is a file
if jfile in (None, ''):
# check if it is extreme data
if extreme == True:
strqry = ('insert ignore into ' +tablename +' values("' +str(stn_id) +'","' +str(datee) +'","' +str(value) + '","",' +str(maxvaldate) +'","' +str(maxvalue) +'",",""' +str(minvaldate) +'","' +str(minvalue) +'","","0","0","0","API")')
else:
strqry = ('insert ignore into ' +tablename +' values("' +str(stn_id) +'","' +str(datee) +'","' +str(value) +'","","0","0","0","API")')
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
            qry.close()
else:
f=open(jfile,'r')
filej = f.read()
f.close()
jdata = json.loads(filej)
data = pd.DataFrame(jdata)
fields = data.columns.tolist()
tdata=len(data.index)
rows=list(range(0,tdata))
if int(tdata) > 1:
for n in rows:
# check if it is extreme data
if extreme == True:
strqry = ('insert ignore into ' +tablename +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'","' +data.iloc[int(n),2] + '","",' +data.iloc[int(n),3] +'","' +data.iloc[int(n),4] +'",",""' +data.iloc[int(n),5] +'","' +data.iloc[int(n),6] +'","","0","0","0","API")')
else:
strqry = ('insert ignore into ' +tablename +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'","' +data.iloc[int(n),2] +'")')
qry.execute(strqry)
else:
# check if it is extreme data
if extreme == True:
strqry = ('insert ignore into ' +tablename +' values("' +data.iloc[0,0] +'","' +data.iloc[0,1] +'","' +data.iloc[0,2] + '","",' +data.iloc[0,3] +'","' +data.iloc[0,4] +'",",""' +data.iloc[0,5] +'","' +data.iloc[0,6] +'","","0","0","0","API")')
else:
strqry = ('insert ignore into ' +tablename +' values("' +data.iloc[0,0] +'","' +data.iloc[0,1] +'","' +data.iloc[0,2] +'")')
qry.execute(strqry)
return 'Data stored',201
def delete(self):
qry = mysql.connection.cursor()
        vartable = ltbfl[ltbfl['id_sec'] == 'ntVariables']
        varfield = ltbfl[ltbfl['id_sec'] == 'ncVariable']
        stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstacion']
        datefield = ltbfl[ltbfl['id_sec'] == 'ncFecha']
parser = reqparse.RequestParser()
parser.add_argument('stn_id')
parser.add_argument('var_id')
parser.add_argument('datee')
args = parser.parse_args()
stn_id = args.get('stn_id')
var_id = args.get('var_id')
datee = args.get('datee')
# query variable table for tablename
strqry='select * from ' +vartable.iloc[0,1] +' where ' +varfield.iloc[0,1] +'="'+ var_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
datavar = qry.fetchall()
rcount=qry.rowcount
if rcount > 0:
variable = pd.DataFrame(data=datavar, dtype="string")
tablename = variable.iloc[0,5] +variable.iloc[0,3]
else:
abort(404, message="Variable not found...")
        # cursor stays open; it is reused for the delete statement below
strqry='delete from ' +tablename +' where ' +stnfield.iloc[0,1] +'="'+ stn_id +'" and ' +datefield.iloc[0,1] +'="'+ datee +'"'
strqry=strqry.lower()
qry.execute(strqry)
return 'Record deleted',204
api.add_resource(dailydata, "/API/data/dailydata")
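# Illustrative request for the dailydata resource above (host/port, station id and variable id are
# made up; real values depend on the catalogue tables loaded into ltbfl):
#   curl "http://localhost:5000/API/data/dailydata?stn_id=10064&var_id=PT&date_ini=2020-01-01&date_end=2020-01-31"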
class detaildata(Resource):
def get(self):
qry = mysql.connection.cursor()
vartable = ltbfl[ltbfl['id_sec'] == 'ntVariables']
varfield = ltbfl[ltbfl['id_sec'] == 'ncVariable']
vtfield = ltbfl[ltbfl['id_sec'] == 'ncTipoDDoDE']
vnfield = ltbfl[ltbfl['id_sec'] == 'ncNombreTabla']
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstaciones']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstacion']
datefield = ltbfl[ltbfl['id_sec'] == 'ncFecha']
valuefield = ltbfl[ltbfl['id_sec'] == 'ncValor']
parser = reqparse.RequestParser()
parser.add_argument('stn_id')
parser.add_argument('var_id')
parser.add_argument('date_ini')
parser.add_argument('date_end')
parser.add_argument('datee')
args = parser.parse_args()
stn_id = args.get('stn_id')
var_id = args.get('var_id')
date_ini = args.get('date_ini')
date_end = args.get('date_end')
datee = args.get('datee')
# query variable table for tablename
strqry='select * from ' +vartable.iloc[0,1] +' where ' +varfield.iloc[0,1] +'="'+ var_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
datavar = qry.fetchall()
rcount=qry.rowcount
if rcount > 0:
variable = pd.DataFrame(data=datavar, dtype="string")
tablename = 'dt' +variable.iloc[0,3]
else:
abort(404, message="Variable not found...")
        # cursor stays open; it is reused for the data query below
        # check if it is a date or a period
if datee in (None, ''):
strqry='select ' +stnfield.iloc[0,1] + ',' +datefield.iloc[0,1] + ',' +valuefield.iloc[0,1] +' from ' + tablename +' where ' + stnfield.iloc[0,1] +'="'+ str(stn_id) +'" and ' + datefield.iloc[0,1] +'>="' + str(date_ini) +'" and ' + datefield.iloc[0,1] +'<="' + str(date_end) +'"'
else:
strqry='select ' +stnfield.iloc[0,1] + ',' +datefield.iloc[0,1] + ',' +valuefield.iloc[0,1] +' from ' + tablename +' where ' + stnfield.iloc[0,1] +'="'+ str(stn_id) +'" and ' + datefield.iloc[0,1] +'="' + str(datee) +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
        qry.close()
if rcount > 0:
ddata = pd.DataFrame(data=dataqry,columns=['Station','Date','Value'])
jsondata = ddata.to_json(orient="records",date_format='iso', date_unit='s')
parsed = json.loads(jsondata)
else:
abort(404, message="There is no data...")
return parsed
def post(self):
qry = mysql.connection.cursor()
vartable = ltbfl[ltbfl['id_sec'] == 'ntVariables']
varfield = ltbfl[ltbfl['id_sec'] == 'ncVariable']
vnfield = ltbfl[ltbfl['id_sec'] == 'ncNombreTabla']
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstaciones']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstacion']
datefield = ltbfl[ltbfl['id_sec'] == 'ncFecha']
valuefield = ltbfl[ltbfl['id_sec'] == 'ncValor']
parser = reqparse.RequestParser()
parser.add_argument('file')
parser.add_argument('stn_id')
parser.add_argument('var_id')
parser.add_argument('datee')
parser.add_argument('value')
args = parser.parse_args()
jfile = args.get('file')
stn_id = args.get('stn_id')
var_id = args.get('var_id')
datee = args.get('datee')
value = args.get('value')
# query variable table for tablename
strqry='select * from ' +vartable.iloc[0,1] +' where ' +varfield.iloc[0,1] +'="'+ var_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
datavar = qry.fetchall()
rcount=qry.rowcount
if rcount > 0:
variable = pd.DataFrame(data=datavar, dtype="string")
tablename = 'dt' +variable.iloc[0,3]
else:
abort(404, message="Variable not found...")
        # cursor stays open; it is reused for the insert statements below
        # verify if input is a file
if jfile in (None, ''):
strqry = ('insert ignore into ' +tablename +' values("' +str(stn_id) +'","' +str(datee) +'","' +str(value) +'","' +str(value) +'","","API","0")')
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
            qry.close()
else:
f=open(jfile,'r')
filej = f.read()
f.close()
jdata = json.loads(filej)
data = pd.DataFrame(jdata)
fields = data.columns.tolist()
tdata=len(data.index)
rows=list(range(0,tdata))
if int(tdata) > 1:
for n in rows:
strqry = ('insert ignore into ' +tablename +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'","' +data.iloc[int(n),2] +'","' +data.iloc[int(n),2] +'","","API","0")')
qry.execute(strqry)
else:
strqry = ('insert ignore into ' +tablename +' values("' +data.iloc[0,0] +'","' +data.iloc[0,1] +'","' +data.iloc[0,2] +'","' +data.iloc[0,2] +'","","API","0")')
qry.execute(strqry)
return 'Data stored',201
def delete(self):
qry = mysql.connection.cursor()
        vartable = ltbfl[ltbfl['id_sec'] == 'ntVariables']
        varfield = ltbfl[ltbfl['id_sec'] == 'ncVariable']
        stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstacion']
        datefield = ltbfl[ltbfl['id_sec'] == 'ncFecha']
parser = reqparse.RequestParser()
parser.add_argument('stn_id')
parser.add_argument('var_id')
parser.add_argument('datee')
args = parser.parse_args()
stn_id = args.get('stn_id')
var_id = args.get('var_id')
datee = args.get('datee')
# query variable table for tablename
strqry='select * from ' +vartable.iloc[0,1] +' where ' +varfield.iloc[0,1] +'="'+ var_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
datavar = qry.fetchall()
rcount=qry.rowcount
if rcount > 0:
variable = | pd.DataFrame(data=datavar, dtype="string") | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Sun May 10 20:03:20 2020
@author: hexx
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from shutil import copyfile
from myFunctions import createFolder
today = pd.to_datetime('today')
today =today.strftime("%Y-%m-%d")
today = '2020-09-12'
# Model_Date = np.load("./Model_Parameter.npy",allow_pickle='TRUE').item()
PODA_Model = np.load("./PODA_Model_"+today+".npy",allow_pickle='TRUE').item()
google_Mobility_Day = PODA_Model['ML_File_Date']
start_Date = '03-03-2020'
YYG_projection_Date=PODA_Model['YYG_File_Date']
ML_Model=PODA_Model['ML_File_Date']
fuel_mobility_factor_file = ML_Model
apple_fuel_Factor_file = PODA_Model['ML_File_Date']
model_mark =''
isopen='' #'_noreopen'
fuel_Demand_EIA = PODA_Model['Fuel_Demand_EIA'].reset_index()
fuel_Demand_EIA = fuel_Demand_EIA.set_index('Date')
fig2 = plt.figure(figsize=(6, 5))
fig3 = plt.figure(figsize=(6, 5))
ax2 = fig2.add_subplot(1, 1, 1)
ax3 = fig3.add_subplot(1, 1, 1)
Line_Style =['.-m', '-.r', '-b', '--g']
caseID =['lower', 'mean', 'upper', 'MIT']
for case_i, case in enumerate(caseID):
if case == 'mean':
caseLabel = 'Reference'
else:
caseLabel = case
COVID = PODA_Model['Data_for_Mobility_Projection_'+case]
COVID = COVID[COVID['State Name']== 'Michigan']
data_used = PODA_Model['Google_Apple_Mobility_Projection_'+case].reset_index()
data_used = data_used[(data_used['date']> pd.to_datetime(start_Date))]
data_used = data_used.set_index('date')
NHTS_Category_Share = PODA_Model['NHTS Category Share']
NHTS_State_Fuel_Share = PODA_Model['NHTS State Fuel Share']
df_StateName_Code = PODA_Model['StateName_StateCode']
cols = ['State Name']
data_used = data_used.join(df_StateName_Code.set_index(cols), on=cols, how='left')
data_used = data_used.join(NHTS_Category_Share.set_index('State Code'), on='State Code', how='left')
'''
#Google mobility-fuel correlation model
'''
#load model correlation factors
factor = PODA_Model['Google_Mobility_EIA_Factor']
# data_used['work factor'] = 1 + data_used['Workplaces']/100*factor[0]
# data_used['school factor'] = 1 + data_used['Workplaces']/100*factor[1]
# data_used['medical factor'] = 1 + data_used['Grocery and Pharmacy']/100*factor[2]
# data_used['shopping factor'] = 1 + data_used['Grocery and Pharmacy']/100*factor[3]
# data_used['social factor'] = 1 + data_used['Retail and Recreation']/100*factor[4]
# data_used['park factor'] = 1 + data_used['Parks']/100*factor[5]
# data_used['transport someone factor'] = 1+ data_used['Retail and Recreation']/100*factor[7]
# data_used['meals factor'] = 1 + data_used['Retail and Recreation']/100*factor[6]
# data_used['else factor'] = 1+ data_used['Retail and Recreation']/100*factor[7]
data_used['work factor'] = 1 + data_used['Workplaces']/100*factor[0]
data_used['school factor'] = 1 + data_used['Workplaces']/100*factor[1]
data_used['medical factor'] = 1 + data_used['Grocery and Pharmacy']/100*factor[2]
data_used['shopping factor'] = 1 + data_used['Grocery and Pharmacy']/100*factor[3]
data_used['social factor'] = 1 + data_used['Retail and Recreation']/100*factor[4]
data_used['park factor'] = 1 + data_used['Parks']/100*factor[5]
data_used['transport someone factor'] = 1+ data_used['Retail and Recreation']/100*factor[7] #Workplaces
data_used['meals factor'] = 1 + data_used['Retail and Recreation']/100*factor[6]
data_used['else factor'] = 1+ data_used['Retail and Recreation']/100*factor[7] #workplace
data_used['Google State Mobility Predict'] = (data_used['Work']*data_used['work factor'] + \
data_used['School/Daycare/Religious activity']*data_used['school factor'] + \
data_used['Medical/Dental services']*data_used['medical factor'] + \
data_used['Shopping/Errands']*data_used['shopping factor'] + \
data_used['Social/Recreational']*factor[8]*data_used['social factor'] + \
data_used['Social/Recreational']*(1-factor[8])*data_used['park factor'] + \
data_used['Meals']*data_used['meals factor'] +\
data_used['Transport someone']*data_used['transport someone factor'] + \
data_used['Something else']*data_used['else factor'])/100 + factor[9]
aa = data_used.join(NHTS_State_Fuel_Share.set_index('State Name'), on='State Name', how='left')
aa['Google fuel factor'] = aa['Google State Mobility Predict']*aa['Percentage gasoline']
aa['Apple fuel factor']=aa['Apple State Mobility Predict']*aa['Percentage gasoline']
aa['Date'] = aa.index
day_Shift = int(factor[10])
x = aa.sum(level='date')
x = x[['Google fuel factor', 'Apple fuel factor']]
# x['Date'] =x.index+pd.DateOffset(days=day_Shift)
'''
apple mobility-fuel correlation
'''
apple_x = x['Apple fuel factor'].to_numpy()
apple_x_length = len(apple_x)
apple_x=apple_x.reshape(apple_x_length, 1)
regr = PODA_Model['Apple_EIA_Regression']
# regr_coef = regr.coef_
# print('reg_coeff: ', regr_coef)
# regr_interp = regr.intercept_
# print('reg_interp: ', regr_interp)
Apple_fuel_Demand_Pred = regr.predict(apple_x)
# aa['Apple Fuel Demand Predict'] = fuel_Demand_Apple_Pred
baseline = 8722 #average of EIA between Jan 03-Feb 07(thousand bpd)
PODA_Model['EIA_Baseline'] = baseline
data_save = aa[['Date', 'State Name', 'State Code', 'Google State Mobility Predict', 'Apple State Mobility Predict']]
data_save['Google State Mobility Predict'] = data_save['Google State Mobility Predict']*100
data_save.to_excel('./Fuel Demand Projection/Mobility_State_'+YYG_projection_Date+case+isopen+'.xlsx')
x['Google Fuel Demand Predict'] = x['Google fuel factor']*baseline
x['Apple Fuel Demand Predict'] = Apple_fuel_Demand_Pred
# x.to_excel('./Fuel Demand Projection/Mobility_US_'+YYG_projection_Date+case+isopen+'.xlsx')
PODA_Model['Fuel_Demand_Projection_'+case]=x
PODA_Model['Mobility_State_Level_Projection_'+case]=data_save
fig1 = plt.figure(figsize=(6, 5))
ax1 = fig1.add_subplot(1, 1, 1)
    ax1.plot(x.index, x['Google Fuel Demand Predict'], '-',
             label='Google Mobility (Predicted)')
    ax1.plot(x.index, x['Apple Fuel Demand Predict'], '--g',
             label='Apple Mobility (Predicted)')
ax1.plot(fuel_Demand_EIA.index - | pd.DateOffset(days=day_Shift) | pandas.DateOffset |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import metrics as skm
from scipy import stats
from typing import List, Union, Tuple
from macrosynergy.management.simulate_quantamental_data import make_qdf
from macrosynergy.management.shape_dfs import categories_df
class SignalReturnRelations:
"""Class for analyzing and visualizing relations between a signal and subsequent return
:param <pd.Dataframe> df: standardized data frame with the following necessary columns:
    'cid', 'xcats', 'real_date' and 'value'.
:param <str> ret: return category.
:param <str> sig: signal category.
:param <List[str]> cids: cross sections to be considered. Default is all in the data frame.
:param <str> start: earliest date in ISO format. Default is None and earliest date in df is used.
:param <str> end: latest date in ISO format. Default is None and latest date in df is used.
:param <dict> blacklist: cross sections with date ranges that should be excluded from the data frame.
:param <str> freq: letter denoting frequency at which the series are to be sampled.
This must be one of 'D', 'W', 'M', 'Q', 'A'. Default is 'M'.
:param <int> fwin: Forward window of return category in base periods. Default is 1.
"""
def __init__(self, df: pd.DataFrame, ret: str, sig: str, cids: List[str] = None,
start: str = None, end: str = None, fwin: int = 1, blacklist: dict = None,
freq: str = 'M'):
        """Creates a dataframe of return and signal in the appropriate form for subsequent analysis."""
        self.df = categories_df(df, [ret, sig], cids, 'value', start=start, end=end, freq=freq, blacklist=blacklist,
                                lag=1, fwin=fwin, xcat_aggs=['mean', 'last'])
        self.ret = ret
        self.sig = sig
        self.freq = freq
        self.cids = list(np.sort(self.df.index.get_level_values(0).unique()))
        self.df_cs = self.panel_relations(cs_type='cids')
        self.df_ys = self.panel_relations(cs_type='years')
        self.dic_freq = {'D': 'daily', 'W': 'weekly', 'M': 'monthly', 'Q': 'quarterly', 'A': 'annual'}
def panel_relations(self, cs_type: str = 'cids'):
"""Creates a dataframe with information on the signal-return relation across cids/years and the panel."""
assert cs_type in ['cids', 'years']
if cs_type == 'cids':
df = self.df.dropna(how='any')
css = self.cids
else:
df = self.df.dropna(how='any')
df['year'] = np.array(df.reset_index(level=1)['real_date'].dt.year)
css = [str(i) for i in df['year'].unique()]
statms = ['accuracy', 'bal_accuracy', 'f1_score', 'pearson', 'pearson_pval', 'kendall', 'kendall_pval']
df_out = pd.DataFrame(index=['Panel', 'Mean', 'PosRatio'] + css, columns=statms)
for cs in (css + ['Panel']):
if cs in css:
if cs_type == 'cids':
df_cs = df.loc[cs,]
else:
df_cs = df[df['year'] == float(cs)]
elif cs == 'Panel':
df_cs = df
ret_signs, sig_signs = np.sign(df_cs[self.ret]), np.sign(df_cs[self.sig])
df_out.loc[cs, 'accuracy'] = skm.accuracy_score(sig_signs, ret_signs)
df_out.loc[cs, 'bal_accuracy'] = skm.balanced_accuracy_score(sig_signs, ret_signs)
df_out.loc[cs, 'f1_score'] = skm.f1_score(sig_signs, ret_signs, average='weighted')
ret_vals, sig_vals = df_cs[self.ret], df_cs[self.sig]
df_out.loc[cs, ['kendall', 'kendall_pval']] = stats.kendalltau(ret_vals, sig_vals)
df_out.loc[cs, ['pearson', 'pearson_pval']] = stats.pearsonr(ret_vals, sig_vals)
df_out.loc['Mean', :] = df_out.loc[css, :].mean()
above50s = statms[0:3]
df_out.loc['PosRatio', above50s] = (df_out.loc[css, above50s] > 0.5).mean()
above0s = [statms[i] for i in [3, 5]]
df_out.loc['PosRatio', above0s] = (df_out.loc[css, above0s] > 0).mean()
below50s = [statms[i] for i in [4, 6]]
pos_pvals = np.mean(np.array(df_out.loc[css, below50s] < 0.5) * np.array(df_out.loc[css, above0s] > 0), axis=0)
df_out.loc['PosRatio', below50s] = pos_pvals # positive correlations with error probabilities < 50%
return df_out
def cross_section_table(self):
"""Returns a dataframe with information on the signal-return relation across sections and the panel."""
return self.df_cs.round(decimals=3)
def yearly_table(self):
"""Returns dataframe with information on the signal-return relation across years and the panel."""
return self.df_ys.round(decimals=3)
def accuracy_bars(self, type: str = 'cross_section', title: str = None, size: Tuple[float] = None,
legend_pos: str = 'best'):
"""Bars of overall and balanced accuracy
:param <str> type: type of segment over which bars are drawn. Must be 'cross_section' (default) or 'years'
:param <str> title: chart header. Default will be applied if none is chosen.
:param <Tuple[float]> size: 2-tuple of width and height of plot. Default will be applied if none is chosen.
:param <str> legend_pos: position of legend box. Default is 'best'. See matplotlib.pyplot.legend.
"""
assert type in ['cross_section', 'years']
df_xs = self.df_cs if type == 'cross_section' else self.df_ys
dfx = df_xs[~df_xs.index.isin(['PosRatio'])]
if title is None:
title = f'Accuracy for sign prediction of {self.ret} based on {self.sig} ' \
f'at {self.dic_freq[self.freq]} frequency'
if size is None:
size = (np.max([dfx.shape[0]/2, 8]), 6)
plt.style.use('seaborn')
plt.figure(figsize=size)
x_indexes = np.arange(len(dfx.index)) # generic x index
w = 0.4 # offset parameter, related to width of bar
plt.bar(x_indexes - w / 2, dfx['accuracy'], label='Accuracy', width=w, color='lightblue')
plt.bar(x_indexes + w / 2, dfx['bal_accuracy'], label='Balanced Accuracy', width=w, color='steelblue')
plt.xticks(ticks=x_indexes, labels=dfx.index, rotation=0) # customize x ticks/labels
plt.axhline(y=0.5, color='black', linestyle='-', linewidth=0.5)
plt.ylim(np.round(np.max(dfx.loc[:, ['accuracy', 'bal_accuracy']].min().min()-0.03, 0), 2))
plt.title(title)
plt.legend(loc=legend_pos)
plt.show()
def correlation_bars(self, type: str = 'cross_section', title: str = None, size: Tuple[float] = None,
legend_pos: str = 'best'):
"""Correlation coefficients and significance
:param <str> type: type of segment over which bars are drawn. Must be 'cross_section' (default) or 'years'
:param <str> title: chart header. Default will be applied if none is chosen.
:param <Tuple[float]> size: 2-tuple of width and height of plot. Default will be applied if none is chosen.
:param <str> legend_pos: position of legend box. Default is 'best'. See matplotlib.pyplot.legend.
"""
df_xs = self.df_cs if type == 'cross_section' else self.df_ys
dfx = df_xs[~df_xs.index.isin(['PosRatio', 'Mean'])]
pprobs = np.array([(1 - pv) * (np.sign(cc) + 1) / 2 for pv, cc in zip(dfx['pearson_pval'], dfx['pearson'])])
pprobs[pprobs == 0] = 0.01 # token small value for bar
kprobs = np.array([(1 - pv) * (np.sign(cc) + 1) / 2 for pv, cc in zip(dfx['kendall_pval'], dfx['kendall'])])
kprobs[kprobs == 0] = 0.01 # token small value for bar
if title is None:
title = f'Positive correlation probability of {self.ret} and lagged {self.sig} ' \
f'at {self.dic_freq[self.freq]} frequency'
if size is None:
size = (np.max([dfx.shape[0]/2, 8]), 6)
plt.style.use('seaborn')
plt.figure(figsize=size)
x_indexes = np.arange(len(dfx.index)) # generic x index
w = 0.4 # offset parameter, related to width of bar
plt.bar(x_indexes - w / 2, pprobs, label='Pearson', width=w, color='lightblue')
plt.bar(x_indexes + w / 2, kprobs, label='Kendall', width=w, color='steelblue')
plt.xticks(ticks=x_indexes, labels=dfx.index, rotation=0) # customize x ticks/labels
plt.axhline(y=0.95, color='orange', linestyle='--', linewidth=0.5, label='95% probability')
plt.axhline(y=0.99, color='red', linestyle='--', linewidth=0.5, label='99% probability')
plt.title(title)
plt.legend(loc=legend_pos)
plt.show()
if __name__ == "__main__":
cids = ['AUD', 'CAD', 'GBP', 'NZD']
xcats = ['XR', 'CRY', 'GROWTH', 'INFL']
df_cids = | pd.DataFrame(index=cids, columns=['earliest', 'latest', 'mean_add', 'sd_mult']) | pandas.DataFrame |
import pandas as pd
customer_calls = | pd.read_excel('17-01-2019 263951.xlsx') | pandas.read_excel |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.format(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(self), method_name))
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
# .loc works different rule, temporary disable
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
def test_setitem_series_object(self):
obj = pd.Series(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Series(['a', 1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Series(['a', 1.1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = pd.Series(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = pd.Series(['a', True, 'c', 'd'])
self._assert_setitem_series_conversion(obj, True, exp, np.object)
def test_setitem_series_int64(self):
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1, exp, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
# int + complex -> complex
exp = pd.Series([1, 1 + 1j, 3, 4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# int + bool -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, True, exp, np.int64)
def test_setitem_series_float64(self):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Series([1.1, 1.1, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.float64)
# float + complex -> complex
exp = pd.Series([1.1, 1 + 1j, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp,
np.complex128)
# float + bool -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, True, exp, np.float64)
def test_setitem_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
        self._assert_setitem_series_conversion(obj, 1, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
def test_setitem_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1, exp, np.bool)
# TODO_GH12747 The result must be int
# assigning int greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 3, exp, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.bool)
# bool + bool -> bool
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, True, exp, np.bool)
def test_setitem_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, 1, exp, 'datetime64[ns]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz -> datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_setitem_series_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_setitem_series_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_timedelta64(self):
pass
def test_setitem_series_period(self):
pass
def _assert_setitem_index_conversion(self, original_series, loc_key,
expected_index, expected_dtype):
""" test index's coercion triggered by assign key """
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
def test_setitem_index_object(self):
obj = pd.Series([1, 2, 3, 4], index=list('abcd'))
self.assertEqual(obj.index.dtype, np.object)
# object + object -> object
exp_index = pd.Index(list('abcdx'))
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
# object + int -> IndexError, regarded as location
temp = obj.copy()
with tm.assertRaises(IndexError):
temp[5] = 5
# object + float -> object
exp_index = pd.Index(['a', 'b', 'c', 'd', 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.object)
def test_setitem_index_int64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.index.dtype, np.int64)
# int + int -> int
exp_index = pd.Index([0, 1, 2, 3, 5])
self._assert_setitem_index_conversion(obj, 5, exp_index, np.int64)
# int + float -> float
exp_index = pd.Index([0, 1, 2, 3, 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.float64)
# int + object -> object
exp_index = pd.Index([0, 1, 2, 3, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_float64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(obj.index.dtype, np.float64)
# float + int -> int
temp = obj.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 5.1])
self._assert_setitem_index_conversion(obj, 5.1, exp_index, np.float64)
# float + object -> object
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_complex128(self):
pass
def test_setitem_index_bool(self):
pass
def test_setitem_index_datetime64(self):
pass
def test_setitem_index_datetime64tz(self):
pass
def test_setitem_index_timedelta64(self):
pass
def test_setitem_index_period(self):
pass
class TestInsertIndexCoercion(CoercionBase, tm.TestCase):
klasses = ['index']
method = 'insert'
def _assert_insert_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by insert """
target = original.copy()
res = target.insert(1, value)
tm.assert_index_equal(res, expected)
self.assertEqual(res.dtype, expected_dtype)
def test_insert_index_object(self):
obj = pd.Index(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Index(['a', 1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Index(['a', 1.1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1.1, exp, np.object)
# object + bool -> object
res = obj.insert(1, False)
tm.assert_index_equal(res, pd.Index(['a', False, 'b', 'c', 'd']))
self.assertEqual(res.dtype, np.object)
# object + object -> object
exp = pd.Index(['a', 'x', 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_int64(self):
obj = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Index([1, 1, 2, 3, 4])
self._assert_insert_conversion(obj, 1, exp, np.int64)
# int + float -> float
exp = pd.Index([1, 1.1, 2, 3, 4])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# int + bool -> int
exp = pd.Index([1, 0, 2, 3, 4])
self._assert_insert_conversion(obj, False, exp, np.int64)
# int + object -> object
exp = pd.Index([1, 'x', 2, 3, 4])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_float64(self):
obj = pd.Float64Index([1., 2., 3., 4.])
self.assertEqual(obj.dtype, np.float64)
# float + int -> int
exp = pd.Index([1., 1., 2., 3., 4.])
self._assert_insert_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Index([1., 1.1, 2., 3., 4.])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# float + bool -> float
exp = pd.Index([1., 0., 2., 3., 4.])
self._assert_insert_conversion(obj, False, exp, np.float64)
# float + object -> object
exp = pd.Index([1., 'x', 2., 3., 4.])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_complex128(self):
pass
def test_insert_index_bool(self):
pass
def test_insert_index_datetime64(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'])
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_datetime64tz(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'], tz='US/Eastern')
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'], tz='US/Eastern')
val = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_insert_conversion(obj, val, exp,
'datetime64[ns, US/Eastern]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='Asia/Tokyo'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_timedelta64(self):
obj = pd.TimedeltaIndex(['1 day', '2 day', '3 day', '4 day'])
self.assertEqual(obj.dtype, 'timedelta64[ns]')
# timedelta64 + timedelta64 => timedelta64
exp = pd.TimedeltaIndex(['1 day', '10 day', '2 day', '3 day', '4 day'])
self._assert_insert_conversion(obj, pd.Timedelta('10 day'),
exp, 'timedelta64[ns]')
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_period(self):
obj = pd.PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'],
freq='M')
self.assertEqual(obj.dtype, 'period[M]')
# period + period => period
exp = pd.PeriodIndex(['2011-01', '2012-01', '2011-02',
'2011-03', '2011-04'], freq='M')
self._assert_insert_conversion(obj, pd.Period('2012-01', freq='M'),
exp, 'period[M]')
# period + datetime64 => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
pd.Timestamp('2012-01-01'),
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, np.object)
# period + int => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
1,
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 1, exp, np.object)
# period + object => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
'x',
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 'x', exp, np.object)
class TestWhereCoercion(CoercionBase, tm.TestCase):
method = 'where'
def _assert_where_conversion(self, original, cond, values,
expected, expected_dtype):
""" test coercion triggered by where """
target = original.copy()
res = target.where(cond, values)
self._assert(res, expected, expected_dtype)
def _where_object_common(self, klass):
obj = klass(list('abcd'))
self.assertEqual(obj.dtype, np.object)
cond = klass([True, False, True, False])
# object + int -> object
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, 1, exp, np.object)
values = klass([5, 6, 7, 8])
exp = klass(['a', 6, 'c', 8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.object)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass(['a', 6.6, 'c', 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.object)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass(['a', 6 + 6j, 'c', 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.object)
if klass is pd.Series:
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', 0, 'c', 1])
self._assert_where_conversion(obj, cond, values, exp, np.object)
elif klass is pd.Index:
# object + bool -> object
exp = klass(['a', True, 'c', True])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', False, 'c', True])
self._assert_where_conversion(obj, cond, values, exp, np.object)
else:
            raise NotImplementedError
def test_where_series_object(self):
self._where_object_common(pd.Series)
def test_where_index_object(self):
self._where_object_common(pd.Index)
def _where_int64_common(self, klass):
obj = klass([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
cond = klass([True, False, True, False])
# int + int -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = klass([5, 6, 7, 8])
exp = klass([1, 6, 3, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# int + float -> float
exp = klass([1, 1.1, 3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1, 6.6, 3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# int + complex -> complex
if klass is pd.Series:
exp = klass([1, 1 + 1j, 3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1, 6 + 6j, 3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# int + bool -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, True, exp, np.int64)
values = klass([True, False, True, True])
exp = klass([1, 0, 3, 1])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
def test_where_series_int64(self):
self._where_int64_common(pd.Series)
def test_where_index_int64(self):
self._where_int64_common(pd.Index)
def _where_float64_common(self, klass):
obj = klass([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
cond = klass([True, False, True, False])
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, 1, exp, np.float64)
values = klass([5, 6, 7, 8])
exp = klass([1.1, 6.0, 3.3, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1.1, 6.6, 3.3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + complex -> complex
if klass is pd.Series:
exp = klass([1.1, 1 + 1j, 3.3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1.1, 6 + 6j, 3.3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, True, exp, np.float64)
values = klass([True, False, True, True])
exp = klass([1.1, 0.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
def test_where_series_float64(self):
self._where_float64_common(pd.Series)
def test_where_index_float64(self):
self._where_float64_common(pd.Index)
def test_where_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
cond = pd.Series([True, False, True, False])
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.complex128)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1 + 1j, 6.0, 3 + 3j, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.complex128)
values = | pd.Series([5.5, 6.6, 7.7, 8.8]) | pandas.Series |
from unittest import TestCase
from nose_parameterized import parameterized
from collections import OrderedDict
import os
import gzip
from pandas import (
Series,
DataFrame,
date_range,
Timestamp,
read_csv
)
from pandas.util.testing import assert_frame_equal
from numpy import (
arange,
zeros_like,
nan,
)
import warnings
from pyfolio.utils import (to_utc, to_series, check_intraday,
detect_intraday, estimate_intraday)
from pyfolio.pos import (get_percent_alloc,
extract_pos,
get_sector_exposures,
get_max_median_position_concentration)
class PositionsTestCase(TestCase):
dates = date_range(start='2015-01-01', freq='D', periods=20)
def test_get_percent_alloc(self):
raw_data = arange(15, dtype=float).reshape(5, 3)
# Make the first column negative to test absolute magnitudes.
raw_data[:, 0] *= -1
frame = DataFrame(
raw_data,
index= | date_range('01-01-2015', freq='D', periods=5) | pandas.date_range |
def concat_networks(p_in_dir_data
, p_in_dir_pred
, p_out_file
, file_suffix
, flag_matrix
, p_in_reg
, p_in_target
, flag_method
, l_p_in_net
, nbr_fold
):
from pandas import read_csv, concat, DataFrame, pivot_table
from json import load
if flag_method == 'with_and_without_de':
df_net_with_de = read_csv(l_p_in_net[0], header=None, sep='\t')
df_net_with_de.index = [(reg, target) for reg, target in zip(list(df_net_with_de.iloc[:, 0])
, list(df_net_with_de.iloc[:, 1]))]
df_net_without_de = read_csv(l_p_in_net[1], header=None, sep='\t')
df_net_without_de.index = [(reg, target) for reg, target in zip(list(df_net_without_de.iloc[:, 0]), list(df_net_without_de.iloc[:, 1]))]
# remove edges that were predicted using DE network
df_net_without_de_filtered = df_net_without_de.loc[~df_net_without_de.index.isin(df_net_with_de.index), :]
df_net_all = concat([df_net_with_de, df_net_without_de_filtered], axis='index')
df_net_all.to_csv(p_out_file, header=False, index=False, sep='\t')
if flag_method == 'a':
df_net_all = DataFrame()
for p_df_net in l_p_in_net:
if p_df_net != 'NONE':
df_net = read_csv(p_df_net, header=None, sep='\t')
df_net_all = concat([df_net, df_net_all], axis='index')
df_net_all.to_csv(p_out_file, header=False, index=False, sep='\t')
elif flag_method == 'concat_cv':
# concatenate the sub-networks
df_net = DataFrame()
for i in range(nbr_fold):
p_pred_test = p_in_dir_pred + "fold" + str(i) + (file_suffix if file_suffix else "_pred_test.tsv")
df = read_csv(p_pred_test, header=None, sep="\t")
if len(list(df.columns)) > 3: # matrix format
l_reg = list(read_csv(p_in_dir_data + "fold" + str(i) + "_test_reg", header=None, sep="\t")[0])
df.index = l_reg
df_net = concat([df_net, df], axis="index")
if len(list(df.columns)) > 3: # reindex the matrix in case of matrix
# extract info about regulators from config file
# d_run_config__value = load(open(P_CONFIG, 'r'))
# p_reg = d_run_config__value['p_reg']
            l_reg_all = list(read_csv(p_in_reg, header=None)[0])
df_net = df_net.reindex(l_reg_all, axis='index')
elif flag_matrix == "ON":
df_net.columns = ['REGULATOR', 'TARGET', 'VALUE']
df_net = pivot_table(df_net, values='VALUE', index=['REGULATOR'], columns=['TARGET'])
l_reg = list( | read_csv(p_in_reg, header=None) | pandas.read_csv |
# Databricks notebook source
# MAGIC %md
# MAGIC Clustering the Seattle Library Collection into Topics
# MAGIC =======================================================
# MAGIC
# MAGIC The data used in this demo is available at: [https://catalog.data.gov/dataset?tags=seattle-public-library](https://catalog.data.gov/dataset?tags=seattle-public-library "Seattle Public Library Dataset")
# MAGIC
# MAGIC 
# MAGIC
# MAGIC Use case - to assign items held by the Seattle Library to different topics using the item description.
# MAGIC
# MAGIC This is an unsupervised learning problem solved using a combination of TFIDF vectorising and the K-means clustering algorithm (a minimal toy sketch of this idea follows the import cell below).
# MAGIC
# MAGIC Import Python Libraries
# MAGIC -----------------------
# COMMAND ----------
# MAGIC %sh /databricks/python/bin/pip install nltk
# COMMAND ----------
import numpy as np
import pandas as pd
import more_itertools
import re
import mpld3
import matplotlib.pyplot as plt
import nltk
from nltk.stem.snowball import SnowballStemmer
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize, word_tokenize
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.manifold import MDS
from sklearn.decomposition import PCA
from sklearn.decomposition import TruncatedSVD as SVD
from pyspark.ml.feature import Word2Vec, StringIndexer, RegexTokenizer, StopWordsRemover
from pyspark.sql.functions import *
from pyspark.ml.clustering import KMeans
from pyspark.ml.evaluation import ClusteringEvaluator
# from sparknlp.annotator import *
# from sparknlp.common import *
# from sparknlp.base import *
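# COMMAND ----------
# MAGIC %md
# MAGIC A minimal, self-contained sketch of the TFIDF + K-means idea from the use case above, run on a toy corpus. The subject strings below are invented purely for illustration; the real pipeline further down works on the Snowflake library inventory instead.
# COMMAND ----------
# Toy sketch: vectorise a handful of made-up subject strings and cluster them into two topics.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans as SKKMeans  # alias avoids clashing with pyspark.ml.clustering.KMeans
toy_subjects = [
    "science fiction space opera novels",
    "space exploration astronomy planets",
    "baking bread pastry recipes cookbook",
    "vegetarian cooking recipes food",
]
toy_matrix = TfidfVectorizer(stop_words="english").fit_transform(toy_subjects)  # sparse document-term matrix
toy_km = SKKMeans(n_clusters=2, random_state=0, n_init=10).fit(toy_matrix)
print(dict(zip(toy_subjects, toy_km.labels_)))  # cluster label assigned to each toy subject string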
# COMMAND ----------
# MAGIC %md
# MAGIC Login to Snowflake and Import Data
# MAGIC ----------------------------------
# COMMAND ----------
# Use secret manager to get the login name and password for the Snowflake user
username = dbutils.secrets.get(scope="snowflake-credentials", key="username")
password = dbutils.secrets.get(scope="snowflake-credentials", key="password")
# snowflake connection options
options = dict(
sfUrl="datalytyx.east-us-2.azure.snowflakecomputing.com",
sfUser=str("WILLHOLTAM"),
sfPassword=str("<PASSWORD>"),
sfDatabase="DATABRICKS_DEMO",
sfRole="DATABRICKS",
sfSchema="SEATTLE_LIBRARY",
sfWarehouse="DATASCIENCE_WH"
)
df = spark.read \
.format("snowflake") \
.options(**options) \
.option("query",
"select BIBNUM, TITLE, AUTHOR, PUBLICATIONYEAR, SUBJECTS from LIBRARY_COLLECTION_INVENTORY where reportdate in (\
'2017-09-01T00:00:00',\
'2017-10-01T00:00:00',\
'2017-11-01T00:00:00',\
'2017-12-01T00:00:00',\
'2018-01-01T00:00:00',\
'2018-01-01T00:00:00',\
'2018-02-01T00:00:00',\
'2018-02-01T00:00:00',\
'2018-03-01T00:00:00',\
'2018-04-01T00:00:00',\
'2018-05-01T00:00:00',\
'2018-06-01T00:00:00',\
'2018-07-01T00:00:00'\
)"
) \
.load() \
.limit(1000)
df = df.cache()
df_pandas = df.toPandas()
# COMMAND ----------
# MAGIC %md
# MAGIC Preview the Data
# MAGIC ----------------
# COMMAND ----------
df.printSchema()
# COMMAND ----------
display(df.head(3))
# COMMAND ----------
df.count()
# COMMAND ----------
display(df.describe())
# COMMAND ----------
# MAGIC %md
# MAGIC Set Initial Parameters
# MAGIC ----------------------
# COMMAND ----------
nltk.download('stopwords')
stop = nltk.corpus.stopwords.words('english')
# COMMAND ----------
# MAGIC %md
# MAGIC Pipeline
# MAGIC --------
# COMMAND ----------
df_nan_removed = df.fillna(value=pd.np.nan)
df_droped_empty_rows = df_nan_removed.dropna(how='any')
df_droped_null = df_droped_empty_rows.na.drop()
# COMMAND ----------
tokenizer = RegexTokenizer(pattern="[^a-zA-Z-_']", inputCol="SUBJECTS", outputCol="WORDS")
df = tokenizer.transform(df_droped_null)
display(df)
# COMMAND ----------
remover = StopWordsRemover(inputCol="WORDS", outputCol="FILTERED")
df = remover.transform(df)
display(df)
# COMMAND ----------
stemmer = SnowballStemmer("english")
tokens = []
stems = []
for row in df.select("FILTERED").rdd.collect():
for words in row:
tokens.extend([t for t in words])
stems.extend([stemmer.stem(t) for t in words])
vocab_frame = pd.DataFrame({"WORDS": tokens}, index=stems)
# COMMAND ----------
from pyspark.sql.column import Column
from pyspark.sql.types import ArrayType, StringType
rdd = df.select("FILTERED").rdd.collect()
stemmed = []
for row in rdd:
for list_ in row:
stemmed.append([stemmer.stem(element) for element in list_])
df_new = | pd.DataFrame(data={'STEMMED': stemmed}) | pandas.DataFrame |
from .get_tmy_epw_file import get_tmy_epw_file
from .get_noaa_isd_lite_file import get_noaa_isd_lite_file
from .meteorology import Meteorology
from .analyze_noaa_isd_lite_file import analyze_noaa_isd_lite_file
import tempfile
import pandas as pd
import numpy as np
import os
import pkg_resources
from typing import Tuple
from calendar import isleap
from ._logging import _logger
# We buffer this path so that we don't create tons of temporary directories if the function is called many
# times, and so that calling it multiple times with the same WMO/year combination won't result in the same
# file being generated multiple times.
_tempdir_amy_epw = tempfile.mkdtemp()
def create_amy_epw_file(
wmo_index: int,
year: int,
*,
max_records_to_interpolate: int = 6,
max_records_to_impute: int = 48,
max_missing_amy_rows: int = 700,
amy_epw_dir: str = None,
tmy_epw_dir: str = None,
amy_dir: str = None,
amy_files: Tuple[str, str] = None,
allow_downloads: bool = False
) -> str:
"""
Combine data from a Typical Meteorological Year (TMY) EPW file and Actual Meteorological Year (AMY)
observed data to generate an AMY EPW file for a single calendar year at a given WMO.
:param wmo_index: The WMO Index of the weather station for which the EPW file should be generated.
Currently only weather stations in the United States are supported.
:param year: The year for which the EPW should be generated
:param amy_epw_dir: The directory into which the generated AMY EPW file should be written.
If not defined, a temporary directory will be created
:param tmy_epw_dir: The source directory for TMY EPW files. If a file for the requested WMO Index is
already present, it will be used. Otherwise a TMY EPW file will be downloaded (see this package's
get_tmy_epw_file() function for details). If no directory is given, the package's default
directory (in data/tmy_epw_files/ in the package's directory) will be used, which will allow AMY
files to be reused for future calls instead of downloading them repeatedly, which is quite time
consuming.
:param amy_dir: The source directory for AMY files. If a file for the requested WMO Index and year
is already present, it will be used. Otherwise a TMY EPW file will be downloaded (see this package's
get_noaa_isd_lite_file() function for details). If no directory is given, the package's default
directory (in data/ in the package's directory) will be used, which will allow AMY files to be
reused for future calls instead of downloading them repeatedly, which is quite time consuming.
:param amy_files: Instead of specifying amy_dir and allowing this method to try to find the appropriate
file, you can use this argument to specify the actual files that should be used. There should be
two files - the first the AMY file for "year", and the second the AMY file for the subsequent year,
which is required to support shifting the timezone from GMT to the timezone of the observed meteorology.
:param max_records_to_interpolate: The maximum length of sequence for which linear interpolation will be
used to replace missing values. See the documentation of _handle_missing_values() below for details.
:param max_records_to_impute: The maximum length of sequence for which imputation will be used to replace
missing values. See the documentation of _handle_missing_values() below for details.
:param max_missing_amy_rows: The maximum total number of missing rows to permit in a year's AMY file.
:param allow_downloads: If this is set to True, then any missing TMY or AMY files required to generate the
requested AMY EPW file will be downloaded from publicly available online catalogs. Otherwise, those files
being missing will result in an error being raised.
:return: The absolute path of the generated AMY EPW file
"""
if amy_dir is not None and amy_files is not None:
raise Exception("It is not possible to specify both amy_dir and amy_files")
if amy_epw_dir is None:
global _tempdir_amy_epw
amy_epw_dir = _tempdir_amy_epw
_logger.info(f"No amy_epw_dir was specified - generated AMY EPWs will be stored in {amy_epw_dir}")
# Either amy_files is specified, in which case we use the specified paths, or amy_dir is specified,
# in which case we will search that directory for AMY files, or neither is specified, in which case
# we will fall back to a generated temporary directory.
if amy_files is not None:
for p in amy_files:
if not os.path.exists(p):
raise Exception(f'Path {p} does not exist')
amy_file_path, amy_next_year_file_path = amy_files
else:
if amy_dir is None:
amy_dir = pkg_resources.resource_filename("diyepw", "data/noaa_isd_lite_files")
_logger.info(f"No amy_dir was specified - downloaded AMY files will be stored in the default location at {amy_dir}")
amy_file_path = get_noaa_isd_lite_file(wmo_index, year, output_dir=amy_dir, allow_downloads=allow_downloads)
amy_next_year_file_path = get_noaa_isd_lite_file(wmo_index, year+1, output_dir=amy_dir, allow_downloads=allow_downloads)
if max_missing_amy_rows is not None:
amy_file_analysis = analyze_noaa_isd_lite_file(amy_file_path)
if amy_file_analysis['total_rows_missing'] > max_missing_amy_rows:
raise Exception(f"File is missing {amy_file_analysis['total_rows_missing']} rows, but maximum allowed is {max_missing_amy_rows}")
# Read in the corresponding TMY3 EPW file.
tmy_epw_file_path = get_tmy_epw_file(wmo_index, tmy_epw_dir, allow_downloads=allow_downloads)
tmy = Meteorology.from_tmy3_file(tmy_epw_file_path)
# If the year we are generating an AMY EPW for is a leap year, then we need to add the leap day to the TMY data.
# We'll do that by adding the day with all empty values, then using the same routine we do to interpolate/impute
# missing data in our AMY files to fill in the missing data.
if isleap(year):
_logger.info(f"{year} is a leap year, using the interpolation strategy to populate TMY data for Feb. 29")
for hour in range(1, 25):
col_names = tmy.observations.columns.to_list()
new_row_vals = [1982, 2, 29, hour, 0]
new_row_vals.extend(np.repeat(np.nan, len(col_names) - len(new_row_vals)))
new_row = pd.DataFrame([new_row_vals], columns=col_names)
tmy.observations = tmy.observations.append(new_row)
# We sort by month, day and hour. We do *not* sort by year, because the year column doesn't matter and because
# it is in any case not consistent throughout a TMY data set
tmy.observations = tmy.observations.sort_values(by=["month", "day", "hour"])
#TODO: This is where I left off Thursday night. This call is changing the data types of the date fields into
# floating point values, which breaks the EPW file
_handle_missing_values(
tmy.observations,
max_to_interpolate=0, # We only want the imputation strategy to be used for the 24 missing hours
max_to_impute=24,
step=1,
imputation_range=14 * 24, # Two weeks, in hours
imputation_step=24,
ignore_columns=["Flags"] # The TMY files we use seem to be missing data for this field entirely
)
amy_epw_file_name = f"{tmy.country}_{tmy.state}_{tmy.city}.{tmy.station_number}_AMY_{year}.epw"
amy_epw_file_name = amy_epw_file_name.replace(" ", "-")
amy_epw_file_path = os.path.join(amy_epw_dir, amy_epw_file_name)
if os.path.exists(amy_epw_file_path):
_logger.info(f"File already exists at {amy_epw_file_path}, so a new one won't be generated.")
return amy_epw_file_path
# Read in the NOAA AMY file for the station for the requested year as well as the first 23 hours (sufficient
# to handle the largest possible timezone shift) of the subsequent year - the subsequent year's data will be
# used to populate the last hours of the year because of the time shift that we perform, which moves the first
# hours of January 1 into the final hours of December 31.
amy_df = pd.read_csv(amy_file_path, delim_whitespace=True, header=None)
amy_next_year_df = pd.read_csv(amy_next_year_file_path, delim_whitespace=True, header=None, nrows=23)
amy_df = pd.concat([amy_df, amy_next_year_df]).reset_index(drop=True)
amy_df = _set_noaa_df_columns(amy_df)
amy_df = _create_timestamp_index_for_noaa_df(amy_df)
# Shift the timestamp (index) to match the time zone of the WMO station.
amy_df = amy_df.shift(periods= tmy.timezone_gmt_offset, freq='H')
# Remove time steps that aren't applicable to the year of interest
amy_df = _map_noaa_df_to_year(amy_df, year)
_handle_missing_values(
amy_df,
step=pd.Timedelta("1h"),
max_to_interpolate=max_records_to_interpolate,
max_to_impute=max_records_to_impute,
imputation_range=pd.Timedelta("2w"),
imputation_step=pd.Timedelta("1d"),
missing_values=[np.nan, -9999.]
)
# Initialize new column for station pressure (not strictly necessary)
amy_df['Station_Pressure'] = None
# Convert sea level pressure in NOAA df to atmospheric station pressure in Pa.
for index in amy_df.index:
stp = _convert_sea_level_pressure_to_station_pressure(amy_df['Sea_Level_Pressure'][index], tmy.elevation)
amy_df.loc[index, 'Station_Pressure'] = stp
# Change observation values to the values taken from the AMY data
tmy.set('year', year)
tmy.set('Tdb', [i / 10 for i in amy_df['Air_Temperature']]) # Convert AMY value to degrees C
tmy.set('Tdew', [i / 10 for i in amy_df['Dew_Point_Temperature']]) # Convert AMY value to degrees C
tmy.set('Patm', amy_df['Station_Pressure'])
tmy.set('Wdir', amy_df['Wind_Direction'])
tmy.set('Wspeed', [i / 10 for i in amy_df['Wind_Speed']]) # Convert AMY value to m/sec
# Check for violations of EPW file standards
epw_rule_violations = tmy.validate_against_epw_rules()
if len(epw_rule_violations) > 0:
raise Exception("EPW validation failed:\n" + "\n".join(epw_rule_violations))
# Write new EPW file if no validation errors were found.
tmy.write_epw(amy_epw_file_path)
return amy_epw_file_path
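# A minimal usage sketch (not part of the original module): the WMO index and year below are purely
# illustrative - any supported US station/year combination works the same way. With allow_downloads=True,
# missing TMY EPW and NOAA ISD Lite inputs are fetched automatically; otherwise they must already exist
# in tmy_epw_dir / amy_dir.
def _example_create_amy_epw_file() -> str:  # hypothetical helper, for illustration only
    return create_amy_epw_file(
        725300,  # hypothetical WMO index (Chicago O'Hare)
        2012,    # hypothetical year
        max_records_to_interpolate=6,
        max_records_to_impute=48,
        allow_downloads=True,
    )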
def _set_noaa_df_columns(df: pd.DataFrame) -> pd.DataFrame:
"""Add headings to a NOAA ISD Lite formatted dataframe, and Drop columns for observations
that won't be used in populating the EPW files.
"""
list_of_columns = ["Year", "Month", "Day", "Hour", "Air_Temperature",
"Dew_Point_Temperature", "Sea_Level_Pressure", "Wind_Direction",
"Wind_Speed", "Sky_Condition_Total_Coverage_Code",
"Liquid_Precipitation_Depth_Dimension_1H", "Liquid_Precipitation_Depth_Dimension_6H"]
df.columns = list_of_columns
# Remove unnecessary columns
df = df.drop(columns=[
'Sky_Condition_Total_Coverage_Code',
'Liquid_Precipitation_Depth_Dimension_1H',
'Liquid_Precipitation_Depth_Dimension_6H'
])
return df
def _create_timestamp_index_for_noaa_df(df: pd.DataFrame) -> pd.DataFrame:
"""Convert the year, month, day fields of a NOAA ISD Lite DataFrame into
a timestamp and make that timestamp the index of the DataFrame
:param df:
:return:
"""
df['timestamp'] = pd.to_datetime(pd.DataFrame({'year': df['Year'],
'month': df['Month'],
'day': df['Day'],
'hour': df['Hour']}))
df = df.set_index('timestamp')
# Remove unnecessary columns
df = df.drop(columns=['Year', 'Month', 'Hour', 'Day'])
return df
def _map_noaa_df_to_year(df, year):
"""Add headings to a NOAA ISD Lite formatted dataframe, convert year-month-day-hour columns to a timestamp,
set the timestamp as index, and make sure each hour of the DF's range has a timestamp, regardless of whether
there are any observations in that hour. Drop columns for observations that won't be used in populating the
EPW files.
The assumption of this function is that the dataframe ranges from the beginning of the year to some
"""
# Create series of continuous timestamp values for that year
all_timestamps = pd.date_range(str(year) + '-01-01 00:00:00', str(year) + '-12-31 23:00:00', freq='H')
all_timestamps = pd.DataFrame(all_timestamps, columns=['timestamp'])
# Merge to one dataframe containing all continuous timestamp values.
df = pd.merge(all_timestamps, df, how='left', left_on='timestamp', right_index=True)
df = df.set_index('timestamp')
return df
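# A small self-contained sketch (hypothetical helper and data, for illustration only) of what
# _map_noaa_df_to_year() does: observations for only two hours of 2012 are spread onto the full
# 8784-hour calendar of that (leap) year, with the unobserved hours left as NaN rows.
def _demo_map_noaa_df_to_year():
    obs = pd.DataFrame(
        {'Air_Temperature': [15, 17]},
        index=pd.to_datetime(['2012-01-01 00:00:00', '2012-01-01 02:00:00']),
    )
    mapped = _map_noaa_df_to_year(obs, 2012)
    assert len(mapped) == 8784  # every hour of the leap year is present
    assert pd.isna(mapped['Air_Temperature']['2012-01-01 01:00:00'])  # unobserved hour -> NaN
    return mapped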
def _handle_missing_values(
df: pd.DataFrame, *, step, max_to_interpolate: int, max_to_impute: int,
imputation_range, imputation_step, missing_values: list = None, ignore_columns=[]
):
"""
Look for missing values in a DataFrame. If possible, the missing values will be
populated in place, using one of two strategies:
If the missing values are in a contiguous block up to the length defined by max_to_interpolate,
the values will be linearly interpolated using the previous and following values.
Otherwise, if the missing values are in a contiguous block up to the length defined by
max_to_impute, the values will be imputed by going back through the indices by
imputation_range, then stepping through by step sizes defined by imputation_step
until the index that is imputation_range ahead of the missing value is found, and
averaging all values encountered. For example, assuming a dataframe indexed by timestamp,
if imputation_range is two weeks and imputation_step is 24 hours, a missing value will
be imputed by calculating the average value at the same time of day every day going back
two weeks and forward two weeks from the missing row.
Otherwise, if the DataFrame contains at least one contiguous block of missing values
larger than max_to_impute, it will be left unchanged, and an Exception will be raised.
:param df: The dataframe to be searched for missing values.
:param step: The step size to use in considering whether the indexes of the dataframe are
contiguous. If two indices are one step apart, they are neighbors in a contiguous block.
Otherwise they do not belong to the same contiguous block.
:param max_to_interpolate: The maximum length of contiguous block to treat with the
interpolation strategy described above.
:param max_to_impute: The maximum length of contiguous block to treat with the imputation
strategy described above.
:param imputation_range: The distance before and after a missing record that will be searched
for values to average when imputing a missing value
:param imputation_step: The step-size to use in finding values to impute from, as described
in the imputation strategy above.
:param missing_values: Values matching any value in this list will be treated as missing. If not
passed, defaults to numpy.nan
:param ignore_columns: Columns listed here will be skipped; their data will be left unchanged, and
no checks will be made of whether they are missing overly large segments of values.
:return:
"""
if missing_values is None: # pragma: no cover - We don't currently have any calls that don't specify this argument
missing_values = [np.nan]
def get_indices_to_replace(df, col_name):
indices_to_replace = df.index[df[col_name].isna()].tolist()
indices_to_replace = _split_list_into_contiguous_segments(
indices_to_replace,
step=step
)
return indices_to_replace
for col_name in (c for c in df if c not in ignore_columns):
# TODO: This is the line that is breaking the dates. It seems like we dodged the bullet here for this long
# just because we always converted the date fields into an index before we added interpolating for the
# leap day data
if df[col_name].isin(missing_values).any():
df.loc[df[col_name].isin(missing_values), col_name] = np.nan
indices_to_replace = get_indices_to_replace(df, col_name)
# There is no work to be done on this column if it has no missing data
if len(indices_to_replace) == 0: # pragma: no cover
continue
# max(..., key=len) gives us the longest sequence, then we use len() to get that sequence's length
max_sequence_length = len(max(indices_to_replace, key=len))
# We raise an exception if a column has too many sequential missing rows; it's up to the calling
# code to decide how we are going to handle records that can't be processed for this reason.
if max_sequence_length > max_to_impute: # pragma: no cover - We check for this case before this function is called
raise Exception("The longest set of missing records for {} is {}, but the max allowed is {}".format(
col_name, max_sequence_length, max_to_impute
))
# We make two passes to fill in missing records: The first pass uses the imputation strategy described
# in this function's doc comment to fill in any gaps that are larger than max_to_interpolate. That
# pass leaves behind any sequences that are smaller than that limit, and also leaves behind the first
# and last item in any imputed sequence, which are also interpolated (i.e. set to the average of the imputed
# value and the observed value on either side) to smooth out the transition between computed and observed
# values.
for indices in indices_to_replace:
# Any blocks within our interpolation limit are skipped - they'll be filled in by the interpolate()
# call below
if len(indices) <= max_to_interpolate:
continue
# We will perform imputation on all the elements in the chunk *except* for the first and last
# ones, which will be interpolated to smooth out the transition between computed and observed values
indices_to_impute = indices[1:-1]
# Set each missing value to the average of all the values in the range extending from imputation_range
# indices behind to imputation_range indices ahead, walking through that range in steps whose size is
# set by imputation_step.
for index_to_impute in indices_to_impute:
replacement_value_index = index_to_impute - imputation_range
replacement_values = []
while replacement_value_index <= index_to_impute + imputation_range:
if replacement_value_index in df.index:
replacement_values.append(df[col_name][replacement_value_index])
replacement_value_index += imputation_step
# Take the mean of the values pulled. Will ignore NaNs.
df[col_name][index_to_impute] = | pd.Series(replacement_values, dtype=np.float64) | pandas.Series |
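# A standalone sketch (not part of the module above) of the gap-classification idea behind
# _handle_missing_values(): gaps up to max_records_to_interpolate are linearly interpolated, longer gaps up
# to max_records_to_impute are imputed from the same hour on surrounding days. It uses the function's
# default thresholds (6 to interpolate, 48 to impute); the data itself is hypothetical.
def _example_gap_strategies():
    import numpy as np
    import pandas as pd
    idx = pd.date_range('2012-06-01', periods=24 * 30, freq='H')
    series = pd.Series(np.sin(np.arange(len(idx)) * 2 * np.pi / 24), index=idx)
    series.iloc[100:103] = np.nan   # 3 missing hours -> short enough to interpolate (<= 6)
    series.iloc[400:430] = np.nan   # 30 missing hours -> too long to interpolate, short enough to impute (<= 48)
    # Linear interpolation with a fill limit handles the short gap (and at most 6 values of the long one):
    interpolated = series.interpolate(limit=6, limit_area='inside')
    # The long gap would instead be filled hour-by-hour from the same time of day over +/- 2 weeks:
    target = series.index[410]
    window = [target + pd.Timedelta(days=d) for d in range(-14, 15) if d != 0]
    imputed_value = series.reindex(window).mean()
    return interpolated, imputed_value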
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import os
# In[2]:
train_encoded = pd.read_csv("../data/train_store_encoded_onehot.csv")
# In[3]:
train_df = pd.read_csv("../data/train.csv")
store_df = | pd.read_csv("../data/store.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with | tm.assert_raises_regex(TypeError, mutable_regex) | pandas.util.testing.assert_raises_regex |
import os
import pandas as pd
from pympi import Eaf
from ChildProject.projects import ChildProject
from ChildProject.pipelines.samplers import PeriodicSampler
from ChildProject.pipelines.eafbuilder import EafBuilderPipeline
def test_periodic():
os.makedirs('output/eaf', exist_ok = True)
project = ChildProject('examples/valid_raw_data')
project.read()
sampler = PeriodicSampler(project, 500, 500, 250, recordings = ['sound.wav'])
sampler.sample()
sampler.segments.to_csv('output/eaf/segments.csv')
eaf_builder = EafBuilderPipeline()
eaf_builder.run(
destination = 'output/eaf',
segments = 'output/eaf/segments.csv',
eaf_type = 'periodic',
template = 'basic',
context_onset = 250,
context_offset = 250
)
eaf = Eaf('output/eaf/sound/sound_periodic_basic.eaf')
code = eaf.tiers['code_periodic'][0]
segments = []
for pid in code:
(start_ts, end_ts, value, svg_ref) = code[pid]
(start_t, end_t) = (eaf.timeslots[start_ts], eaf.timeslots[end_ts])
segments.append({'segment_onset': int(start_t), 'segment_offset': int(end_t)})
segments = | pd.DataFrame(segments) | pandas.DataFrame |
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
_testing as tm,
concat,
)
from pandas.tests.strings.test_strings import assert_series_or_index_equal
@pytest.mark.parametrize("other", [None, Series, Index])
def test_str_cat_name(index_or_series, other):
# GH 21053
box = index_or_series
values = ["a", "b"]
if other:
other = other(values)
else:
other = values
result = box(values, name="name").str.cat(other, sep=",")
assert result.name == "name"
def test_str_cat(index_or_series):
box = index_or_series
# test_cat above tests "str_cat" from ndarray;
# here testing "str.cat" from Series/Index to ndarray/list
s = box(["a", "a", "b", "b", "c", np.nan])
# single array
result = s.str.cat()
expected = "aabbc"
assert result == expected
result = s.str.cat(na_rep="-")
expected = "aabbc-"
assert result == expected
result = s.str.cat(sep="_", na_rep="NA")
expected = "a_a_b_b_c_NA"
assert result == expected
t = np.array(["a", np.nan, "b", "d", "foo", np.nan], dtype=object)
expected = box(["aa", "a-", "bb", "bd", "cfoo", "--"])
# Series/Index with array
result = s.str.cat(t, na_rep="-")
assert_series_or_index_equal(result, expected)
# Series/Index with list
result = s.str.cat(list(t), na_rep="-")
assert_series_or_index_equal(result, expected)
# errors for incorrect lengths
rgx = r"If `others` contains arrays or lists \(or other list-likes.*"
z = Series(["1", "2", "3"])
with pytest.raises(ValueError, match=rgx):
s.str.cat(z.values)
with pytest.raises(ValueError, match=rgx):
s.str.cat(list(z))
def test_str_cat_raises_intuitive_error(index_or_series):
# GH 11334
box = index_or_series
s = box(["a", "b", "c", "d"])
message = "Did you mean to supply a `sep` keyword?"
with pytest.raises(ValueError, match=message):
s.str.cat("|")
with pytest.raises(ValueError, match=message):
s.str.cat(" ")
@pytest.mark.parametrize("sep", ["", None])
@pytest.mark.parametrize("dtype_target", ["object", "category"])
@pytest.mark.parametrize("dtype_caller", ["object", "category"])
def test_str_cat_categorical(index_or_series, dtype_caller, dtype_target, sep):
box = index_or_series
s = Index(["a", "a", "b", "a"], dtype=dtype_caller)
s = s if box == Index else Series(s, index=s)
t = Index(["b", "a", "b", "c"], dtype=dtype_target)
expected = Index(["ab", "aa", "bb", "ac"])
expected = expected if box == Index else | Series(expected, index=s) | pandas.Series |
import datetime as dt
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal, assert_frame_equal
import pytest
from solarforecastarbiter.datamodel import Observation
from solarforecastarbiter.validation import tasks, validator
from solarforecastarbiter.validation.quality_mapping import (
LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING,
DAILY_VALIDATION_FLAG)
@pytest.fixture()
def make_observation(single_site):
def f(variable):
return Observation(
name='test', variable=variable, interval_value_type='mean',
interval_length=pd.Timedelta('1hr'), interval_label='beginning',
site=single_site, uncertainty=0.1, observation_id='OBSID',
provider='Organization 1', extra_parameters='')
return f
@pytest.fixture()
def default_index(single_site):
return [pd.Timestamp('2019-01-01T08:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T09:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T10:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T11:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T13:00:00', tz=single_site.timezone)]
@pytest.fixture()
def daily_index(single_site):
out = pd.date_range(start='2019-01-01T08:00:00',
end='2019-01-01T19:00:00',
freq='1h',
tz=single_site.timezone)
return out.append(
pd.Index([pd.Timestamp('2019-01-02T09:00:00',
tz=single_site.timezone)]))
def test_validate_ghi(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi']]
obs = make_observation('ghi')
data = pd.Series([10, 1000, -100, 500, 300], index=default_index)
flags = tasks.validate_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 1, 0, 1, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_validate_mostly_clear(mocker, make_observation):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi']]
obs = make_observation('ghi').replace(interval_length=pd.Timedelta('5min'))
index = pd.date_range(start='2019-04-01T11:00', freq='5min',
tz=obs.site.timezone, periods=11)
data = pd.Series([742, 749, 756, 763, 769, 774, 779, 784, 789, 793, 700],
index=index)
flags = tasks.validate_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
| pd.Series([1] * 10 + [0], index=data.index) | pandas.Series |
import pandas as pd
from fbd_interpreter.icecream.plot_utils import detect_axis_range
def test_detect_axis_range() -> None:
assert detect_axis_range(None) is None
assert detect_axis_range(pd.Series([], dtype="float64")) is None
assert detect_axis_range(pd.Series(["a", "b"])) is None
assert detect_axis_range(pd.Series([0, 1]), | pd.Series([0.5, 0.5]) | pandas.Series |
import math
import requests
import os
import pandas as pd
import matplotlib.pyplot as plt
import os
import numpy as np
import sys
import math
from datetime import datetime
from glob import glob
from datetime import timedelta
plt.style.use('ggplot')
from mpl_toolkits.basemap import Basemap
from igrf12py.igrf12fun import runigrf12, plotigrf
base_url = 'http://www.ndbc.noaa.gov/view_text_file.php?filename=%s&dir=data/historical/stdmet/'
# these are buoys within the drifter region that were active in 2012/2013
buoy_list = {
46002:(42.614, -130.490),
46005:(45.958, -131.000),
46011:(34.956, -121.019),
46012:(37.363, -122.881),
46013:(38.242, -123.301),
46015:(42.764, -124.832),
46029:(46.159, -124.514),
46047:(32.403, -119.536),
46061:(47.353, -124.731),
46087:(48.494, -124.728),
46089:(45.893, -125.819),
46211:(46.858, -124.244),
46229:(43.767, -124.549),
46239:(36.342, -122.102),
46246:(49.904, -145.243),
46089:(45.893, -125.819),
'cdea2':(56.388, -134.637)}
def compute_distance(lat1, long1, lat2, long2):
# Convert latitude and longitude to
# spherical coordinates in radians.
degrees_to_radians = math.pi/180.0
# phi = 90 - latitude
phi1 = (90.0 - lat1)*degrees_to_radians
phi2 = (90.0 - lat2)*degrees_to_radians
# theta = longitude
theta1 = long1*degrees_to_radians
theta2 = long2*degrees_to_radians
# Compute spherical distance from spherical coordinates.
# For two locations in spherical coordinates
# (1, theta, phi) and (1, theta', phi')
# cosine( arc length ) =
# sin phi sin phi' cos(theta-theta') + cos phi cos phi'
# distance = rho * arc length
cos = (math.sin(phi1)*math.sin(phi2)*math.cos(theta1 - theta2) +
math.cos(phi1)*math.cos(phi2))
arc = math.acos( cos )
# multiply by radius of earth in km
# Remember to multiply arc by the radius of the earth
# in your favorite set of units to get length.
return arc*6371
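# Illustrative helper (not in the original script): compute_distance() returns the great-circle distance in
# km, e.g. between buoys 46002 and 46005 from buoy_list above, which comes out to roughly 370-375 km.
def _example_compute_distance():
    return compute_distance(42.614, -130.490, 45.958, -131.000)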
def get_request(stationid, year, dirp=''):
fname = "%sh%s.txt.gz" %(stationid, year)
path = os.path.join(dirp, fname[:-3])
if not os.path.exists(path):
print("downloading from %s" %path)
rr = requests.get(base_url %(fname))
response = rr.text
if 'Unable to access' in response:
print("Unable to access data at %s" %rr.url)
return ''
fp = open(path, 'w')
fp.write(rr.text)
fp.close()
return path
def parse_request(path):
if not os.path.exists(path):
return ''
else:
ss = pd.read_csv(path,
delim_whitespace=True,
header=0,
skiprows=[1],
parse_dates={'buoy_datetime':['#YY', 'MM',
'DD', 'hh', 'mm']},
)
ss['buoy_datetime'] = pd.to_datetime(ss['buoy_datetime'],
format="%Y %m %d %H %M")
return ss
def get_all_buoys():
buoys = pd.DataFrame()
for buoyid in buoy_list.keys():
for yr in [2012, 2013]:
path = get_request(buoyid, yr, 'buoys')
bpd = parse_request(path)
if type(bpd) != str:
bpd['buoy_lat'] = buoy_list[buoyid][0]
bpd['buoy_lon'] = buoy_list[buoyid][1]
bpd['buoy_id'] = buoyid
buoys = buoys.append(bpd)
return buoys
def find_nearest_buoy(lat, lon):
buoy_dists = []
for buoy in buoy_list.keys():
blat, blon = buoy_list[buoy]
dd = compute_distance(blat, blon, lat, lon)
buoy_dists.append((buoy, dd))
buoys_sorted = sorted(buoy_dists, key=lambda x: x[1])
return buoys_sorted
#Convert julian day described in the data to datetime format
def convert_julian_frac(julian_frac, year):
"""
julian_frac is string in the form of a float
"""
frac, julian_day = math.modf(float(julian_frac)+1)
#The drifters reported both 0 and 356 for julian days in the calendar year
#When I get access to source code, I will try to determine which is correct
if int(julian_day) > 365:
julian_day = julian_day-365
year = int(year) + 1
mins, hrs = math.modf(frac*24.)
secs, mins = math.modf(mins*60)
usecs, secs = math.modf(secs*60)
dval= '%s %s %s %s %s' %(year, int(julian_day), int(hrs), int(mins), int(secs))
dtval = datetime.strptime(dval, '%Y %j %H %M %S')
return dtval
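# Illustrative helper (not in the original script): the fractional julian day '166.5' in 2012 maps to
# day 167 (note the +1 above) at 12:00, i.e. datetime(2012, 6, 15, 12, 0, 0).
def _example_convert_julian_frac():
    return convert_julian_frac('166.5', 2012)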
def load_data(fname, drifter_type, launch_datetime='2012-06-01 00:00:00', end_datetime='2014-06-01 00:00:00'):
"""Input the name of the drifter file downloaded from the website. This function parses the two types of data,
averaged measurements, M, and calibration measurements, C
"""
min_bat = 7
dval = open(fname, 'r')
#initialize battery voltage
bvalue = -1
grf12 = -1
cd = {"id":[], "cal_start_datetime":[], "sample_datetime":[], "num":[],
"x":[], "y":[], "z":[], "f":[], "temp":[], "lat":[], "lon":[], "bat":[],
}
md = {"id":[], "sample_datetime":[], "num":[],
"x":[], "y":[], "z":[], "f":[], "temp":[], "lat":[], "lon":[], "bat":[],
}
nsams = 250
calsx = [0]*nsams
calsy = [0]*nsams
calsz = [0]*nsams
do_return = False
cl = {"id":[], "cal_start_datetime":[], "calsx":[], "calsy":[], "calsz":[],
"temp":[], "lat":[], "lon":[], "bat":[]}
for line in dval:
line_vals = line.split(' ')
line_vals = [x for x in line_vals if x!='']
line_vals[-1] = line_vals[-1].strip()
if line_vals[0] == 'S':
# S: status line
# S 000000000066760 123 270 2011.08.13 23:03:06 47.651360 -129.042221 8
# S drifter_id rec_# day date time latitude longitude ?
# remove the S character
# this message is sent when the data is uploaded to the server
#mstatus = line_vals[1:]
d = | pd.to_datetime("%s %s" %(line_vals[4], line_vals[5])) | pandas.to_datetime |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 19 11:47:07 2018
@author: nmei
"""
import os
import pandas as pd
import numpy as np
from glob import glob
import re
import seaborn as sns
sns.set_style('whitegrid')
sns.set_context('poster')
from matplotlib import pyplot as plt
import utils
figure_dir = 'figures'
if not os.path.exists(figure_dir):
os.mkdir(figure_dir)
working_dir = {ii+1:'batch/correlation/results_e{}'.format(ii+1) for ii in range(3)}
working_data = [glob(os.path.join(d,'*.csv')) for _,d in working_dir.items()]
working_data = np.concatenate(working_data)
df = []
for f in working_data:
exp, = re.findall('\d',f)
df_temp = pd.read_csv(f)
df_temp['experiment'] = exp
df.append(df_temp)
df = pd.concat(df)
decode_dir = {ii+1:'batch/results_e{}'.format(ii+1) for ii in range(3)}
decode_data = [glob(os.path.join(d,'*.csv')) for _,d in decode_dir.items()]
decode_data = np.concatenate(decode_data)
df_decode = []
for f in decode_data:
exp, = re.findall('\d',f)
df_temp = | pd.read_csv(f) | pandas.read_csv |
import argparse
from collections import defaultdict
import string
import dill
import numpy as np
import pandas as pd
import scipy as sp
import torch
from tqdm import tqdm
from transformers import XLNetTokenizer, XLNetModel, RobertaModel
from rake_nltk import Rake
import yake # pip3 install git+https://github.com/LIAAD/yake
from GPT_GNN.data import Graph
from GPT_GNN.utils import normalize
parser = argparse.ArgumentParser(description='Preprocess OAG Data')
'''
Dataset arguments
'''
parser.add_argument('--input_dir', type=str, default='preprocessed/oag_raw',
help='The address to store the original data directory.')
parser.add_argument('--output_dir', type=str, default='preprocess_output',
help='The address to output the preprocessed graph.')
parser.add_argument('--domain', type=str, default='_CS')
parser.add_argument('--citation_bar', type=int, default=10,
help='Only consider papers with citation larger than (2020 - year) * citation_bar')
parser.add_argument('--test_year', type=int, default=2017,
help='Papers published after the specific year will be put in the fine-tuning testing dateset.')
args = parser.parse_args()
device = torch.device("cpu") # Only "cpu" for my computer
cite_dict = defaultdict(lambda: 0) # Default value for each key is 0
with open(args.input_dir + '/PR%s_20190919.tsv' % args.domain) as fin: # Use "tsv" file as INPUT
fin.readline() # For title
for l in tqdm(fin, total=sum(1 for line in open(
args.input_dir + '/PR%s_20190919.tsv' % args.domain))): # l = ['2001168787', '1963479517']
l = l[:-1].split('\t') # Split each element
cite_dict[l[1]] += 1
pfl = defaultdict(lambda: {})
with open(args.input_dir + '/Papers%s_20190919.tsv' % args.domain) as fin:
fin.readline()
for l in tqdm(fin, total=sum(1 for line in open(args.input_dir + '/Papers%s_20190919.tsv' % args.domain))):
l = l[:-1].split('\t')
bound = min(2020 - int(l[1]), 20) * args.citation_bar # USED TO control the size of data in use (based on the diff of published & current years)
# ReferenceId for the corresponding PaperId must not smaller than the "bound"
# No empty value for PaperId, PublishYear, NormalisedTitle, VenueId, DetectedLanguage
# Published Year is no early than 2000 (USED TO control the size of data in use)
if cite_dict[l[0]] < bound or l[0] == '' or l[1] == '' or l[2] == '' or l[3] == '' and l[4] == '' or int(
l[1]) < 2000:
continue
pi = {'id': l[0], 'title': l[2], 'type': 'paper', 'time': int(l[1])} # Store column information
pfl[l[0]] = pi
del cite_dict
# XLNet: Using an auto-regressive method to learn bidirectional contexts by maximizing the expected likelihood
# over all permutations of the input sequence factorization order
tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
model = XLNetModel.from_pretrained('xlnet-base-cased', output_hidden_states=True, output_attentions=True).to(device)
# Other NLP models to handle abstract differently
roberta_model = RobertaModel.from_pretrained('roberta-base', output_hidden_states=True, output_attentions=True).to(device)
# Customize punctuation check list for text data cleaning
punc = string.punctuation + "!?。。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏."
# Key extraction techniques:
rake_nltk_var = Rake()
keywords_num = 20 # The maximum number of keywords from abstract
language = "en"
max_ngram_size = 5 # Limit the maximum number of words in an extracted keyword n-gram
deduplication_threshold = 0.9 # Repeat the same words in different key phrases (0.1-duplication, 0.9-NO duplication)
# deduplication_algo = 'seqm' # Deduplication function [leve|jaro|seqm]
yake = yake.KeywordExtractor(lan=language, n=max_ngram_size, dedupLim=deduplication_threshold, top=keywords_num, features=None)
pfl_emb = defaultdict(lambda: {})
with open(args.input_dir + '/PAb%s_20190919.tsv' % args.domain, errors='ignore') as fin:
fin.readline()
for l in tqdm(fin, total=sum(
1 for line in open(args.input_dir + '/PAb%s_20190919.tsv' % args.domain, 'r', errors='ignore'))):
try:
l = l.split('\t')
if l[0] in pfl:
abs = l[1] # Grab string of raw abstract
abs = abs.lower() # Convert text to lowercase
abs = abs.translate(str.maketrans('', '', punc)) # Remove punctuation from the string
# Keyword extraction for abstract:
# print("Abstract: \n", abs)
# RAKE (Rapid Automatic Keyword Extraction algorithm):
# rake_nltk_var.extract_keywords_from_text(abs)
# abs_keywords = rake_nltk_var.get_ranked_phrases()
# # if len(abs_keywords) > keywords_num:
# # abs_keywords = abs_keywords[:keywords_num] # Limit the maximum num of keywords from abstract
# abs = ' '.join(abs_keywords)
# YAKE (Yet Another Keyword Extractor):
abs_keywords = yake.extract_keywords(abs)
# print(abs_keywords)
abs = ''
for kw in abs_keywords:
abs = abs + kw[0] + ' ' # Link all keywords together (kw[1] is score: lower -> more relevant)
abs = abs[:-1] # Remove the final space
# print("Final Abstract: \n", abs)
# Consider abstract embedding:
input_ids = torch.tensor([tokenizer.encode(pfl[l[0]]['title'])]).to(device)[:, :64]
abs_input_ids = torch.tensor([tokenizer.encode(abs)]).to(device)[:, :64] # ADJUST the TOKENIZER for abstract contents
if len(input_ids[0]) < 4 or len(abs_input_ids[0]) < 4:
continue
all_hidden_states, all_attentions = model(input_ids)[-2:]
rep = (all_hidden_states[-2][0] * all_attentions[-2][0].mean(dim=0).mean(dim=0).view(-1, 1)).sum(dim=0)
abs_all_hidden_states, abs_all_attentions = roberta_model(abs_input_ids)[-2:] # ADJUST the MODEL for abstract contents
abs_rep = (abs_all_hidden_states[-2][0] * abs_all_attentions[-2][0].mean(dim=0).mean(dim=0).view(-1, 1)).sum(dim=0)
pfl_emb[l[0]] = pfl[l[0]]
pfl_emb[l[0]]['emb'] = rep.tolist() # pfl_emb will not involve any paper without 'emb'
pfl_emb[l[0]]['abs_emb'] = abs_rep.tolist() # Add abstract embedding to the dictionary
# # Only consider title embedding:
# input_ids = torch.tensor([tokenizer.encode("na")]).to(device)[:, :64] # Specially test for empty-content string title
# # input_ids = torch.tensor([tokenizer.encode(pfl[l[0]]['title'])]).to(device)[:, :64] # With title contents
# if len(input_ids[0]) < 4:
# continue
# all_hidden_states, all_attentions = model(input_ids)[-2:]
# rep = (all_hidden_states[-2][0] * all_attentions[-2][0].mean(dim=0).mean(dim=0).view(-1, 1)).sum(dim=0)
# pfl_emb[l[0]] = pfl[l[0]]
# pfl_emb[l[0]]['emb'] = rep.tolist()
# # Consider title and abstract in one embedding:
# input_ids = torch.tensor([tokenizer.encode(pfl[l[0]]['title'] + abs)]).to(device)[:, :64]
# if len(input_ids[0]) < 4:
# continue
# all_hidden_states, all_attentions = model(input_ids)[-2:]
# rep = (all_hidden_states[-2][0] * all_attentions[-2][0].mean(dim=0).mean(dim=0).view(-1, 1)).sum(dim=0)
# pfl_emb[l[0]] = pfl[l[0]]
# pfl_emb[l[0]]['emb'] = rep.tolist()
except Exception as e:
print(e)
del pfl
vfi_ids = {}
with open(args.input_dir + '/vfi_vector.tsv') as fin:
for l in tqdm(fin, total=sum(1 for line in open(args.input_dir + '/vfi_vector.tsv'))):
l = l[:-1].split('\t') # Ignore the last element in the list
vfi_ids[l[0]] = True # Add the 'True' value to the corresponding key - 1st element in the line
graph = Graph()
rem = []
with open(args.input_dir + '/Papers%s_20190919.tsv' % args.domain) as fin:
fin.readline()
for l in tqdm(fin, total=sum(1 for line in open(args.input_dir + '/Papers%s_20190919.tsv' % args.domain, 'r'))):
l = l[:-1].split('\t')
if l[0] not in pfl_emb or l[4] != 'en' or l[3] not in vfi_ids:
continue
rem += [l[0]]
vi = {'id': l[3], 'type': 'venue', 'attr': l[-2]}
graph.add_edge(pfl_emb[l[0]], vi, time=int(l[1]), relation_type='PV_' + l[-2])
del rem
with open(args.input_dir + '/PR%s_20190919.tsv' % args.domain) as fin:
fin.readline()
for l in tqdm(fin, total=sum(1 for line in open(args.input_dir + '/PR%s_20190919.tsv' % args.domain))):
l = l[:-1].split('\t')
if l[0] in pfl_emb and l[1] in pfl_emb:
p1 = pfl_emb[l[0]]
p2 = pfl_emb[l[1]]
if p1['time'] >= p2['time']:
# if p1['time'] >= p2['time'] and p1['time'] <= args.test_year: # Break testing links
graph.add_edge(p1, p2, time=p1['time'], relation_type='PP_cite')
ffl = {}
with open(args.input_dir + '/PF%s_20190919.tsv' % args.domain) as fin:
fin.readline()
for l in tqdm(fin, total=sum(1 for line in open(args.input_dir + '/PF%s_20190919.tsv' % args.domain))):
l = l[:-1].split('\t')
if l[0] in pfl_emb and l[1] in vfi_ids:
ffl[l[1]] = True
with open(args.input_dir + '/FHierarchy_20190919.tsv') as fin:
fin.readline()
for l in tqdm(fin, total=sum(1 for line in open(args.input_dir + '/FHierarchy_20190919.tsv'))):
l = l[:-1].split('\t')
if l[0] in ffl and l[1] in ffl and l[0] in pfl_emb:
# if l[0] in ffl and l[1] in ffl and l[0] in pfl_emb \
# and pfl_emb[l[0]]['time'] <= args.test_year and pfl_emb[l[1]]['time'] <= args.test_year: # Break testing links
fi = {'id': l[0], 'type': 'field', 'attr': l[2]}
fj = {'id': l[1], 'type': 'field', 'attr': l[3]}
graph.add_edge(fi, fj, relation_type='FF_in')
ffl[l[0]] = fi
ffl[l[1]] = fj
with open(args.input_dir + '/PF%s_20190919.tsv' % args.domain) as fin:
fin.readline()
for l in tqdm(fin, total=sum(1 for line in open(args.input_dir + '/PF%s_20190919.tsv' % args.domain))):
l = l[:-1].split('\t')
if l[0] in pfl_emb and l[1] in ffl and type(ffl[l[1]]) == dict:
# if l[0] in pfl_emb and l[1] in ffl and type(ffl[l[1]]) == dict \
# and pfl_emb[l[0]]['time'] <= args.test_year: # Break testing links
pi = pfl_emb[l[0]]
fi = ffl[l[1]]
graph.add_edge(pi, fi, time=pi['time'], relation_type='PF_in_' + fi['attr'])
del ffl
coa = defaultdict(lambda: {})
with open(args.input_dir + '/PAuAf%s_20190919.tsv' % args.domain) as fin:
fin.readline()
for l in tqdm(fin, total=sum(1 for line in open(args.input_dir + '/PAuAf%s_20190919.tsv' % args.domain))):
l = l[:-1].split('\t')
if l[0] in pfl_emb and l[2] in vfi_ids:
pi = pfl_emb[l[0]]
ai = {'id': l[1], 'type': 'author'}
fi = {'id': l[2], 'type': 'affiliation'}
coa[l[0]][int(l[-1])] = ai
graph.add_edge(ai, fi, relation_type='in')
del vfi_ids
for pid in tqdm(coa):
if pid not in pfl_emb:
continue
pi = pfl_emb[pid]
max_seq = max(coa[pid].keys())
for seq_i in coa[pid]:
ai = coa[pid][seq_i]
# if pi['time'] <= args.test_year: # Break testing links
if seq_i == 1:
graph.add_edge(ai, pi, time=pi['time'], relation_type='AP_write_first')
elif seq_i == max_seq:
graph.add_edge(ai, pi, time=pi['time'], relation_type='AP_write_last')
else:
graph.add_edge(ai, pi, time=pi['time'], relation_type='AP_write_other')
del coa
with open(args.input_dir + '/vfi_vector.tsv') as fin:
for l in tqdm(fin, total=sum(1 for line in open(args.input_dir + '/vfi_vector.tsv'))):
l = l[:-1].split('\t')
ser = l[0]
for idx in ['venue', 'field', 'affiliation']:
if ser in graph.node_forward[idx] and ser in pfl_emb: # idx is the node name, ser is the node id
graph.node_bacward[idx][graph.node_forward[idx][ser]]['node_emb'] = np.array(l[1].split(' '))
with open(args.input_dir + '/SeqName%s_20190919.tsv' % args.domain, errors='ignore') as fin:
for l in tqdm(fin, total=sum(
1 for line in open(args.input_dir + '/SeqName%s_20190919.tsv' % args.domain, errors='ignore'))):
l = l[:-1].split('\t')
key = l[2]
if key in ['conference', 'journal', 'repository', 'patent']:
key = 'venue'
if key == 'fos':
key = 'field'
if l[0] in graph.node_forward[key]:
s = graph.node_bacward[key][graph.node_forward[key][l[0]]]
s['name'] = l[1]
'''
Calculate the total citation information as node attributes.
'''
for idx, pi in enumerate(graph.node_bacward['paper']):
pi['citation'] = len(graph.edge_list['paper']['paper']['PP_cite'][idx])
for idx, ai in enumerate(graph.node_bacward['author']):
citation = 0
for rel in graph.edge_list['author']['paper'].keys():
for pid in graph.edge_list['author']['paper'][rel][idx]:
citation += graph.node_bacward['paper'][pid]['citation']
ai['citation'] = citation
for idx, fi in enumerate(graph.node_bacward['affiliation']):
citation = 0
for aid in graph.edge_list['affiliation']['author']['in'][idx]:
citation += graph.node_bacward['author'][aid]['citation']
fi['citation'] = citation
for idx, vi in enumerate(graph.node_bacward['venue']):
citation = 0
for rel in graph.edge_list['venue']['paper'].keys():
for pid in graph.edge_list['venue']['paper'][rel][idx]:
citation += graph.node_bacward['paper'][pid]['citation']
vi['citation'] = citation
for idx, fi in enumerate(graph.node_bacward['field']):
citation = 0
for rel in graph.edge_list['field']['paper'].keys():
for pid in graph.edge_list['field']['paper'][rel][idx]:
citation += graph.node_bacward['paper'][pid]['citation']
fi['citation'] = citation
'''
Since only paper nodes have w2v embeddings, we simply propagate their
features to other nodes by averaging over their neighborhoods.
Then, we construct the Dataframe for each node type.
'''
d = | pd.DataFrame(graph.node_bacward['paper']) | pandas.DataFrame |
import sys
import os
import numpy as np
import pandas as pd
import dill
import torch
def devide_by_steps(data):
# find first/last frame
min_frame = min([x['frame']["id"][0] for x in data])
max_frame = max([max(x['frame']["id"]) for x in data])
#
new_data = []
for n in range(min_frame, max_frame+1):
frame = []
for ped in data:
if n in ped.values[:,1]:
frame.append(ped.values[ped.values[:,1]==n])
print("frame "+ str(n)+" from " + str(max_frame))
new_data.append(frame)
return new_data
def postproccess(dataset):
arr = []
for f in dataset:
arr.append(devide_by_steps(f))
print("dataset proccessed")
# tarr = torch.tensor(arr)
return arr
def maybe_makedirs(path_to_create):
"""This function will create a directory, unless it exists already,
at which point the function will return.
The exception handling is necessary as it prevents a race condition
from occurring.
Inputs:
path_to_create - A string path to a directory you'd like created.
"""
try:
os.makedirs(path_to_create)
except OSError:
if not os.path.isdir(path_to_create):
raise
def derivative_of(x, dt=1, radian=False):
if radian:
x = make_continuous_copy(x)
if x[~np.isnan(x)].shape[-1] < 2:
return np.zeros_like(x)
dx = np.full_like(x, np.nan)
dx[~np.isnan(x)] = np.gradient(x[~np.isnan(x)], dt)
return dx
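# Small illustrative check (not part of the original preprocessing): derivative_of() wraps np.gradient, so a
# position series for a hypothetical pedestrian walking at a constant 1.5 m/s, sampled every 0.4 s, yields a
# constant velocity of 1.5.
def _example_derivative_of():
    x = np.arange(10) * 0.4 * 1.5
    vx = derivative_of(x, dt=0.4)
    assert np.allclose(vx, 1.5)
    return vx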
dt = 0.4
maybe_makedirs('../processed')
data_columns = pd.MultiIndex.from_product([['position', 'velocity', 'acceleration'], ['x', 'y']])
data_columns = data_columns.insert(0,('frame','id'))
data_columns = data_columns.insert(0,('ped','id'))
for desired_source in ['eth', 'hotel', 'univ', 'zara1', 'zara2']:
for data_class in ['train', 'val', 'test']:
data_dict_path = os.path.join('./processed', '_'.join([desired_source, data_class]) + '.pkl')
processed_data_class = []
for subdir, dirs, files in os.walk(os.path.join('trajnet', desired_source, data_class)):
for file in files:
if not file.endswith('.txt'):
continue
input_data_dict = dict()
full_data_path = os.path.join(subdir, file)
print('At', full_data_path)
data = pd.read_csv(full_data_path, sep='\t', index_col=False, header=None)
data.columns = ['frame_id', 'track_id', 'pos_x', 'pos_y']
data['frame_id'] = pd.to_numeric(data['frame_id'], downcast='integer')
data['track_id'] = pd.to_numeric(data['track_id'], downcast='integer')
data['frame_id'] = data['frame_id'] // 10
data['frame_id'] -= data['frame_id'].min()
data['node_id'] = data['track_id'].astype(str)
data.sort_values('frame_id', inplace=True)
# Mean Position
data['pos_x'] = data['pos_x'] - data['pos_x'].mean()
data['pos_y'] = data['pos_y'] - data['pos_y'].mean()
max_timesteps = data['frame_id'].max()
processed_data = []
for node_id in pd.unique(data['node_id']):
node_df = data[data['node_id'] == node_id]
assert np.all(np.diff(node_df['frame_id']) == 1)
node_values = node_df[['pos_x', 'pos_y']].values
if node_values.shape[0] < 2:
continue
x = node_values[:, 0]
y = node_values[:, 1]
vx = derivative_of(x, dt)
vy = derivative_of(y, dt)
ax = derivative_of(vx, dt)
ay = derivative_of(vy, dt)
data_dict = {
('ped','id'): int(node_id),
('frame','id'): node_df["frame_id"].values,
('position', 'x'): x,
('position', 'y'): y,
('velocity', 'x'): vx,
('velocity', 'y'): vy,
('acceleration', 'x'): ax,
('acceleration', 'y'): ay}
                    ped_dataframe = pd.DataFrame(data_dict, columns=data_columns)
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
from catboost import CatBoostClassifier
from sklearn.model_selection import train_test_split
# Load the datasets
def read_data(base_info_path,
annual_report_info_path,
tax_info_path,
change_info_path,
news_info_path,
other_info_path,
entprise_info_path,
):
    base_info = pd.read_csv(base_info_path) # basic company information
annual_report_info = pd.read_csv(annual_report_info_path)
    tax_info = pd.read_csv(tax_info_path)
change_info = pd.read_csv(change_info_path)
news_info = pd.read_csv(news_info_path)
other_info = pd.read_csv(other_info_path)
entprise_info = pd.read_csv(entprise_info_path)
pd.to_datetime(tax_info['START_DATE'], format="%Y-%m-%d")
return base_info, annual_report_info, tax_info, change_info, news_info, other_info, entprise_info
df_x = pd.DataFrame(entprise_info['id'])
df_y = pd.DataFrame(entprise_info['label'])
x_train, x_test, y_train, y_test = train_test_split(df_x, df_y, test_size = 0.3, random_state = 2021)
data = pd.concat([x_train, x_test]).reset_index(drop=True)
def get_base_info_feature(df, base_info):
off_data = base_info.copy()
off_data_isnull_rate=off_data.isnull().sum()/len(off_data)
big_null_name=off_data_isnull_rate[off_data_isnull_rate.values>=0.95].index
base_info.drop(big_null_name,axis=1,inplace=True)
base_info.fillna(-1, downcast = 'infer', inplace = True)
    # Process the opening/closing date columns
    base_info['opfrom']=pd.to_datetime(base_info['opfrom'],format="%Y-%m-%d") # parse the column as datetime
    base_info['pre_opfrom']=base_info['opfrom'].map(lambda x:x.timestamp() if x!=-1 else 0) # convert the datetime to a unix timestamp
base_info['opto']=pd.to_datetime(base_info['opto'],format='%Y-%m-%d')
base_info['pre_opto']=base_info['opto'].map(lambda x:x.timestamp() if x!=-1 else 0)
le=LabelEncoder()
base_info['industryphy']=le.fit_transform(base_info['industryphy'].map(str))
base_info['opscope']=le.fit_transform(base_info['opscope'].map(str))
base_info['opform']=le.fit_transform(base_info['opform'].map(str))
data = df.copy()
data=pd.merge(data, base_info, on='id', how='left')
    # Basic features for the broad industry category
key=['industryphy']
prefixs = ''.join(key) + '_'
    # number of distinct companies operating in this industry
pivot=pd.pivot_table(data,index=key,values='id',aggfunc=lambda x:len(set(x)))
pivot=pd.DataFrame(pivot).rename(columns={'id': prefixs+'different_id'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # advertising-related features by industry
key=['industryco','adbusign']
    # mean registered capital for advertising vs. non-advertising companies in this industry
pivot=pd.pivot_table(data,index=key,values='regcap',aggfunc=np.mean)
pivot=pd.DataFrame(pivot).rename(columns={'regcap': prefixs+'mean_regcap'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # features for the fine-grained industry code
key=['industryco']
prefixs = ''.join(key) + '_'
    # number of distinct companies operating in this industry
pivot=pd.pivot_table(data,index=key,values='id',aggfunc=lambda x:len(set(x)))
pivot=pd.DataFrame(pivot).rename(columns={'id': prefixs+'different_id'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # mean number of employees in this industry
pivot=pd.pivot_table(data,index=key,values='empnum',aggfunc=np.mean)
pivot=pd.DataFrame(pivot).rename(columns={'empnum': prefixs+'mean_empnum'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # maximum number of employees in this industry
pivot=pd.pivot_table(data,index=key,values='empnum',aggfunc=np.max)
pivot=pd.DataFrame(pivot).rename(columns={'empnum': prefixs+'max_empnum'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # total number of people in the company
data['all_people']=list(map(lambda x,y,z : x+y+z ,data['exenum'],data['empnum'],data['parnum']))
    # share of the registered capital that has actually been paid in
data['rec/reg']=list(map(lambda x,y : x/y if y!=0 else 0,data['reccap'],data['regcap']))
data.fillna(-1, downcast = 'infer', inplace = True)
    # average capital contributed per person in the company
data['mean_hand']=list(map(lambda x,y : x/y if y!=0 else 0,data['regcap'],data['all_people']))
data.fillna(-1, downcast = 'infer', inplace = True)
    # business scope (e.g. sports, materials)
key=['opscope']
prefixs = ''.join(key) + '_'
    # number of companies sharing the same business scope
pivot=pd.pivot_table(data,index=key,values='id',aggfunc=lambda x: len(set(x)))
pivot=pd.DataFrame(pivot).rename(columns={'id': prefixs+'many_id'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # mean number of employees per company for this business scope
pivot=pd.pivot_table(data,index=key,values='empnum',aggfunc=np.mean)
pivot=pd.DataFrame(pivot).rename(columns={'empnum': prefixs+'mean_empnum'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # total number of partners across companies with this business scope
pivot=pd.pivot_table(data,index=key,values='parnum',aggfunc=np.sum)
pivot=pd.DataFrame(pivot).rename(columns={'parnum': prefixs+'sum_parnum'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # mean number of partners per company for this business scope
pivot=pd.pivot_table(data,index=key,values='parnum',aggfunc=np.mean)
pivot=pd.DataFrame(pivot).rename(columns={'parnum': prefixs+'mean_parnum'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # mean registered capital for this business scope
pivot=pd.pivot_table(data[data['regcap'].map(lambda x : x!=-1)],index=key,values='regcap',aggfunc=np.mean)
pivot=pd.DataFrame(pivot).rename(columns={'regcap': prefixs+'mean_ragcap'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # maximum and minimum registered capital for this business scope
pivot=pd.pivot_table(data[data['regcap'].map(lambda x : x!=-1)],index=key,values='regcap',aggfunc=np.max)
pivot=pd.DataFrame(pivot).rename(columns={'regcap': prefixs+'max_ragcap'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # mean paid-in capital for this business scope
pivot=pd.pivot_table(data[data['reccap'].map(lambda x : x!=-1)],index=key,values='reccap',aggfunc=np.mean)
pivot=pd.DataFrame(pivot).rename(columns={'reccap': prefixs+'mean_raccap'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # maximum and minimum paid-in capital for this business scope
pivot=pd.pivot_table(data[data['reccap'].map(lambda x : x!=-1)],index=key,values='reccap',aggfunc=np.max)
    pivot=pd.DataFrame(pivot)
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.7.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # QA queries on new CDR_deid Row Suppression-ICD10ICD9 SNOMED
#
# see [DC-852] AND [DC-732] for more details
import urllib
import pandas as pd
pd.options.display.max_rows = 120
# + tags=["parameters"]
project_id = ""
deid_cdr=""
# -
# df will have a summary in the end
df = pd.DataFrame(columns = ['query', 'result'])
# # 1 PRC_1 Verify all ICD9(764 -779)/ICD10(P) concept_codes used to specify other conditions originating In the perinatal period (including birth trauma),are not generated/displayed as condition_source_value in the CONDITION_OCCURENCE table
query = f'''
WITH ICD_suppressions AS (
SELECT concept_id
FROM `{project_id}.{deid_cdr}.concept`
WHERE
(vocabulary_id='ICD9CM' AND
(concept_code LIKE '764%' OR concept_code LIKE '765%' OR concept_code LIKE '766%' OR
concept_code LIKE '767%' OR concept_code LIKE '768%' OR concept_code LIKE '769%' OR concept_code LIKE '770%' OR
concept_code LIKE '771%' OR concept_code LIKE '772%' OR concept_code LIKE '773%' OR concept_code LIKE '774%' OR
concept_code LIKE '775%' OR concept_code LIKE '776%' OR concept_code LIKE '777%' OR concept_code LIKE '778%' OR
concept_code LIKE '779%'))
OR (vocabulary_id='ICD10CM' AND
concept_code LIKE 'P%')
)
SELECT COUNT (*) AS n_row_not_pass
FROM `{project_id}.{deid_cdr}.condition_occurrence` p1
JOIN ICD_suppressions p2
ON p1.condition_source_concept_id=p2.concept_id
WHERE condition_source_value IS NOT NULL
'''
df1=pd.read_gbq(query, dialect='standard')
if df1.loc[0].sum()==0:
df = df.append({'query' : 'Query1 ICD9(764 -779)/ICD10(P) in condition', 'result' : 'PASS'},
ignore_index = True)
else:
df = df.append({'query' : 'Query1 ICD9(764 -779)/ICD10(P) in condition', 'result' : ''},
ignore_index = True)
df1
# # 2 PRC_2 Verify all ICD9(764 -779)/ICD10(P) concept_codes used to specify other conditions originating In the perinatal period (including birth trauma),are not generated/displayed as observation_source_value in the OBSERVATION table
query = f'''
WITH ICD_suppressions AS (
SELECT concept_id
FROM `{project_id}.{deid_cdr}.concept`
WHERE
(vocabulary_id='ICD9CM' AND
(concept_code LIKE '765%' OR concept_code LIKE '766%' OR
concept_code LIKE '767%' OR concept_code LIKE '768%' OR concept_code LIKE '769%' OR concept_code LIKE '770%' OR
concept_code LIKE '771%' OR concept_code LIKE '772%' OR concept_code LIKE '773%' OR concept_code LIKE '774%' OR
concept_code LIKE '775%' OR concept_code LIKE '776%' OR concept_code LIKE '777%' OR concept_code LIKE '778%' OR
concept_code LIKE '779%'))
OR (vocabulary_id='ICD10CM' AND concept_code LIKE 'P%')
)
SELECT COUNT (*) AS n_row_not_pass
FROM `{project_id}.{deid_cdr}.observation` p1
JOIN ICD_suppressions p2
ON p1.observation_source_concept_id=p2.concept_id
WHERE observation_source_value IS NOT NULL
'''
df1=pd.read_gbq(query, dialect='standard')
if df1.loc[0].sum()==0:
df = df.append({'query' : 'Query2 ICD9(764 -779)/ICD10(P)', 'result' : 'PASS'},
ignore_index = True)
else:
df = df.append({'query' : 'Query2 ICD9(764 -779)/ICD10(P)', 'result' : ''},
ignore_index = True)
df1
# # 3 PRC_3 Verify all CD9(V3)/ICD10(Z38) concept_codes used to specify Liveborn infants according to type of birth are not generated/displayed as observation_source_value in the OBSERVATION table
query = f'''
WITH ICD_suppressions AS (
SELECT concept_id
FROM `{project_id}.{deid_cdr}.concept`
WHERE
(vocabulary_id='ICD9CM' AND (concept_code LIKE 'V3%' ))
OR (vocabulary_id='ICD10CM' AND concept_code LIKE 'Z38%')
)
SELECT COUNT (*) AS n_row_not_pass
FROM `{project_id}.{deid_cdr}.observation` p1
JOIN ICD_suppressions p2
ON p1.observation_source_concept_id=p2.concept_id
WHERE observation_source_value IS NOT NULL
'''
df1=pd.read_gbq(query, dialect='standard')
if df1.loc[0].sum()==0:
df = df.append({'query' : 'Query3 CD9(V3)/ICD10(Z38) in obs', 'result' : 'PASS'},
ignore_index = True)
else:
df = df.append({'query' : 'Query3 CD9(V3)/ICD10(Z38) in obs', 'result' : ''},
ignore_index = True)
df1
# # 4 PRC_4 Verify all CD9(V3)/ICD10(Z38) concept_codes used to specify Liveborn infants according to type of birth are not generated/displayed as condition_source_value in the CONDITION_OCCURENCE table
query = f'''
WITH ICD_suppressions AS (
SELECT concept_id
FROM `{project_id}.{deid_cdr}.concept`
WHERE
(vocabulary_id='ICD9CM' AND (concept_code LIKE 'V3%' ))
OR (vocabulary_id='ICD10CM' AND concept_code LIKE 'Z38%')
)
SELECT COUNT (*) AS n_row_not_pass
FROM `{project_id}.{deid_cdr}.condition_occurrence` p1
JOIN ICD_suppressions p2
ON p1.condition_source_concept_id=p2.concept_id
WHERE condition_source_value IS NOT NULL
'''
df1=pd.read_gbq(query, dialect='standard')
if df1.loc[0].sum()==0:
df = df.append({'query' : 'Query4 CD9(V3)/ICD10(Z38) in condition ', 'result' : 'PASS'},
ignore_index = True)
else:
df = df.append({'query' : 'Query4 CD9(V3)/ICD10(Z38) in condition ', 'result' : ''},
ignore_index = True)
df1
# # 5 PRC_5 Verify all ICD9(798)/ICD10(R99) concept_codes used to specify Unknown cause of death are not generated/displayed as observation_source_value in the OBSERVATION table
query = f'''
WITH ICD_suppressions AS (
SELECT concept_id
FROM `{project_id}.{deid_cdr}.concept`
WHERE
(vocabulary_id='ICD9CM' AND (concept_code LIKE '798%' ))
OR (vocabulary_id='ICD10CM' AND concept_code LIKE 'R99%')
)
SELECT COUNT (*) AS n_row_not_pass
FROM `{project_id}.{deid_cdr}.observation` p1
JOIN ICD_suppressions p2
ON p1.observation_source_concept_id=p2.concept_id
WHERE observation_source_value IS NOT NULL
'''
df1=pd.read_gbq(query, dialect='standard')
if df1.loc[0].sum()==0:
df = df.append({'query' : 'Query5 ICD9(798)/ICD10(R99) in obs', 'result' : 'PASS'},
ignore_index = True)
else:
df = df.append({'query' : 'Query5 ICD9(798)/ICD10(R99) in obs', 'result' : ''},
ignore_index = True)
df1
# # 6 PRC_6 Verify all ICD9(799)/ICD10(R99) concept_codes used to specify Unknown cause of death are not generated/displayed as condition_source_value in the CONDITION_OCCURENCE table
#
# <font color='red'> question, ICD9(798) is in the title but in the note, it is 799. ICD10 (R99) in the title but not in the note though. confused here.
#
# after test in the new cdr, should be ICD798 not 799. The original title was wrong.
query = f'''
WITH ICD_suppressions AS (
SELECT concept_id
FROM `{project_id}.{deid_cdr}.concept`
WHERE (vocabulary_id='ICD9CM' AND (concept_code LIKE '798%' ))
OR (vocabulary_id='ICD10CM' AND concept_code LIKE 'R99%')
)
SELECT COUNT (*) AS n_row_not_pass
FROM `{project_id}.{deid_cdr}.condition_occurrence` p1
JOIN ICD_suppressions p2
ON p1.condition_source_concept_id=p2.concept_id
WHERE condition_source_value IS NOT NULL
'''
df1=pd.read_gbq(query, dialect='standard')
if df1.loc[0].sum()==0:
df = df.append({'query' : 'Query6 ICD9(799)/ICD10(R99) in condition', 'result' : 'PASS'},
ignore_index = True)
else:
df = df.append({'query' : 'Query6 ICD9(799)/ICD10(R99) in condition', 'result' : ''},
ignore_index = True)
df1
# # 7 PRC_7 Verify all ICD10(Y36) codes used to specify Injury due to war operations are not generated/displayed as condition_source_value in the CONDITION_OCCURENCE table
query = f'''
WITH ICD_suppressions AS (
SELECT concept_id
FROM
`{project_id}.{deid_cdr}.concept`
WHERE (vocabulary_id='ICD9CM' AND (concept_code LIKE 'E99%' ))
OR (vocabulary_id='ICD10CM' AND concept_code LIKE 'Y36%')
)
SELECT COUNT (*) AS n_row_not_pass
FROM `{project_id}.{deid_cdr}.condition_occurrence` p1
JOIN ICD_suppressions p2
ON p1.condition_source_concept_id=p2.concept_id
WHERE condition_source_value IS NOT NULL
'''
df1=pd.read_gbq(query, dialect='standard')
if df1.loc[0].sum()==0:
df = df.append({'query' : 'Query7 ICD10(Y36) in condition', 'result' : 'PASS'},
ignore_index = True)
else:
df = df.append({'query' : 'Query7 ICD10(Y36) in condition', 'result' : ''},
ignore_index = True)
df1
# # 8 PRC_8 Verify all ICD10(Y36) codes used to specify Injury due to war operations are not generated/displayed as observation_source_value in the OBSERVATION table
query = f'''
WITH ICD_suppressions AS (
SELECT concept_id
FROM `{project_id}.{deid_cdr}.concept`
WHERE (vocabulary_id='ICD9CM' AND ( concept_code LIKE 'E100%' ))
OR (vocabulary_id='ICD10CM' AND concept_code LIKE 'Y36%')
)
SELECT COUNT (*) AS n_row_not_pass
FROM `{project_id}.{deid_cdr}.observation` p1
JOIN ICD_suppressions p2
ON p1.observation_source_concept_id=p2.concept_id
WHERE observation_source_value IS NOT NULL
'''
df1=pd.read_gbq(query, dialect='standard')
if df1.loc[0].sum()==0:
df = df.append({'query' : 'Query8 ICD10(Y36) in obs', 'result' : 'PASS'},
ignore_index = True)
else:
df = df.append({'query' : 'Query8 ICD10(Y36) in obs', 'result' : ''},
ignore_index = True)
df1
# # 9 PRC_9 Verify all ICD10(Y37) codes used to specify Military operations are not generated/displayed as observation_source_value in the OBSERVATION table
query = f'''
WITH ICD_suppressions AS (
SELECT concept_id
FROM `{project_id}.{deid_cdr}.concept`
WHERE (vocabulary_id='ICD10CM' AND concept_code LIKE 'Y37%')
)
SELECT COUNT (*) AS n_row_not_pass
FROM `{project_id}.{deid_cdr}.observation` p1
JOIN ICD_suppressions p2
ON p1.observation_source_concept_id=p2.concept_id
WHERE observation_source_value IS NOT NULL
'''
df1=pd.read_gbq(query, dialect='standard')
# pylint: disable=C0103,E0401
"""
Template for SNAP Dash apps.
"""
import copy, math, os
import dash
import luts
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import plotly.express as px
from dash.dependencies import Input, Output
from gui import layout, path_prefix
from plotly.subplots import make_subplots
# Read data blobs and other items used from env
data = pd.read_pickle("data/roses.pickle")
calms = pd.read_pickle("data/calms.pickle")
exceedance = pd.read_pickle("data/crosswind_exceedance.pickle")
import os
import collections
import unittest
import pytest
import pytz
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
import pvlib
from .context import capdata as pvc
data = np.arange(0, 1300, 54.167)
index = pd.date_range(start='1/1/2017', freq='H', periods=24)
df = pd.DataFrame(data=data, index=index, columns=['poa'])
# capdata = pvc.CapData('capdata')
# capdata.df = df
"""
Run all tests from project root:
'python -m tests.test_CapData'
Run individual tests:
'python -m unittest tests.test_CapData.Class.Method'
-m flag imports unittest as module rather than running as script
Run tests using pytest use the following from project root.
To run a class of tests
pytest tests/test_CapData.py::TestCapDataEmpty
To run a specific test:
pytest tests/test_CapData.py::TestCapDataEmpty::test_capdata_empty
"""
test_files = ['test1.csv', 'test2.csv', 'test3.CSV', 'test4.txt',
'pvsyst.csv', 'pvsyst_data.csv']
class TestUpdateSummary:
"""Test the update_summary wrapper and functions used within."""
def test_round_kwarg_floats(self):
"""Tests round kwarg_floats."""
kwarg_dict = {'ref_val': 763.4536140499999, 't1': 2, 'inplace': True}
rounded_kwarg_dict_3 = {'ref_val': 763.454, 't1': 2,
'inplace': True}
assert pvc.round_kwarg_floats(kwarg_dict) == rounded_kwarg_dict_3
rounded_kwarg_dict_4 = {'ref_val': 763.4536, 't1': 2,
'inplace': True}
assert pvc.round_kwarg_floats(kwarg_dict, 4) == rounded_kwarg_dict_4
def test_tstamp_kwarg_to_strings(self):
"""Tests coversion of kwarg values from timestamp to strings."""
start_datetime = pd.to_datetime('10/10/1990 00:00')
kwarg_dict = {'start': start_datetime, 't1': 2}
kwarg_dict_str_dates = {'start': '1990-10-10 00:00', 't1': 2}
assert pvc.tstamp_kwarg_to_strings(kwarg_dict) == kwarg_dict_str_dates
class TestTopLevelFuncs(unittest.TestCase):
def test_perc_wrap(self):
"""Test percent wrap function."""
rng = np.arange(1, 100, 1)
rng_cpy = rng.copy()
df = pd.DataFrame({'vals': rng})
df_cpy = df.copy()
bool_array = []
for val in rng:
np_perc = np.percentile(rng, val, interpolation='nearest')
wrap_perc = df.agg(pvc.perc_wrap(val)).values[0]
bool_array.append(np_perc == wrap_perc)
self.assertTrue(all(bool_array),
'np.percentile wrapper gives different value than np perc')
self.assertTrue(all(df == df_cpy), 'perc_wrap function modified input df')
def test_filter_irr(self):
rng = np.arange(0, 1000)
df = pd.DataFrame(np.array([rng, rng+100, rng+200]).T,
columns = ['weather_station irr poa W/m^2',
'col_1', 'col_2'])
df_flt = pvc.filter_irr(df, 'weather_station irr poa W/m^2', 50, 100)
self.assertEqual(df_flt.shape[0], 51,
'Incorrect number of rows returned from filter.')
self.assertEqual(df_flt.shape[1], 3,
'Incorrect number of columns returned from filter.')
self.assertEqual(df_flt.columns[0], 'weather_station irr poa W/m^2',
'Filter column name inadverdently modified by method.')
self.assertEqual(df_flt.iloc[0, 0], 50,
'Minimum value in returned data in filter column is'
'not equal to low argument.')
self.assertEqual(df_flt.iloc[-1, 0], 100,
'Maximum value in returned data in filter column is'
'not equal to high argument.')
def test_fit_model(self):
"""
Test fit model func which wraps statsmodels ols.fit for dataframe.
"""
rng = np.random.RandomState(1)
x = 50 * abs(rng.rand(50))
y = 2 * x - 5 + 5 * rng.randn(50)
df = pd.DataFrame({'x': x, 'y': y})
fml = 'y ~ x - 1'
passed_ind_vars = fml.split('~')[1].split()[::2]
try:
passed_ind_vars.remove('1')
except ValueError:
pass
reg = pvc.fit_model(df, fml=fml)
for var in passed_ind_vars:
self.assertIn(var, reg.params.index,
'{} ind variable in formula argument not in model'
'parameters'.format(var))
def test_predict(self):
x = np.arange(0, 50)
y1 = x
y2 = x * 2
y3 = x * 10
dfs = [pd.DataFrame({'x': x, 'y': y1}),
               pd.DataFrame({'x': x, 'y': y2})]
#%%
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#%% Measurementes
ms_path = 'examples/bs2019/measurements.csv'
ms = pd.read_csv(ms_path, index_col=0)
ms.index = pd.to_datetime(ms.index)
t0 = pd.to_datetime('2018-04-05 00:00:00')
t1 = pd.to_datetime('2018-04-08 00:00:00')
ms = ms.loc[t0:t1]
solrad = ms['solrad'].values
Tout = ms['Tout'].values
occ = ms['occ'].values
t = (ms.index - ms.index[0]).total_seconds() / 3600.
fig, ax = plt.subplots(3, 1, figsize=(5, 3), sharex=True)
fig.set_dpi(120)
ax[0].plot(t, Tout, 'b-')
ax[0].set_ylabel("$T_{out}$ [$^\circ$C]")
ax[1].plot(t, solrad, 'b-')
ax[1].set_ylabel("$q_{sol}$ [W/m$^2$]")
ax[2].plot(t, occ, 'b-')
ax[2].set_ylabel("$n_{occ}$ [-]")
ax[2].set_xticks(np.arange(0, 73, 24))
ax[2].set_xlabel("$t$ [h]")
plt.subplots_adjust(0.13, 0.15, 0.98, 0.98)
fig.savefig('examples/bs2019/figs/inputs_mpc.pdf')
### Case 1 ####################################################################
#%% Compare estimates between FMUs
est_dir = 'examples/bs2019/case1/results/est/'
tols = [
'1e-4',
'1e-6',
'1e-7',
'1e-9',
'1e-11'
]
cols = pd.read_csv(est_dir + 'r1c1_dymola_' + tols[0] +
'/parameters_rel.csv').columns
parameters = pd.DataFrame(index=pd.Index(tols, name='tol'),
columns=cols)
for t in tols:
for p in cols:
parameters.loc[t, p] = pd.read_csv(est_dir + 'r1c1_dymola_'
+ t + '/parameters_rel.csv')[p].iloc[0]
parameters.T.plot(kind='bar')
#%% Parameter estimation: validation
est_dir = 'examples/bs2019/case1/results/est/'
tols = [
'1e-4',
'1e-6',
'1e-7',
'1e-9',
'1e-11'
]
idl = pd.read_csv(est_dir + 'ideal.csv', index_col=0)
idl
vld = pd.DataFrame()
for t in tols:
res = pd.read_csv(est_dir + 'r1c1_dymola_' + t +
'/vld_res.csv', index_col=0)
vld[t] = res['T']
idl = idl.loc[:vld.index[-1]]
idl.index /= 3600.
vld.index /= 3600.
# Plot
fig, ax = plt.subplots(1, 1, figsize=(5, 3))
fig.set_dpi(130)
ax.plot(idl['T'], ls='-', label='Measurement')
ax.plot(vld['1e-11'], ls='-.', label='R1C1')
ax.legend(loc='lower right')
ax.set_xticks(np.arange(0, vld.index[-1] + 1, 24))
ax.set_ylabel('$T$ [$^\circ$C]')
ax.set_xlabel('$t$ [h]')
ax.vlines(5*24., ymin=19.5, ymax=26.9, linestyles='--', lw=0.75, color='k')
ax.set_ylim(19.5, 26.9)
ax.text(80, 26.4, "Training")
ax.text(128, 26.4, "Validation")
plt.subplots_adjust(0.1, 0.18, 0.99, 0.99)
fig.savefig('examples/bs2019/figs/validation_T.pdf')
#%% Result overview
fmu = 'r1c1_dymola_1e-11'
hrz = 4
outdir = 'examples/bs2019/case1/results/mpc/{}/h{}/'.format(fmu, hrz)
# Constraints
constr = pd.read_csv(outdir + 'constr.csv', index_col=0)
constr.index /= 3600.
constr -= 273.15
# Emulation states
xemu = pd.read_csv(outdir + '/xemu.csv')\
.set_index('time')
xemu.index /= 3600.
xemu -= 273.15
# Control states
xctr = pd.read_csv(outdir + 'xctr.csv')\
.set_index('time')
xctr.index /= 3600.
xctr -= 273.15
# Control inputs
u = pd.read_csv(outdir + 'u.csv')\
.set_index('time')
u.index /= 3600.
# Optimized inputs
fig, ax = plt.subplots(2, 1, sharex=True, sharey=False,
figsize=(5, 4))
fig.set_dpi(130)
# ax[0]
ax[0].plot(u['vpos'], 'k-', lw=2)
ax[0].set_ylim(-100, 100)
ax[0].set_ylabel('$q$ [%]')
# ax[1]
# ax[1].plot(xctr['x0'], label='Control')
ax[1].plot(xemu['cair.T'], 'r-', label=fmu)
ax[1].legend(loc='upper right')
ax[1].plot(constr['Tmin'], 'k--', lw=0.5)
ax[1].plot(constr['Tmax'], 'k--', lw=0.5)
ax[1].set_xticks(np.arange(0, u.index.values[-1] + 1, 24))
plt.minorticks_off()
ax[1].set_yticks(np.arange(19, 25, 1))
ax[1].set_xlabel('$t$ [h]')
ax[1].set_ylabel('$T_i$ [$^\circ$C]')
# ax[0] - subinterval solutions
files = os.listdir(outdir)
ufiles = list()
for f in files:
fname = f.split('.')[0]
if fname[0] == 'u' and len(fname) > 1:
ufiles.append(f)
udfs = list()
for i in range(len(ufiles)):
df = pd.read_csv(outdir + 'u{}.csv'.format(i), index_col=0)
df.index /= 3600.
ax[0].plot(df['vpos'], ls='--', lw=1.)
# plt.show()
#%% Compare horizons
fmu = 'r1c1_dymola_1e-11'
horizons = [2, 4, 6, 8, 10]
fig, ax = plt.subplots(2, 1, sharex=True, sharey=False,
figsize=(5, 4))
fig.set_dpi(120)
Qrc = dict()
i = 0
for hrz in horizons:
outdir = 'examples/bs2019/case1/results/mpc/{}/h{}/'.format(fmu, hrz)
# Constraints
constr = pd.read_csv(outdir + 'constr.csv', index_col=0)
constr.index /= 3600.
constr -= 273.15
# Emulation states
xemu = pd.read_csv(outdir + '/xemu.csv')\
.set_index('time')
xemu.index /= 3600.
xemu -= 273.15
# Control states
xctr = pd.read_csv(outdir + 'xctr.csv')\
.set_index('time')
xctr.index /= 3600.
xctr -= 273.15
# Control inputs
u = pd.read_csv(outdir + 'u.csv')\
.set_index('time')
u.index /= 3600.
u['vpos'] *= 20 # [%] -> [W]
Qrc[hrz] = u['vpos'].abs().sum() / 1000.
# Actual horizon string, e.g. "6h"
ahrz = "{}h".format(hrz)
# Color map
lspace = np.linspace(0, 1, len(horizons))
colors = [plt.cm.winter(x) for x in lspace]
# ax[0]
ax[0].plot(u['vpos'], c=colors[i], label=ahrz)
# ax[1]
ax[1].plot(xemu['cair.T'], c=colors[i], label=ahrz)
i += 1
ax[1].legend(loc='center', bbox_to_anchor=(0.5,-0.5), ncol=5)
ax[1].plot(constr['Tmin'], 'k--', lw=0.5)
ax[1].plot(constr['Tmax'], 'k--', lw=0.5)
ax[0].set_ylim(-2200, 2200)
ax[1].set_xticks(np.arange(0, u.index.values[-1] + 1, 24))
plt.minorticks_off()
ax[1].set_yticks(np.arange(19, 25, 1))
ax[0].set_ylabel('$q$ [W]')
ax[1].set_xlabel('$t$ [h]')
ax[1].set_ylabel('$T$ [$^\circ$C]')
ax[0].set_title('(a)')
ax[1].set_title('(b)')
plt.subplots_adjust(left=0.16, right=0.99, top=0.93, bottom=0.24)
fig.tight_layout()
fig.savefig('examples/bs2019/figs/case1_horizon_tol_1e-11.pdf')
#%% Computational time
# FMU 1e-11
wd1 = 'examples/bs2019/case1/results/mpc/r1c1_dymola_1e-11/'
# FMU 1e-9
wd2 = 'examples/bs2019/case1/results/mpc/r1c1_dymola_1e-9/'
# SVM
wd3 = 'examples/bs2019/case2/results/mpc-lin/'
hdirs1 = [x[0].split('/')[-1] for x in os.walk(wd1)][1:]
hdirs2 = [x[0].split('/')[-1] for x in os.walk(wd2)][1:]
hdirs3 = [x[0].split('/')[-1] for x in os.walk(wd3)][1:]
hix = [int(x[1:]) for x in hdirs1]
hix = sorted(hix)
ct1 = list()
ct2 = list()
ct3 = list()
# Number of optimization variables
nv = [x * 2 for x in hix]
# Optimization horizon [h]
oh = [x for x in hix]
for h in hix:
with open(wd1 + "h" + str(h) + '/cputime.txt') as f:
s = f.read().split(' ')
x = int(s[-2])
ct1.append(x / 60.)
with open(wd2 + "h" + str(h) + '/cputime.txt') as f:
s = f.read().split(' ')
x = int(s[-2])
ct2.append(x / 60.)
with open(wd3 + "h" + str(h) + '/cputime.txt') as f:
s = f.read().split(' ')
x = int(s[-2])
ct3.append(x / 60.)
fig, ax = plt.subplots(1, 1, figsize=(5,3))
fig.set_dpi(120)
plt.plot(oh, ct1, marker='s', c='k', ls=':', lw=1., label='R1C1 FMU (tol=1e-11)')
plt.plot(oh, ct2, marker='o', c='b', ls=':', lw=1., label='R1C1 FMU (tol=1e-9)')
plt.plot(oh, ct3, marker='v', c='r', ls=':', lw=1., label='SVR')
ax.set_xlabel('Optimization horizon [h]')
ax.set_ylabel('Total CPU time [min]')
ax2 = ax.twiny()
ax2.set_xticks(ax.get_xticks())
ax2.set_xlim(ax.get_xlim())
ax2.set_xticklabels([int(x * 2) for x in ax.get_xticks()])
ax2.set_xlabel('Number of optimization variables')
ax.legend()
ax.grid()
plt.subplots_adjust(0.1, 0.18, 0.99, 0.85)
fig.savefig('examples/bs2019/figs/cputime.pdf')
plt.show()
#%% Solution quality - omit CVode FMUs, they seem not working correctly
# Read all inputs and states
wd = 'examples/bs2019/case1/results/mpc/'
fmus = os.listdir(wd)
hz = '/h10/'
new_names = [y[5:].replace('_', ' ') for y in fmus]
for i in range(len(new_names)):
new_names[i] = new_names[i].replace('dymola ', 'tol=')
cdirs = [wd + x + hz for x in fmus]
cmap = {x:y for x, y in zip(cdirs, new_names)}
uall = pd.DataFrame()
xall = pd.DataFrame()
for c, f in zip(cdirs, fmus):
u = pd.read_csv(c + 'u.csv', index_col=0)
x = pd.read_csv(c + 'xemu.csv', index_col=0)
uall[c] = u['vpos']
xall[c] = x['cair.T']
uall = uall.rename(columns=cmap) # Inputs
xall = xall.rename(columns=cmap) # States
# Energy consumption
q = uall * 20.
Q = q.abs().sum() / 1000. # [kWh]
# Constraint violation
cstr = pd.read_csv(wd + 'r1c1_dymola_1e-9/h2/constr.csv')
cstr['time'] = cstr['time'].astype(int)
cstr = cstr.set_index('time')
vup = xall.copy()
vlo = xall.copy()
for c in xall:
vup[c] = xall[c] - cstr['Tmax']
vup[c].loc[vup[c] < 0] = 0
vlo[c] = cstr['Tmin'] - xall[c]
vlo[c].loc[vlo[c] < 0] = 0
vtot = vup + vlo
vtot = vtot.sum()
# Case order for plots
cord = ['tol=1e-4', 'tol=1e-6', 'tol=1e-7', 'tol=1e-9', 'tol=1e-11']
# Ordered results
Qord = [Q.loc[x] for x in cord]
vord = [vtot.loc[x] for x in cord]
# Show both on scatter plot
n_horizons = 5
lspace = np.linspace(0, 1, n_horizons)
colors = [plt.cm.jet(x) for x in lspace]
markers = ['o', 's', 'D', 'v', '^']
fig, ax = plt.subplots(figsize=(5, 3))
fig.set_dpi(120)
for q, v, l, c, m in zip(Qord, vord, cord, colors, markers):
plt.scatter(q, v, label=l, c=c, s=100, marker=m)
ax.set_xlabel('Total energy consumption $Q$ [kWh]')
ax.set_ylabel('Temperature violation $v_T$ [Kh]')
ax.legend(loc='center', ncol=3, bbox_to_anchor=(0.45,-0.4))
ax.grid()
plt.subplots_adjust(0.18, 0.35, 0.97, 0.95)
fig.savefig('examples/bs2019/figs/solution_quality.pdf')
# Case 2 ######################################################################
#%% Model validation
svr_x = pd.read_csv('examples/bs2019/case2/results/mpc-lin/vld_xctr.csv', index_col=0)
svr_x = svr_x.rename(columns={'cair.T':'T'})
svr_x.index /= 3600.
svr_x['T'] -= 273.15
rc_x = pd.read_csv('examples/bs2019/case2/results/mpc-lin/vld_xemu.csv', index_col=0)
rc_x = rc_x.rename(columns={'cair.T':'T'})
rc_x.index /= 3600.
rc_x['T'] -= 273.15
fig, ax = plt.subplots(1, 1, figsize=(5, 3))
fig.set_dpi(130)
ax.plot(rc_x.index, rc_x['T'].values, label='R1C1')
ax.plot(svr_x.index, svr_x['T'].values, label='SVR', ls='--')
ax.legend()
ax.set_xlabel('$t$ [h]')
ax.set_ylabel('$T$ [$^\circ$C]')
ax.set_xticks(np.arange(0, 97, 24))
plt.subplots_adjust(0.13, 0.15, 0.98, 0.98)
fig.savefig('examples/bs2019/figs/svr_validation.pdf')
#%% Overview
hrz = 6
outdir = 'examples/bs2019/case2/results/mpc-lin/h{}/'.format(hrz)
# Constraints
constr = pd.read_csv(outdir + 'constr.csv', index_col=0)
constr.index /= 3600.
constr -= 273.15
# Emulation states
xemu = pd.read_csv(outdir + '/xemu.csv')\
.set_index('time')
xemu.index /= 3600.
xemu -= 273.15
# Control states
xctr = pd.read_csv(outdir + 'xctr.csv')\
.set_index('time')
xctr.index /= 3600.
xctr -= 273.15
# Control inputs
u = pd.read_csv(outdir + 'u.csv')\
.set_index('time')
u.index /= 3600.
# Optimized inputs
fig, ax = plt.subplots(2, 1, sharex=True, sharey=False,
figsize=(5, 4))
fig.set_dpi(130)
# ax[0]
ax[0].plot(u['vpos'], 'k-', lw=2)
ax[0].set_ylim(-100, 100)
ax[0].set_ylabel('$q$ [%]')
# ax[1]
ax[1].plot(xctr['x0'], label='Control')
ax[1].plot(xemu['cair.T'], 'r--', label='Emulation')
ax[1].legend(loc='upper left')
ax[1].plot(constr['Tmin'], 'k--', lw=0.5)
ax[1].plot(constr['Tmax'], 'k--', lw=0.5)
ax[1].set_xticks(np.arange(0, u.index.values[-1] + 1, 24))
plt.minorticks_off()
ax[1].set_yticks(np.arange(19, 25, 1))
ax[1].set_xlabel('$t$ [h]')
ax[1].set_ylabel('$T_i$ [$^\circ$C]')
# ax[0] - subinterval solutions
files = os.listdir(outdir)
ufiles = list()
for f in files:
fname = f.split('.')[0]
if fname[0] == 'u' and len(fname) > 1:
ufiles.append(f)
udfs = list()
for i in range(len(ufiles)):
df = pd.read_csv(outdir + 'u{}.csv'.format(i), index_col=0)
df.index /= 3600.
ax[0].plot(df['vpos'], ls='--', lw=1.)
#%%
horizons = [2, 4, 6, 8, 10]
fig, ax = plt.subplots(2, 1, sharex=True, sharey=False,
figsize=(5, 4))
fig.set_dpi(120)
Qsvr = dict()
i = 0
for hrz in horizons:
outdir = 'examples/bs2019/case2/results/mpc-lin/h{}/'.format(hrz)
# Constraints
constr = pd.read_csv(outdir + 'constr.csv', index_col=0)
constr.index /= 3600.
constr -= 273.15
# Emulation states
xemu = pd.read_csv(outdir + '/xemu.csv')\
.set_index('time')
xemu.index /= 3600.
xemu -= 273.15
# Control states
xctr = pd.read_csv(outdir + 'xctr.csv')\
.set_index('time')
xctr.index /= 3600.
xctr -= 273.15
# Control inputs
u = pd.read_csv(outdir + 'u.csv')\
.set_index('time')
u.index /= 3600.
u['vpos'] *= 20. # [%] -> [W]
Qsvr[hrz] = u['vpos'].abs().sum() / 1000.
# Actual horizon string, e.g. "6h"
ahrz = "{}h".format(hrz)
# Color map
lspace = np.linspace(0, 1, len(horizons))
colors = [plt.cm.winter(x) for x in lspace]
# ax[0]
ax[0].plot(u['vpos'], c=colors[i], label=ahrz)
# ax[1]
ax[1].plot(xemu['cair.T'], c=colors[i], label=ahrz)
i += 1
ax[1].legend(loc='center', bbox_to_anchor=(0.5,-0.5), ncol=5)
ax[1].plot(constr['Tmin'], 'k--', lw=0.5)
ax[1].plot(constr['Tmax'], 'k--', lw=0.5)
ax[0].set_ylim(-2200, 2200)
ax[1].set_xticks(np.arange(0, u.index.values[-1] + 1, 24))
plt.minorticks_off()
ax[1].set_yticks(np.arange(19, 25, 1))
ax[0].set_ylabel('$q$ [W]')
ax[1].set_xlabel('$t$ [h]')
ax[1].set_ylabel('$T$ [$^\circ$C]')
ax[0].set_title('(a)')
ax[1].set_title('(b)')
plt.subplots_adjust(left=0.16, right=0.99, top=0.93, bottom=0.24)
fig.tight_layout()
fig.savefig('examples/bs2019/figs/case2_horizon.pdf')
### Case 3 ####################################################################
#%% Result vs. horizon
horizons = [9]#, 4, 6, 8, 10]
fig, ax = plt.subplots(2, 2, sharex=True, sharey=False,
figsize=(6, 4))
fig.set_dpi(120)
i = 0
for hrz in horizons:
outdir = 'examples/bs2019/case3/results/mpc/h{}/'.format(hrz)
# Constraints
constr = pd.read_csv(outdir + 'constr.csv', index_col=0)
constr.index /= 3600.
constr['Tmin'] -= 273.15
constr['Tmax'] -= 273.15
# Emulation states
xemu = pd.read_csv(outdir + '/xemu.csv')\
.set_index('time')
xemu.index /= 3600.
xemu['cair.T'] -= 273.15
# Control states
    xctr = pd.read_csv(outdir + 'xctr.csv')
import os
import pytest
import numpy as np
import pandas as pd
from napeca_post import s2p_plot_rois_activity_funcs
# getting our fixtures set up
# initializing the path_dict dictionary
@pytest.fixture
def path_dict():
return {}
# defines the directory for which future fixtures will pull from
@pytest.fixture()
def path_dict_entry(path_dict):
path_dict['s2p_dir'] = os.path.join(os.path.abspath('./napeca_post/sample_data/VJ_OFCVTA_7_260_D6_snippit'), 'suite2p', 'plane0')
return path_dict
# calls the define_paths_roi_plots function which loads the appropriate paths into a dictionary for subsequent fixtures
@pytest.fixture()
def paths_function_call(path_dict, path_dict_entry):
path_dict = s2p_plot_rois_activity_funcs.define_paths_roi_plots(path_dict_entry, [0, 10], None, os.path.abspath('./napeca_post/sample_data/VJ_OFCVTA_7_260_D6_snippit'))
return path_dict
# initializing the s2p_data_dict dictionary
@pytest.fixture()
def s2p_data_dict():
return {}
# call the load_s2p_data_roi_plots function to load the npy files that are referenced in the path_dict dictionary
# necessary fixture so we can perform operations on the data
@pytest.fixture()
def load_s2p_data_roi_plots_for_testing(path_dict, paths_function_call, s2p_data_dict):
s2p_data_dict = s2p_plot_rois_activity_funcs.load_s2p_data_roi_plots(paths_function_call)
return s2p_data_dict
# initializing plot_vars dictionary
@pytest.fixture()
def plot_vars():
return {}
# calls the plotting_rois function which tells subsequent plots how many rois to load in
@pytest.fixture()
def creating_plot_vars(path_dict_entry, load_s2p_data_roi_plots_for_testing, s2p_data_dict, plot_vars):
plot_vars = s2p_plot_rois_activity_funcs.plotting_rois(load_s2p_data_roi_plots_for_testing, path_dict_entry)
return plot_vars
# initializing trace_data_selected list
@pytest.fixture()
def trace_data_selected():
return []
# performing calculation using output from all of the previous fixtures to generate an array which is the product of
# running all of the functions in the s2p_plot_rois_activity_funcs file. This is what will be compared to our ground truth
@pytest.fixture()
def trace_data_selected_init(trace_data_selected, load_s2p_data_roi_plots_for_testing, creating_plot_vars, plot_vars):
trace_data_selected = load_s2p_data_roi_plots_for_testing['F_npil_corr_dff'][creating_plot_vars['cell_ids']]
trace_data_selected = trace_data_selected.astype(np.float32)
return trace_data_selected
# loads in our ground truth for a specific run of our sample data, included in repo, to compare to that which
# was loaded from the previous fixtures
@pytest.fixture()
def ground_truth():
data = pd.read_csv("trace_data_selected.csv", header = None)
    data = pd.DataFrame.to_numpy(data)
import pytest
import os
import pandas as pd
import palantir
def rootdir():
return os.path.dirname(os.path.abspath(__file__))
headers = [
"Timestamp",
"Open",
"High",
"Low",
"Close",
"Volume(BTC)",
"Volume(Currency)",
"WeightedPrice",
]
data = pd.DataFrame(columns=headers)
import numpy as np
import pandas as pd
import pytask
from src.config import BLD
from src.config import SRC
from src.shared import create_age_groups
from src.shared import load_dataset
LOCATIONS = [
"cnt_home",
"cnt_work",
"cnt_school",
"cnt_leisure",
"cnt_transport",
"cnt_otherplace",
]
MOSSONG_IN = SRC / "original_data" / "mossong_2008"
MOSSONG_OUT = BLD / "data" / "mossong_2008"
@pytask.mark.depends_on(
{
"hh_common": MOSSONG_IN / "hh_common.csv",
"hh_extra": MOSSONG_IN / "hh_extra.csv",
"participant_common": MOSSONG_IN / "participant_common.csv",
"participant_extra": MOSSONG_IN / "participant_extra.csv",
"contact_common": MOSSONG_IN / "contact_common.csv",
"sday": MOSSONG_IN / "sday.csv",
"eu_hh_size_shares": BLD
/ "data"
/ "population_structure"
/ "eu_hh_size_shares.pkl",
"shared.py": SRC / "shared.py",
}
)
@pytask.mark.produces(
{
"contact_data": MOSSONG_OUT / "contact_data.pkl",
"hh_sample": MOSSONG_OUT / "hh_sample_ger.csv",
"hh_probabilities": MOSSONG_OUT / "hh_probabilities.csv",
}
)
def task_prepare_mossong_data(depends_on, produces):
datasets = {
key: load_dataset(val)
for key, val in depends_on.items()
if not key.endswith(".py")
}
# clean data
hh = _prepare_hh_data(datasets["hh_common"], datasets["hh_extra"])
participants = _prepare_participant_data(
datasets["participant_common"], datasets["participant_extra"]
)
contacts = _prepare_contact_data(datasets["contact_common"])
sday = _prepare_day_data(datasets["sday"])
# contact_data
contacts = _merge_mossong_data(
contacts=contacts, participants=participants, sday=sday, hh=hh
)
contacts = _make_columns_in_contact_data_nice(contacts)
contacts = contacts[contacts["country"].isin(["LU", "DE_TOT", "BE", "NL"])]
contacts = contacts.dropna(how="any")
contacts.to_pickle(produces["contact_data"])
# household sample for initial states
hh = hh.query("country == 'DE_TOT'")
hh = _from_wide_to_long_format(hh)
hh = _drop_hh_with_missing_ages(hh)
hh.to_csv(produces["hh_sample"])
# household probability weights
hh["collapsed_hh_size"] = hh["hh_size"].where(
hh["hh_size"] <= 5, pd.Interval(5.0, np.inf)
)
sample_hh_size_shares = hh["collapsed_hh_size"].value_counts(normalize=True)
inv_prob_weights = datasets["eu_hh_size_shares"]["DE_TOT"] / sample_hh_size_shares
hh["hh_inv_prob_weights"] = hh["collapsed_hh_size"].replace(inv_prob_weights)
hh["probability"] = hh["hh_inv_prob_weights"] / hh["hh_inv_prob_weights"].sum()
hh_probs = hh[["hh_id", "probability"]]
hh_probs.to_csv(produces["hh_probabilities"])
def _prepare_hh_data(common, extra):
common = common.copy()
common["country"] = common["country"].replace({"DE": "DE_TOT", "GB": "UK"})
hh = pd.merge(left=common, right=extra, on="hh_id")
return hh
def _prepare_participant_data(common, extra):
common = common.copy(deep=True)
extra = extra.copy(deep=True)
extra["part_occupation"].replace(
{
1: "working",
2: "retired",
3: "at home (housewife)",
4: "unemployed",
5: "fulltime education",
6: "other",
},
inplace=True,
)
missed_d = {1: 0, 2: "1-4", 3: "5-9", 4: ">10"}
rename = [
("nr_missed_to_record", "diary_missed_unsp"),
("nr_missed_to_record_physical", "diary_missed_skin"),
("nr_missed_to_record_not_physical", "diary_missed_noskin"),
]
for new, old in rename:
extra[new] = extra[old].replace(missed_d)
extra.drop(columns=[old], inplace=True)
    participants = pd.merge(left=common, right=extra, on="part_id")
"""
Created on Jun 28 10:39 2018
@author: nishit
"""
import datetime
import time
import math
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import MinMaxScaler
from utils_intern.messageLogger import MessageLogger
from utils_intern.timeSeries import TimeSeries
logger = MessageLogger.get_logger_parent()
class ProcessingData:
def __init__(self, type, hist_data=None):
self.new_df = None
self.max = 1
self.min = 0
self.start_date_hist = datetime.datetime.strptime("2016-01-01 00:00:00", "%Y-%m-%d %H:%M:%S")
self.type = type
if self.type == "pv":
self.preprocess_hist_data(hist_data)
def preprocess_hist_data(self, hist_data):
hist_data_new = []
hist_data.insert(0, hist_data[-1])
for t, v in hist_data:
hist_data_new.append([datetime.datetime.fromtimestamp(t).strftime("%Y-%m-%d %H:%M:%S"), float(v)])
hd = pd.DataFrame(hist_data_new, columns=['Time', 'Values'])
hd['Time'] = pd.to_datetime(hd["Time"], errors='coerce')
hd.index = hd["Time"]
hd = hd.drop(columns=['Time'])
data = hd.values.reshape(-1, 1)
Xmin = np.amin(data)
Xmax = np.amax(data)
X_std = (data - Xmin) / (Xmax - Xmin)
max = 1
min = 0
self.X_scaled_hist = X_std * (max - min) + min
def preprocess_data_predict_load(self, raw_data, num_timesteps):
# Loading Data
# taking the last timestamp since we are going to use only the last data vector
latest_timestamp = raw_data[-1:][0][0]
logger.debug(latest_timestamp)
# df = pd.DataFrame(raw_data, columns=col_heads)
df = pd.DataFrame(raw_data)
df = df[df.columns[:2]]
df.columns = ['Time', 'Electricity']
new_df = df
new_df.columns = ['DateTime', 'Electricity']
# Changing dtype to pandas datetime format
new_df['DateTime'] = pd.to_datetime(new_df['DateTime'], unit='s')
new_df = new_df.set_index('DateTime')
# checking for null values and if any, replacing them with last valid observation
new_df.isnull().sum()
new_df.Electricity.fillna(method='pad', inplace=True)
# scale the data to be in the range (0, 1)
data = new_df.values.reshape(-1, 1)
# scaler = MinMaxScaler(feature_range=(0, 1), copy=False)
# data = scaler.fit_transform(data)
flat_list = [item for sublist in data for item in sublist]
# Quantile Normalization
s = pd.Series(flat_list)
quant = s.quantile(0.75)
Xmin = np.amin(data)
Xmax = quant
if Xmax <= Xmin:
Xmax = Xmin + 0.001
X_std = (data - Xmin) / (Xmax - Xmin)
data = X_std * (self.max - self.min) + self.min
look_back = num_timesteps
num_features = 1
logger.debug("data dim = " + str(data.shape))
logger.debug("input shape = " + str(num_timesteps))
nb_samples = data.shape[0] - num_timesteps + 1
if nb_samples > 0:
logger.debug("nb samples is " + str(nb_samples))
x_train_reshaped = np.zeros((nb_samples, look_back, num_features))
for i in range(nb_samples):
y_position_start = i + look_back
x_train_reshaped[i] = data[i:y_position_start]
Xtest = x_train_reshaped[-1:]
logger.debug("shape : " + str(Xtest.shape))
return Xtest, Xmax, Xmin, latest_timestamp
return None, None, None, None
def preprocess_data_predict_pv(self, raw_data, num_timesteps, input_size_hist):
# Loading Data
# taking the last timestamp since we are going to use only the last data vector
latest_timestamp = raw_data[-1:][0][0]
logger.debug(latest_timestamp)
# df = pd.DataFrame(raw_data, columns=col_heads)
df = pd.DataFrame(raw_data)
df = df[df.columns[:2]]
df.columns = ['Time', 'Electricity']
new_df = df
new_df.columns = ['DateTime', 'Electricity']
# Changing dtype to pandas datetime format
new_df['DateTime'] = pd.to_datetime(new_df['DateTime'], unit='s')
new_df = new_df.set_index('DateTime')
# checking for null values and if any, replacing them with last valid observation
new_df.isnull().sum()
new_df.Electricity.fillna(method='pad', inplace=True)
# scale the data to be in the range (0, 1)
data = new_df.values.reshape(-1, 1)
# scaler = MinMaxScaler(feature_range=(0, 1), copy=False)
# data = scaler.fit_transform(data)
flat_list = [item for sublist in data for item in sublist]
# Quantile Normalization
s = pd.Series(flat_list)
quant = s.quantile(0.75)
Xmin = np.amin(data)
Xmax = quant
if Xmax <= Xmin:
Xmax = Xmin + 0.001
X_std = (data - Xmin) / (Xmax - Xmin)
data = X_std * (self.max - self.min) + self.min
look_back = num_timesteps
num_features = 1
logger.debug("data dim = " + str(data.shape))
logger.debug("input shape = " + str(num_timesteps))
nb_samples = data.shape[0] - num_timesteps + 1
if nb_samples > 0:
logger.debug("nb samples is " + str(nb_samples))
x_train_reshaped = np.zeros((nb_samples, look_back, num_features))
h_train_reshaped = np.zeros((nb_samples, input_size_hist, num_features))
for i in range(nb_samples):
y_position_start = i + look_back
x_train_reshaped[i] = data[i:y_position_start]
start_date_index = self.find_nearest_hour_index(
datetime.datetime.strptime(str(new_df.index[i]), "%Y-%m-%d %H:%M:%S"))
end_date_index = start_date_index + input_size_hist
histXtrain = self.X_scaled_hist[start_date_index:end_date_index]
if end_date_index >= len(self.X_scaled_hist):
histXtrain = histXtrain + self.X_scaled_hist[0:len(self.X_scaled_hist) - end_date_index]
h_train_reshaped[i] = histXtrain
Xtest = x_train_reshaped[-1:]
Htest = h_train_reshaped[-1:]
logger.debug("shape : " + str(Xtest.shape))
return {"real": Xtest, "hist": Htest}, Xmax, Xmin, latest_timestamp
return None, None, None, None
def postprocess_data(self, prediction, startTimestamp, delta, horizon_steps, Xmax, Xmin):
data = prediction.reshape(-1, 1)
# data = scaler.inverse_transform(data)
data = (data - self.min) / (self.max - self.min)
data = data * (Xmax - Xmin) + Xmin
data = data.reshape(-1)
startTime = datetime.datetime.fromtimestamp(startTimestamp)
result = []
for pred in data:
result.append([startTime.timestamp(), pred])
startTime += datetime.timedelta(seconds=60)
result = TimeSeries.expand_and_resample_reversed(result, delta, False)
result = result[:horizon_steps]
logger.debug("pred out start val = " + str(result[0]))
output = {}
for t, v in result:
output[datetime.datetime.fromtimestamp(t)] = v
return output
def append_mock_data(self, data, num_timesteps, dT):
l = len(data)
diff = num_timesteps - l + 1
if l == 0:
earliest_timestamp = time.time()
else:
earliest_timestamp = data[0][0]
new_data = data
for i in range(diff):
earliest_timestamp -= dT
new_data.insert(0, [earliest_timestamp, 0.000001])
return new_data
def break_series_into_countinous_blocks(self, raw_data, dT, steps):
allowed_continous_gap_percent = 0.1
duration_of_one_data_set = steps * dT
required_mins = math.ceil(duration_of_one_data_set / 60.0)
allowed_continous_gap_mins = required_mins * allowed_continous_gap_percent
continous_series = []
temp_data = []
logger.info("allowed " + str(allowed_continous_gap_mins))
prev_time = raw_data[0][0]
for i in range(len(raw_data)):
curr_time = raw_data[i][0]
minute_diff = (curr_time - prev_time) / 60.0
if minute_diff > allowed_continous_gap_mins:
continous_series.append(temp_data.copy())
temp_data = []
temp_data.append(raw_data[i])
prev_time = curr_time
if len(temp_data) > 0:
continous_series.append(temp_data.copy())
return continous_series
def expand_and_resample_into_blocks(self, raw_data, model_data_dT, input_size, output_size):
if len(raw_data) > 0:
min_length = input_size + output_size
blocks = self.break_series_into_countinous_blocks(raw_data, model_data_dT, min_length)
logger.info("num blocks = " + str(len(blocks)))
resampled_blocks = []
block_has_min_length = []
merged = False
for block in blocks:
resampled_block = TimeSeries.expand_and_resample(block, model_data_dT)
if len(resampled_block) > 0:
resampled_blocks.append(resampled_block)
logger.info("block size = " + str(len(resampled_block)))
if len(resampled_block) >= min_length:
block_has_min_length.append(True)
else:
block_has_min_length.append(False)
if len(block_has_min_length) > 0 and not any(block_has_min_length):
logger.info("merging block because insufficient data")
new_block = []
end_time = resampled_blocks[-1][-1][0]
# TODO : check logic
for i in reversed(range(len(resampled_blocks))):
rsb = resampled_blocks[i]
start_time = rsb[0][0]
if end_time - start_time < min_length * model_data_dT:
rsb.extend(new_block)
new_block = rsb
merged = True
else:
rsb.extend(new_block)
new_block = rsb
merged = True
break
if merged:
new_block = TimeSeries.expand_and_resample(new_block, model_data_dT)
logger.info("length of merged blocks after expand = " + str(len(new_block)))
resampled_blocks = [new_block]
return resampled_blocks, merged
else:
return [], False
def preprocess_data_train_load(self, blocks, dT, input_size, output_size, sp):
x_list = []
y_list = []
look_back = input_size
num_features = 1
count = 0
lastest_input_timestep_data_point = 0
for raw_data in blocks:
# Loading Data
if len(raw_data) >= input_size + output_size:
# raw_data = raw_data[-7200:]
latest_timestamp = raw_data[-1:][0][0]
logger.debug(latest_timestamp)
if latest_timestamp > lastest_input_timestep_data_point:
lastest_input_timestep_data_point = latest_timestamp
# df = pd.DataFrame(raw_data, columns=col_heads)
df = pd.DataFrame(raw_data)
df = df[df.columns[:2]]
df.columns = ['Time', 'Electricity']
new_df = df
new_df.columns = ['DateTime', 'Electricity']
# Changing dtype to pandas datetime format
new_df['DateTime'] = pd.to_datetime(new_df['DateTime'], unit='s')
new_df = new_df.set_index('DateTime')
# checking for null values and if any, replacing them with last valid observation
new_df.isnull().sum()
new_df.Electricity.fillna(method='pad', inplace=True)
# scale the data to be in the range (0, 1)
data = new_df.values.reshape(-1, 1)
# scaler = MinMaxScaler(feature_range=(0, 1), copy=False)
# data = scaler.fit_transform(data)
flat_list = [item for sublist in data for item in sublist]
# Quantile Normalization
                s = pd.Series(flat_list)
#51. Feature extraction
'''
Extract features from the training, validation and test data and save them as
train.feature.txt, valid.feature.txt and test.feature.txt respectively.
Design whatever features seem useful for category classification;
converting the article headlines into word sequences is the minimal baseline.
'''
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
import string
import re
def preprocessing(text):
table = str.maketrans(string.punctuation, ' '*len(string.punctuation))
text = text.translate(table) # 記号をスペースに置換
text = text.lower() # 小文字化
text = re.sub('[0-9]+', '0', text) # 数字列を0に置換
return text
X_train = pd.read_table('./train.txt', header=None)
X_valid = pd.read_table('./valid.txt', header=None)
X_test = pd.read_table('./test.txt', header=None)
use_cols = ['TITLE', 'CATEGORY']
X_train.columns = use_cols
X_valid.columns = use_cols
X_test.columns = use_cols
X_train['TMP'] = 'train'
X_valid['TMP'] = 'valid'
X_test['TMP'] = 'test'
# Re-combine the datasets
df = pd.concat([X_train, X_valid, X_test], axis=0)
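# A rough sketch of how the TF-IDF feature extraction could continue from here.
# The vectorizer settings and the tab-separated output format are assumptions made
# for illustration, not part of the original snippet; older scikit-learn versions
# expose get_feature_names() instead of get_feature_names_out().
vec_tfidf = TfidfVectorizer(min_df=10, ngram_range=(1, 2))
X_tfidf = vec_tfidf.fit_transform(df['TITLE'].map(preprocessing))
features = pd.DataFrame(X_tfidf.toarray(), columns=vec_tfidf.get_feature_names_out())
features['TMP'] = df['TMP'].values
# split back into the three partitions and save one feature file per split
features[features['TMP'] == 'train'].drop(columns='TMP').to_csv('train.feature.txt', sep='\t', index=False)
features[features['TMP'] == 'valid'].drop(columns='TMP').to_csv('valid.feature.txt', sep='\t', index=False)
features[features['TMP'] == 'test'].drop(columns='TMP').to_csv('test.feature.txt', sep='\t', index=False)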
import numpy as np
import pandas as pd
import pandas.testing as tm
import ibis
from ibis.expr import datatypes as dt
from ibis.expr import schema as sch
def test_infer_basic_types():
df = pd.DataFrame(
{
'bigint_col': np.array(
[0, 10, 20, 30, 40, 50, 60, 70, 80, 90], dtype='i8'
),
'bool_col': np.array(
[
True,
False,
True,
False,
True,
None,
True,
False,
True,
False,
],
dtype=np.bool_,
),
'bool_obj_col': np.array(
[
True,
False,
np.nan,
False,
True,
np.nan,
True,
np.nan,
True,
False,
],
dtype=np.object_,
),
'date_string_col': [
'11/01/10',
None,
'11/01/10',
'11/01/10',
'11/01/10',
'11/01/10',
'11/01/10',
'11/01/10',
'11/01/10',
'11/01/10',
],
'double_col': np.array(
[
0.0,
10.1,
np.nan,
30.299999999999997,
40.399999999999999,
50.5,
60.599999999999994,
70.700000000000003,
80.799999999999997,
90.899999999999991,
],
dtype=np.float64,
),
'float_col': np.array(
[
np.nan,
1.1000000238418579,
2.2000000476837158,
3.2999999523162842,
4.4000000953674316,
5.5,
6.5999999046325684,
7.6999998092651367,
8.8000001907348633,
9.8999996185302734,
],
dtype='f4',
),
'int_col': np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='i4'),
'month': [11, 11, 11, 11, 2, 11, 11, 11, 11, 11],
'smallint_col': np.array(
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='i2'
),
'string_col': [
'0',
'1',
None,
'double , whammy',
'4',
'5',
'6',
'7',
'8',
'9',
],
'timestamp_col': [
pd.Timestamp('2010-11-01 00:00:00'),
None,
pd.Timestamp('2010-11-01 00:02:00.100000'),
pd.Timestamp('2010-11-01 00:03:00.300000'),
pd.Timestamp('2010-11-01 00:04:00.600000'),
pd.Timestamp('2010-11-01 00:05:00.100000'),
pd.Timestamp('2010-11-01 00:06:00.150000'),
pd.Timestamp('2010-11-01 00:07:00.210000'),
pd.Timestamp('2010-11-01 00:08:00.280000'),
pd.Timestamp('2010-11-01 00:09:00.360000'),
],
'tinyint_col': np.array(
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='i1'
),
'year': [
2010,
2010,
2010,
2010,
2010,
2009,
2009,
2009,
2009,
2009,
],
}
)
expected = [
('bigint_col', dt.int64),
('bool_col', dt.boolean),
('bool_obj_col', dt.boolean),
('date_string_col', dt.string),
('double_col', dt.double),
('float_col', dt.float),
('int_col', dt.int32),
('month', dt.int64),
('smallint_col', dt.int16),
('string_col', dt.string),
('timestamp_col', dt.timestamp),
('tinyint_col', dt.int8),
('year', dt.int64),
]
assert sch.infer(df) == ibis.schema(expected)
def test_infer_array():
df = pd.DataFrame(
{
# Columns containing np.arrays
'int64_arr_col': [
np.array([0, 1], dtype='int64'),
np.array([3, 4], dtype='int64'),
],
'string_arr_col': [np.array(['0', '1']), np.array(['3', '4'])],
# Columns containing pd.Series
'int64_series_col': [
pd.Series([0, 1], dtype='int64'),
                pd.Series([3, 4], dtype='int64'),
import pandas as pd
def convert_nested_to_dataframe(agg, dates_as_key=True):
'''A function that takes nested elasticsearch response with aggregation and returns a nested dataframe
Warning: This is a recursive function, and rather non-intuitive to understand
This function takes nested and crossed aggregations and converts them to an easy to manipulates pandas dataframe
e.g. Here we have a gender aggregation nested in year which is nested in state
the output we want:
state year gender doc_count
CA 2000 male 2
CA 2000 female 5
CA 2001 male 5
CA 2001 female 5
CA 2002 male 5
CA 2002 female 5
MN 2000 male 2
MN 2000 female 5
MN 2001 male 5
MN 2001 female 5
MN 2002 male 5
MN 2002 female 5
NY 2000 male 2
NY 2000 female 5
NY 2001 male 5
NY 2001 female 5
NY 2002 male 5
NY 2002 female 5
What we do is step down through all the layers of nested data (recursively) until we reach the end,
and from the end, start creating pandas dataframes that get merged back into one giant dataframe
this function is in an experimental state, and currently only tested on 3 nested levels,
TODO crossed data does not work
:param agg: an aggregation from elasticsearch results with nesting
:type agg: elasticsearch response.aggregation object
:returns: pandas data frame like example above, with nested data
'''
crossed_cats_expanded = []
high_level_returning = False
agg_as_dict = agg.to_dict()
cat_names = [item for item in agg_as_dict.keys() if type(agg_as_dict[item]) is dict]
for cat_name in cat_names: # TODO deal with multiple aggregations at the same level (Crossing)
expanded_buckets = []
merge_vert = False
if not len(getattr(agg, cat_name).buckets):
raise ValueError('There is no count data in the lowest level of nesting. Is your search setup correctly?')
for bucket in getattr(agg, cat_name).buckets:
bucket_as_dict = bucket.to_dict()
if dict not in [type(item) for item in bucket_as_dict.values()]:
# we are at lowest level, begin return
if ('key_as_string' in bucket_as_dict.keys()) and dates_as_key: # change dates to readble format
bucket_as_dict['key'] = bucket['key_as_string']
bucket_as_dict.pop('key_as_string')
bucket_as_dict[cat_name] = bucket_as_dict.pop(
'key') # change the name of the key to something meaningful
expanded_buckets.append(bucket_as_dict) # combine each dict at the lowest level
else:
# We are at some level other than the lowest
level_name = str(bucket.key) # save the name of this level
lower_level_return = convert_nested_to_dataframe(bucket) # and drop down into the next level
expanded_buckets.append(add_category_labels(level_name, cat_name, lower_level_return))
merge_vert = True
if not merge_vert:
dataframe_out = pd.DataFrame(expanded_buckets)
dataframe_out.rename(columns=lambda x: x.replace('key', cat_name))
crossed_cats_expanded.append(dataframe_out.reset_index(drop=True))
high_level_returning = True
if high_level_returning:
return pd.concat(crossed_cats_expanded, axis=1).reset_index(drop=True)
else:
return pd.concat(expanded_buckets, axis=0).reset_index(drop=True)
def add_category_labels(level_name, cat_name, dataframe_needing_cat):
'''A function that adds a category name column to a pandas dataframe
:param level_name: the key of the bucket at this level (e.g. a state name such as 'CA')
:type level_name: str
:param cat_name: the name of the aggregation/category this level belongs to (e.g. 'state')
:type cat_name: str
:param dataframe_needing_cat: a pandas dataframe to append the category name column to
:type dataframe_needing_cat: pandas.DataFrame
:returns: the input dataframe with an extra column holding the category label
'''
cat_name_dataframe = pd.DataFrame(
[level_name for i in range(0, dataframe_needing_cat.shape[0])]) # create a cat name column
cat_name_dataframe.columns = [cat_name] # name the column something meaningful
return | pd.concat([cat_name_dataframe, dataframe_needing_cat], axis=1) | pandas.concat |
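# A minimal usage sketch, assuming a reachable Elasticsearch cluster at http://localhost:9200,
# an index named "people" and the fields "state", "year" and "gender" -- the cluster URL, the
# index and all field names are hypothetical and only mirror the docstring example above.
if __name__ == '__main__':
    from elasticsearch import Elasticsearch
    from elasticsearch_dsl import Search
    client = Elasticsearch(['http://localhost:9200'])
    search = Search(using=client, index='people')
    # nest gender inside year inside state, as in the docstring example
    state_agg = search.aggs.bucket('state', 'terms', field='state')
    year_agg = state_agg.bucket('year', 'terms', field='year')
    year_agg.bucket('gender', 'terms', field='gender')
    response = search.execute()
    nested_df = convert_nested_to_dataframe(response.aggregations)
    print(nested_df.head())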
#%% [markdown]
# # Author : <NAME>
# ***
# ## Capstone Project for Qualifying IBM Data Science Professional Certification
# ***
#%% [markdown]
#
# # Import Packages
#
#%%
import numpy as np # library to handle data in a vectorized manner
import pandas as pd # library for data analysis
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', 1000)
from bs4 import BeautifulSoup
import requests, json
import geocoder
# !conda install -c conda-forge geopy --yes # uncomment this line if you haven't completed the Foursquare API lab
from geopy.geocoders import Nominatim # convert an address into latitude and longitude values
# Matplotlib and associated plotting modules
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
# import k-means from clustering stage
from sklearn.cluster import KMeans
# We use StandardScaler() to normalize our dataset.
from sklearn.preprocessing import StandardScaler
# !conda install -c conda-forge folium=0.5.0 --yes # uncomment this line if you haven't completed the Foursquare API lab
import folium # map rendering library
import os
#%% [markdown]
# # 1. Introduction/Business Problem
#
# A travel agency which is specialised in arranging holidays for elderly French people wants a solution for finding the right travel recommendation for their clients' specific accommodation needs on city tours.
#
# While the agency provides a bunch of detailed content via their website, digging through tons of pages and magazines costs their customers endless hours without them making any decision to book a holiday.
#
# The agency states the following customer wishes that have to be met for a guaranteed pleasant stay in the touristic destinations.
#
# - They are elderly retired French people and they are not open to cuisines other than the French kitchen, so they would like to eat at a French restaurant or bistro. A destination neighbourhood having more French restaurants and bistros is preferable to a neighbourhood having fewer.
#
# - They would like to go to venues such as museums, performing arts venues, theatres and movie theatres. A neighbourhood with more of these venues is preferable to a neighbourhood having fewer.
#
# - They would like to stay in hotels where all the venues (restaurants, museums etc.) are no more than a 15-minute walk away. A hotel with a shorter walking distance to the venues is preferable to one with a longer distance.
#
# Based on the given customer profile and customer requirements, the agency would like to provide some mechanism for their clients to shorten the time wasted in searching for their ideal places to stay in the cities they want to visit. In other words, they would like to offer their clients a tool for finding the best neighbourhoods to stay in at their desired destinations.
#
#
#
#%% [markdown]
# # 2. Analytic Approach & Learning Model
# Based on the requirements mentioned in the Business Problem, we are not told which the best/right neighbourhoods are, and we cannot make any distinction between neighbourhoods regarding the requirements of the clients.
# On the other hand, we are required to recommend neighbourhoods having similar characteristics suited to the needs of the clients, so that they can find their desired vacation destination in a short period of time. In our situation, finding similarities among neighbourhoods can best be addressed by unsupervised learning algorithms that group the data into clusters. Therefore, we will use the K-Means clustering algorithm, as sketched below.
#
#
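#%% [markdown]
# A minimal illustrative sketch of the clustering step: the feature names, values and number of clusters below are hypothetical, and the real features are built later from the Foursquare venues and walking distances.
#%%
demo_features = pd.DataFrame({
    'french_restaurants': [5, 1, 3, 0],
    'culture_venues': [4, 2, 6, 1],
    'avg_walk_seconds': [600, 1200, 450, 1500]},
    index=['Neighbourhood A', 'Neighbourhood B', 'Neighbourhood C', 'Neighbourhood D'])
demo_scaled = StandardScaler().fit_transform(demo_features)
demo_kmeans = KMeans(n_clusters=2, random_state=0).fit(demo_scaled)
dict(zip(demo_features.index, demo_kmeans.labels_))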
#%% [markdown]
# # 3. Collect, Prepare and Clean Data
# ***
#
# We need to collect appropriate data for the segmentation of neighbourhoods such that they can satisfy the clients' requirements.
#
#%% [markdown]
# ### Initialize Neighbourhoods to include in Analysis
# We do not include all the neighbourhoods in our project for a few reasons:
# 1. To avoid unexpected costs due to an excessive number of Google Distance Matrix API calls.
# 2. Increasing the number of neighbourhoods in the analysis causes performance issues due to Google API calls (slow network response times after 100 API calls)
#%%
neigbourhood_list=[]
neighbourhood_filter=[
"Stadsdriehoek/Rotterdam Centrum",
"Cool/Rotterdam Centrum",
"Rotterdam Centraal/Weena",
"Oude Westen",
"Dijkzigt",
"Scheepvaartkwartier/Nieuwe Werk",
"Middelland",
"Nieuwe Westen",
"Coolhaveneiland",
"Hillegersberg Centrum",
"Kralingen",
"Feijenoord/Noordereiland/Kop van Zuid",
"Afrikaanderwijk/Katendrecht",
"Oud-Charlois",
"Carnisse/Zuidplein",
"Waalhaven"
]
#%% [markdown]
# ### Initialize variables
#
# During the course of data collection we will use the Google Distance Matrix API and the Foursquare Developer API (therefore you need a proper Google Cloud Platform account and a Foursquare developer account). In order to make those API calls, we need credentials such as a Google API key and the Foursquare Client Id & Secret. Let's set them up properly below.
#%%
walking_duration=900 # maximum walking time from hotel to venues in seconds
fsq_version = '20180605' # Foursquare API version
fsq_limit = 100 # limit of number of venues returned by Foursquare API
fsq_venues_in_radius=1000 # search venues within this radius from city center
# Credentials to make Foursquare API Calls
# Set FSQ_CLIENT_ID,FSQ_CLIENT_SECRET as shell environment variable
fsq_client_id = os.environ.get('FSQ_CLIENT_ID') # Foursquare Developer API credential - Client Id
fsq_client_secret = os.environ.get('FSQ_CLIENT_SECRET') # Foursquare Developer API credential - Client Secret
# Google API key to make Google Map API calls (distance matrix api, geolocation api etc)
# Set GOOGLE_API_KEY as shell environment variable
google_api_key = os.environ.get('GOOGLE_API_KEY')
fsq_venue_categories_filter = [
{ 'category': 'Hotel', 'id': '4bf58dd8d48988d1fa931735'},
{ 'category': 'Movie Theater', 'id': '4bf58dd8d48988d17f941735'},
{ 'category': 'Theater', 'id': '4bf58dd8d48988d137941735'},
{ 'category': 'French Restaurant', 'id': '4bf58dd8d48988d10c941735'},
{ 'category': 'Museum', 'id': '4bf58dd8d48988d181941735'},
{ 'category': 'Music Venue', 'id': '4bf58dd8d48988d1e5931735'},
{ 'category': 'Performing Arts Venue', 'id': '4bf58dd8d48988d1f2931735'},
{ 'category': 'Bistro', 'id': '52e81612bcbc57f1066b79f1'}
]
fsq_venue_categories_filter_df = pd.DataFrame(fsq_venue_categories_filter)
categories = ','.join(list(fsq_venue_categories_filter_df['id'].values))
category_list = fsq_venue_categories_filter_df['id'].values
#%% [markdown]
#
# ***
# # Functions used in this project
# ***
#%% [markdown]
# ### Function to scrape neighbourhoods from Wikipedia
#
# Using **BeautifulSoup**, this function scrapes the neighbourhoods from the following Wikipedia page. Note that this function can scrape only the specific page given below.
# - [Neighbourhoods of Rotterdam](https://nl.wikipedia.org/wiki/Lijst_van_postcodes_3000-3999_in_Nederland)
#
#%%
def scrape_neighbourhoods_rotterdam():
def prepare_neigbourhood_list(root_element):
for neigbourhood_li_element in root_element:
neigbourhood_name_list=[]
if neigbourhood_li_element.find("a") != -1:
postal_code_string=neigbourhood_li_element.contents[0].strip()
postal_codes=postal_code_string.split("-")
for neigbourhood_a_element in neigbourhood_li_element.findAll("a"):
neigbourhood_name_list.append(neigbourhood_a_element.get_text())
neigbourhood_name='/'.join(neigbourhood_name_list)
if len(postal_codes) == 2:
neigbourhood_list.append({'Neighbourhood Postal Code':postal_codes[0], 'Neighbourhood':neigbourhood_name})
neigbourhood_list.append({'Neighbourhood Postal Code':postal_codes[1], 'Neighbourhood':neigbourhood_name})
else:
neigbourhood_list.append({'Neighbourhood Postal Code':postal_codes[0], 'Neighbourhood':neigbourhood_name})
resp=requests.get('https://nl.wikipedia.org/wiki/Lijst_van_postcodes_3000-3999_in_Nederland')
assert resp.status_code == 200, "Http link to postal codes is not valid!"
soup = BeautifulSoup(resp.text, 'html.parser')
root_element=soup.find("a",title="<NAME>").parent.find("ul")
prepare_neigbourhood_list(root_element)
root_element=soup.find("a",title="Rotterdam-Zuid").parent.find("ul")
prepare_neigbourhood_list(root_element)
# For given address of a neighbourhood, get geographic coordinates using geocoder
for neighbourhood_dict in neigbourhood_list:
neighbourhood_address=neighbourhood_dict['Neighbourhood Postal Code'] + ', Rotterdam, NL'
g = geocoder.google(neighbourhood_address)
neighbourhood_dict['Latitude']=g.latlng[0]
neighbourhood_dict['Longitude']=g.latlng[1]
print("Neighbourhood : {} Lat-Long : {}".format(neighbourhood_dict['Neighbourhood'], g.latlng))
#%% [markdown]
# ### Function to get walking distance from hotels to venues in seconds
# Using the Google Distance Matrix API, this function calculates the walking distance and duration for each given hotel and venue pair stored in the "row" parameter.
#
#%%
def getDistanceDuration(row):
# set coordinates
source = '{},{}'.format(row['Hotel Latitude'], row['Hotel Longitude'])
dest= '{},{}'.format(row['Venue Latitude'], row['Venue Longitude'])
# url variable store url
url ='https://maps.googleapis.com/maps/api/distancematrix/json?'
# Get method of requests module
# return response object
r = requests.get(url + 'units=metric'
'&mode=walking'
'&origins=' + source +
'&destinations=' + dest +
'&key=' + google_api_key)
# json method of response object
# return json format result
res = r.json()
distance_to_venue=res['rows'][0]['elements'][0]['distance']['value']
time_to_venue=res['rows'][0]['elements'][0]['duration']['value']
print('> Hotel :{} Venue:{} Distance:{}'.format(row['Hotel'],row['Venue'],distance_to_venue))
return pd.Series([distance_to_venue,time_to_venue])
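#%% [markdown]
# A minimal usage sketch of the function above, which is meant to be applied row-wise with `DataFrame.apply`. The dataframe and its single hotel/venue pair below are made up purely to show the calling convention; a valid `GOOGLE_API_KEY` must be set for the request to succeed.
#%%
demo_pairs = pd.DataFrame([{
    'Hotel': 'Demo Hotel', 'Hotel Latitude': 51.9225, 'Hotel Longitude': 4.47917,
    'Venue': 'Demo Museum', 'Venue Latitude': 51.9145, 'Venue Longitude': 4.4722}])
demo_pairs[['Distance to Venue', 'Duration to Venue']] = demo_pairs.apply(getDistanceDuration, axis=1)
demo_pairs.head()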
#%% [markdown]
# ### Function to get venues within given radius from the centre of the Neighbourhood
# Using the Foursquare developer API, this function fetches all the venues of the given venue categories that fall within the given radius from the centre of each neighbourhood.
#%%
def getNearbyVenues(neigbourhoods, postal_codes, latitudes, longitudes, radius=500):
venues_list=[]
for neigbourhood_name, neighbourhood_postal_code, lat, lng in zip(neigbourhoods, postal_codes, latitudes, longitudes):
print(neigbourhood_name)
# create the API request URL to search venues around the given coordinates of ll(latitude,longitude)
url = 'https://api.foursquare.com/v2/venues/search?&client_id={}&client_secret={}&v={}&ll={},{}&categoryId={}&radius={}&limit={}'.format(
fsq_client_id,
fsq_client_secret,
fsq_version,
lat,
lng,
categories,
radius,
fsq_limit)
# make the GET request
results = requests.get(url).json()["response"]['venues']
# return only relevant information for each nearby venue
venues_list.append([(
neigbourhood_name,
neighbourhood_postal_code,
lat,
lng,
v['name'],
v['location']['lat'],
v['location']['lng'],
v['location']['distance'],
(lambda v: v['location']['postalCode'] if 'postalCode' in v['location'] else 'Empty')(v),
(lambda v: v['location']['formattedAddress'] if 'address' in v['location'] else 'Empty')(v),
v['categories'][0]['name']) for v in results
# venue's category should be a primary category contained in category_list
if list(filter(lambda x: x['primary'], v['categories']))[0]['id'] in category_list
])
# prepare the relevant dataframe for all the venues fetched from Foursquare
nearby_venues = pd.DataFrame([item for venue_list in venues_list for item in venue_list])
nearby_venues.columns = ['Neighbourhood',
'Neighbourhood Postal Code',
'Neighbourhood Latitude',
'Neighbourhood Longitude',
'Venue',
'Venue Latitude',
'Venue Longitude',
'Venue Distance',
'Venue Postal Code',
'Venue formattedAddress',
'Venue Category']
return(nearby_venues)
#%% [markdown]
# ### Function to deal with missing postal codes of some venues
# After fetching the venues, we observed that some venues' addresses were missing. The postal code is a very important feature that needs to be filled in, since one of the main objectives is to find out the number of venues in a given neighbourhood.
# - The function below fetches the postal code for a location addressed by geographic coordinates. It uses the **geocoder** module, which in turn uses Google Maps APIs.
#
#%%
cnt=0
def getPostal(row):
global cnt
if (row['Venue Postal Code'] == 'Empty'):
rg=geocoder.google([row['Venue Latitude'],row['Venue Longitude']], method='reverse')
resJson2=rg.geojson
postal=resJson2['features'][0]['properties']['postal']
postal_prefix=postal.split(' ')[0][:4]
result_series=pd.Series([postal_prefix,postal])
else:
postal_prefix=row['Venue Postal Code'].split(' ')[0][:4]
result_series= | pd.Series([postal_prefix,row['Venue Postal Code']]) | pandas.Series |
# -*- coding: utf-8 -*-
"""
These the test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1])])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
"ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike(object):
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key):
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
def test_is_file_like(mock):
class MockFile(object):
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
assert not is_file(mock.Mock())
@pytest.mark.parametrize(
"ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)])
def test_is_names_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize(
"ll", [(1, 2, 3), 'a', Series({'pi': 3.14})])
def test_is_names_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass(object):
pass
class UnhashableClass1(object):
__hash__ = None
class UnhashableClass2(object):
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1,
3.14,
np.float64(3.14),
'a',
tuple(),
(1, ),
HashableClass(), )
not_hashable = ([], UnhashableClass1(), )
abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
# old-style classes in Python 2 don't appear hashable to
# collections.Hashable but also seem to support hash() by default
if PY2:
class OldStyleClass():
pass
c = OldStyleClass()
assert not isinstance(c, compat.Hashable)
assert inference.is_hashable(c)
hash(c) # this will not raise
@pytest.mark.parametrize(
"ll", [re.compile('ad')])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize(
"ll", ['x', 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
"ll", [r'a', u('x'),
r'asdf',
re.compile('adsf'),
u(r'\u2233\s*'),
re.compile(r'')])
def test_is_recompilable_passes(ll):
assert inference.is_re_compilable(ll)
@pytest.mark.parametrize(
"ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
class TestInference(object):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
assert lib.infer_dtype(arr) == compare
# object array of bytes
arr = arr.astype(object)
assert lib.infer_dtype(arr) == compare
# object array of bytes with missing values
assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare
def test_isinf_scalar(self):
# GH 11352
assert libmissing.isposinf_scalar(float('inf'))
assert libmissing.isposinf_scalar(np.inf)
assert not libmissing.isposinf_scalar(-np.inf)
assert not libmissing.isposinf_scalar(1)
assert not libmissing.isposinf_scalar('a')
assert libmissing.isneginf_scalar(float('-inf'))
assert libmissing.isneginf_scalar(-np.inf)
assert not libmissing.isneginf_scalar(np.inf)
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar('a')
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = {'', 'NULL', 'nan'}
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(data, nan_values, coerce)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handing non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([str(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
@pytest.mark.parametrize("arr", [
np.array([2**63, np.nan], dtype=object),
np.array([str(2**63), np.nan], dtype=object),
np.array([np.nan, 2**63], dtype=object),
np.array([np.nan, str(2**63)], dtype=object)])
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result = lib.maybe_convert_numeric(arr, set(),
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
def test_convert_numeric_uint64_nan_values(self, coerce):
arr = np.array([2**63, 2**63 + 1], dtype=object)
na_values = {2**63}
expected = (np.array([np.nan, 2**63 + 1], dtype=float)
if coerce else arr.copy())
result = lib.maybe_convert_numeric(arr, na_values,
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("case", [
np.array([2**63, -1], dtype=object),
np.array([str(2**63), -1], dtype=object),
np.array([str(2**63), str(-1)], dtype=object),
np.array([-1, 2**63], dtype=object),
np.array([-1, str(2**63)], dtype=object),
np.array([str(-1), str(2**63)], dtype=object)])
def test_convert_numeric_int64_uint64(self, case, coerce):
expected = case.astype(float) if coerce else case.copy()
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("value", [-2**63 - 1, 2**64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal( | lib.maybe_convert_objects(arr) | pandas._libs.lib.maybe_convert_objects |
import datetime
import requests
import pandas as pd
from asset_manager.assets.Order import Bid, Ask
from asset_manager.assets.OrderBook import OrderBook
class KrakenConnector:
def __init__(self):
self.url = "https://api.kraken.com/0/public/{lookup}"
def get_prices(self, start_date, asset_pair, interval=1):
start_date = datetime.datetime.timestamp(start_date)
params = {"since": start_date, "pair": asset_pair, "interval": interval}
prices_url = self.url.format(lookup = "OHLC")
data = self.send_get_request(prices_url, params)["result"]
data = self.get_first_data_content(data)
if start_date == data[0][0]:
return "UP-TO-DATE"
return self.transform_price_to_dataframe(data)
def get_order_book(self, asset_pair):
current_timestamp = datetime.datetime.utcnow()
current_timestamp = current_timestamp.replace(tzinfo=datetime.timezone.utc)
params = {"pair": asset_pair}
order_book_url=self.url.format(lookup="Depth")
data = self.send_get_request(order_book_url, params)["result"]
data = self.get_first_data_content(data)
bids = self.create_bids(data["bids"])
asks = self.create_asks(data["asks"])
return OrderBook(current_timestamp, bids, asks)
def create_bids(self, bids):
return [Bid(b[0], b[1], b[2]) for b in bids]
def create_asks(self, asks):
return [Ask(a[0], a[1], a[2]) for a in asks]
def get_first_data_content(self, data):
"""Kraken changes the assetpair name and uses it as a key,
since we don't know it we need to loop to find the data
and hope it is the first value."""
i=0
for key, value in data.items():
if key != "last":
i+=1
single_data = value
if i!=1:
raise ValueError("Data incorrectly parsed.")
return single_data
def send_get_request(self, url, params):
response = requests.get(url, params)
if response.status_code != 200:
raise ConnectionError("""Status code: {0} received by sending url to
{1} with parameters {2}.""".format(response.status_code, url, params))
return response.json()
def transform_price_to_dataframe(self, data):
df = pd.DataFrame(data)
df.columns = ["time", "open", "high", "low", "close", "vwap", "volume", "trades_count"]
df.index = | pd.to_datetime(df["time"], utc=True, unit='s') | pandas.to_datetime |
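# A minimal usage sketch; the asset pair name "XBTUSD" and the 6-hour lookback are
# assumptions for illustration only, and a live connection to the Kraken API is required.
if __name__ == '__main__':
    connector = KrakenConnector()
    start = datetime.datetime.utcnow() - datetime.timedelta(hours=6)
    prices = connector.get_prices(start, 'XBTUSD', interval=5)
    print(prices if isinstance(prices, str) else prices.tail())
    print(connector.get_order_book('XBTUSD'))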
#!/usr/bin/env python
import os
import sys
import re
from glob import glob
import json
import pandas
import datetime
import codecs
here = os.path.dirname(os.path.abspath(__file__))
folder = os.path.basename(here)
latest = '%s/latest' % here
year = datetime.datetime.today().year
output_data = os.path.join(here, 'data-latest.tsv')
output_year = os.path.join(here, 'data-%s.tsv' % year)
# Don't continue if we don't have latest folder
if not os.path.exists(latest):
print('%s does not have parsed data.' % folder)
sys.exit(0)
# Don't continue if we don't have results.json
results_json = os.path.join(latest, 'records.json')
if not os.path.exists(results_json):
print('%s does not have results.json' % folder)
sys.exit(1)
with open(results_json, 'r') as filey:
results = json.loads(filey.read())
columns = ['charge_code',
'price',
'description',
'hospital_id',
'filename',
'charge_type']
df = pandas.DataFrame(columns=columns)
seen = []
for result in results:
filename = os.path.join(latest, result['filename'])
if not os.path.exists(filename):
print('%s is not found in latest folder.' % filename)
continue
if os.stat(filename).st_size == 0:
print('%s is empty, skipping.' % filename)
continue
charge_type = 'standard'
if "drg" in filename.lower():
charge_type = "drg"
if result['filename'] in seen:
continue
seen.append(result['filename'])
print("Parsing %s" % filename)
if filename.endswith('csv'):
if "uncmc-published" in filename or "rex-published" in filename or "wayne-published" in filename:
# 'Charge Description,Receivable Owner,Service ID,Service Provider,Service Type,Eff Rate Amt\r\n'
with codecs.open(filename, "r", encoding='utf-8', errors='ignore') as filey:
lines = filey.readlines()
for l in range(2, len(lines)):
idx = df.shape[0] + 1
line = lines[l].strip('\n').strip('\r').strip(',').strip()
price = line.split(',')[-1]
line = line.replace(',%s' %price, '')
parts = re.split(r',(?=")', line)
description = parts[0].strip('"')
price = price.strip('"').replace('$','').replace(',','').strip()
if len(parts) > 1:
code = parts[1].strip('"').strip()
else:
code = None
entry = [code, # charge code
price, # price
description, # description
result["hospital_id"], # hospital_id
result['filename'],
charge_type]
df.loc[idx,:] = entry
# ['DESCRIPTION', 'CHARGE', 'CPT CODE']
elif charge_type == "standard":
if 'caldwell' in filename or "chatham" in filename or "johnston" in filename or "lenoir" in filename.lower() or "nash" in filename or "pardee" in filename:
content = pandas.read_csv(filename, header=None)
content.columns = ['DESCRIPTION', 'CPT CODE', 'CHARGE']
print(content.head())
else:
content = pandas.read_csv(filename)
columns = [x.strip() for x in content.columns.tolist()]
content.columns = columns
for row in content.iterrows():
idx = df.shape[0] + 1
price = row[1]['CHARGE']
if not isinstance(price, float):
price = price.replace('$','').replace(',','').strip()
entry = [row[1]['CPT CODE'], # charge code
price, # price
row[1]['DESCRIPTION'], # description
result["hospital_id"], # hospital_id
result['filename'],
charge_type]
df.loc[idx,:] = entry
else:
# ['Location', 'MS DRG - Code', 'MS DRG - Description',' Average Charge ']
if "csv-caldwell" in filename or "csv-chatham" in filename or "johnston-drg" in filename or "pardee-drg" in filename or "unc-hospitals-drg" in filename or "rex-drg" in filename:
content = | pandas.read_csv(filename, skiprows=5) | pandas.read_csv |
from keras.models import Sequential, load_model
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import accuracy_score
class CNN:
def __init__(self, id, num_classes, input_shape=(1, 64, 64),
dense_layers=None, conv_layers=None, augmentation=None,
learning_rate=0.001):
if dense_layers is None:
dense_layers = [256]
if conv_layers is None:
conv_layers = [(16, 5), (8, 3)]
assert len(conv_layers) > 0, "At least one convolutional layer is required."
self.id = id
self.learning_rate = learning_rate
self.input_shape = input_shape
self.num_classes = num_classes
self.dense_layers = dense_layers # List of layer sizes
self.conv_layers = conv_layers # List of tuples with (channels, kernel_size)
self.augmentation = augmentation # Dictionary, e.g. {'horizontal_flip': True, 'rotation_range': 90}
def create_model(self):
model = Sequential()
model.add(Conv2D(self.conv_layers[0][0], self.conv_layers[0][1], data_format='channels_first',
activation='relu', input_shape=self.input_shape))
model.add(MaxPooling2D())
for conv_layer in self.conv_layers[1:]:
model.add(Conv2D(conv_layer[0], conv_layer[1], data_format='channels_first', activation='relu'))
model.add(MaxPooling2D())
model.add(Flatten())
for dense_layer in self.dense_layers:
model.add(Dense(dense_layer, activation='relu'))
model.add(Dense(self.num_classes, activation='softmax'))
optimizer = Adam(lr=self.learning_rate)
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
self.model = model
def fit(self, X, y, X_valid, y_valid, epochs=400, batch_size=32):
chk = ModelCheckpoint('best_cnn_' + self.id + '.pkl', monitor='val_accuracy',
save_best_only=True, mode='max', verbose=1)
if not self.augmentation:  # no augmentation requested (None or empty dict)
self.model.fit(X, y, epochs=epochs, batch_size=batch_size, callbacks=[chk],
validation_data=(X_valid, y_valid))
else:
datagen = ImageDataGenerator(**self.augmentation, fill_mode="nearest")
datagen.fit(X)
self.model.fit_generator(datagen.flow(X, y, batch_size=batch_size),
epochs=epochs, callbacks=[chk], steps_per_epoch=len(X) // batch_size * 2,
validation_data=(X_valid, y_valid))
def evaluate(self, X, y, enc):
model = load_model('best_cnn_' + self.id + '.pkl')
y_pred = model.predict_classes(X)
y_true = [np.argmax(t) for t in y]
acc = accuracy_score(y_true, y_pred)
conf = tf.math.confusion_matrix(labels=y_true, predictions=y_pred).numpy()
con_mat_norm = np.around(conf.astype('float') / conf.sum(axis=1)[:, np.newaxis], decimals=2)
con_mat_df = | pd.DataFrame(con_mat_norm, index=enc, columns=enc) | pandas.DataFrame |
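# A minimal usage sketch; the random arrays below are stand-ins for real channels-first
# image batches and one-hot labels, purely for illustration of the calling convention.
if __name__ == '__main__':
    from keras.utils import to_categorical
    X_train = np.random.rand(64, 1, 64, 64).astype('float32')
    y_train = to_categorical(np.random.randint(0, 3, size=64), num_classes=3)
    X_valid = np.random.rand(16, 1, 64, 64).astype('float32')
    y_valid = to_categorical(np.random.randint(0, 3, size=16), num_classes=3)
    cnn = CNN(id='demo', num_classes=3, augmentation={'horizontal_flip': True})
    cnn.create_model()
    cnn.fit(X_train, y_train, X_valid, y_valid, epochs=1, batch_size=16)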
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = | IntervalIndex.from_intervals(data) | pandas.IntervalIndex.from_intervals |
import pandas as pd
from interface import *
from os import remove
from plotagem import *
def pelagens(txt=''):
pelagens = ['Alazão', 'Amarillho', 'Baio', 'Pampa', 'Preta', 'Rosilho', 'Tordilho']
escolha = menu(pelagens, txt+'Pelagem Principal:')
for i, v in enumerate(pelagens):
if escolha == i+1:
prim = pelagens[i]
if prim == 'Pampa' or prim == 'Rosilho' or prim == 'Tordilho':
escolha2 = menu(pelagens, f'Pelagem secundária - ({prim} de..)')
for i, v in enumerate(pelagens):
if escolha2 == i+1:
sec = pelagens[i]
if escolha not in (4,6,7):
return prim.lower()
if escolha == escolha2:
return prim.lower()
else:
return (prim+' de '+sec).lower()
def cadastrar():
'''
Collects all of the animal's data
'''
lista = []
indice = []
cadastro = {'nome': input('Nome: '), 'dnasc': leiaData('Dia-Mês-Ano de nascimento: '),
'sexo': leiaSexo('Sexo: '), 'pelagem': pelagens(), 'meses informados': indice, 'medidas': lista}
atual = leiaInt('Com quantos meses de idade foi registrada a última medição? ') # TODO: use datetime to calculate this automatically
for x in range(atual + 1):
y = leiaInt(f'Informe a altura de cernelha em cm para o mes {x}: (0 caso não exista.) ')
if y > 0:
lista.append(y)
indice.append(x)
print(cadastro)
return cadastro
def novoCadastro():
'''
saves a new registration
'''
cabeçalho('NOVO CADASTRO')
recip = cadastrar()
resp1 = menu(['Salvar dados?', 'Descartar e voltar'], 'SALVAR')
while True:
if resp1 == 1:
savetocsv(recip)
break
elif resp1 == 2:
break
def dropLineTxt(file, animal):
try:
f = open(file,'r', encoding='utf8')
lines = f.readlines()
f.close()
f = open('cadastrados.txt','w',encoding='utf8')
for line in lines:
if line != animal +'\n':
f.write(line)
f.close()
except:
print('Algum problema ao excluir o animal da lista de cadastrados!')
finally:
print(f'{animal} excluído da lista de cadastrados com sucesso!')
def selectAndDropCadastro():
'''
Shows a list of registered animals and asks the user to pick one of them for removal
'''
cadastrados = txtTolist('cadastrados.txt') # converts the registered-animals txt file into a list
cadastrados.sort()
cadastrados.append('Voltar') # appends the 'Voltar' (back) option at position len(cadastrados)
while True:
resp2 = menu(cadastrados, 'EXCLUIR CADASTRO') # menu with the list of animals + the back option
if resp2 == len(cadastrados):
break
for indice, valor in enumerate(cadastrados): # loops through the registered list to get the name of the file
# to be read
if resp2 == indice + 1:
animal = valor
confirm = menu(['Sim','Não'], f'DESEJA REALMENTE EXCLUIR {animal}?')
if confirm == 1:
dados = remove(f'arquivo/{animal}.csv')
print(f'Dados de {animal} excluídos com sucesso!')
dropLineTxt('cadastrados.txt',animal)
else:
break
def dropCadastro(nome):
'''
Removes a single registration by name.
Deletes both the csv file and the name in cadastrados.txt
'''
remove(f'arquivo/{nome}.csv')
print(f'Dados de {nome} excluídos com sucesso!')
dropLineTxt('cadastrados.txt',nome)
def enlistCadastrados():
cadastrados = txtTolist('cadastrados.txt') # converts the registered-animals txt file into a list
cadastrados = sorted(cadastrados)
cadastrados.append('Voltar') # appends the 'Voltar' (back) option at position len(cadastrados)
return cadastrados
def nomeEscolhido(animalnum, listacadastrados):
for indice, valor in enumerate(listacadastrados): # loops through the registered list to get the name of the file
# to be read
if animalnum == indice + 1:
animal = valor
dados = pd.read_csv(f'arquivo/{animal}.csv')
return animal, dados
def getData(dados):
nome = dados["nome"][0]
nascimento = dados["dnasc"][0]
pelagem = dados["pelagem"][0]
sexo = dados["sexo"][0]
altura = dados["medidas"].max()
return nome, nascimento, pelagem, sexo, altura
def showData(dados):
print(f'Nome: {dados["nome"][0]:>36}')
print(f'Nascimento: {dados["dnasc"][0]:>30}')
print(f'Pelagem: {dados["pelagem"][0]:>33}')
print(f'Sexo: {dados["sexo"][0]:>36}')
print(f'Altura: {dados["medidas"].max():>32}cm')
def alterName(df,item):
selecionado = str(df[item][0])
df.nome = str(input('Novo nome: '))
print(f'Nome alterado para {df[item][0]}')
while True:
resp = menu(['Sim','Não'], 'Deseja Confirmar a mudança?')
if resp == 1:
del df['Unnamed: 0']
df.to_csv(f'arquivo/{df["nome"][0]}.csv')
addtocadastrados('cadastrados.txt', f'{df["nome"][0]}' )
dropCadastro(selecionado)
break
else:
break
def alterItem(df,item,dado):
df[item] = dado
try:
del df['Unnamed: 0']
except:
print('\o/')
finally:
df.to_csv(f'arquivo/{df["nome"][0]}.csv')
def insertAltura(animal,mes,medida):
colect = []
labels = open(f'arquivo/{animal}.csv','r').readline().strip().split(',') #grabs the column labels and puts them in a list
#builds a list of dictionaries with all the csv data
with open(f'arquivo/{animal}.csv', 'r') as dados:
for dado in dados.readlines():
d = dado.strip().replace('\n','')
d_lista = d.split(",")
animal_dict = {}
for indice, valor in enumerate(d_lista):
animal_dict[labels[indice]] = valor
colect.append(animal_dict)
del colect[0] #drops the first row, which only contains labels
primedic = colect[0] # takes the first dictionary of the list to serve as a template dictionary
del primedic['']
#creates two lists to store the reported months and the heights for those months
listaDeMeses = []
listaDeAlturas = []
#loops over as many dictionaries as the big list holds, which is defined by the number of reported months
for i in range(len(colect)):
listaDeMeses.append(i) #appends the indices to the list of reported months
alt = colect[i].get('medidas') #grabs the height reported for each reported month
listaDeAlturas.append(int(alt)) #appends the heights to the heights list
#adds the newly provided data (TODO: add validation later)
listaDeMeses.append(mes)
listaDeAlturas.append(medida)
#adds the lists to the template dictionary
primedic['meses informados'] = listaDeMeses
primedic['medidas'] = listaDeAlturas
remove(f'arquivo/{animal}.csv') #removes the previous version of the csv
df = | pd.DataFrame(primedic) | pandas.DataFrame |
import dash, os, itertools, flask
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
from pandas_datareader import data as web
from datetime import datetime as dt
import plotly.graph_objs as go
import pandas as pd
from random import randint
import plotly.plotly as py
server = flask.Flask(__name__)
server.secret_key = os.environ.get('secret_key', 'secret')
app = dash.Dash(name = __name__, server = server)
app.config.supress_callback_exceptions = True
#Data variables
cli = pd.read_pickle('Climate_full.p')
models_list = ['GFDL-CM3', 'GISS-E2-R', 'NCAR-CCSM4', 'IPSL-CM5A-LR','MRI-CGCM3']
web = 'https://www.snap.uaf.edu/webshared/jschroder/db/CSV/'
metrics = [ 'avg_fire_size','number_of_fires','total_area_burned']
#Function updating #1 plot => Alfresco plot
def get_data( models , scenarios, metric, domain, cumsum ) :
metric = str(metric)
domain = str(domain)
def _get_metric_cumsum(lnk , cumsum ):
#Extract, average and cumsum the raw data to a dataframe
_df = | pd.read_csv(lnk, index_col=0) | pandas.read_csv |
"""
This python script allocates all the custom transformers that are specific for the project task.
The idea is to encapsulate the classes and functions used on pipelines to make codes cleaner.
"""
# Importing libraries
import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
from nltk.stem import RSLPStemmer
import re
from sklearn.model_selection import RandomizedSearchCV, cross_val_score, cross_val_predict, learning_curve
from sklearn.metrics import roc_auc_score, accuracy_score, precision_score, recall_score, f1_score, roc_curve, confusion_matrix
import time
from datetime import datetime
"""
-----------------------------------
----- 1. CUSTOM TRANSFORMERS ------
1.1 Functions
-----------------------------------
"""
def import_data(path, sep=',', optimized=True, n_lines=50, encoding='utf-8', usecols=None, verbose=True):
"""
This functions applies a csv reading in an optimized way, converting data types (float64 to float32 and
int 64 to int32), reducing script memory usage.
Parameters
----------
:param path: path reference for importing the data [type: string]
:param sep: separator parameter for read_csv() method [type: string, default: ',']
:param optimized: boolean flag for reading data in an optimized way [type: bool, default: True]
:param n_lines: number of lines read during the data type optimization [type: int, default: 50]
:param encoding: encoding param for read_csv() method [type: string, default: 'utf-8']
:param verbose: the verbose arg allow communication between steps [type: bool, default: True]
:param usecols: columns to read - set None to read all the columns [type: list, default: None]
Return
------
:return: df: file after the preparation steps [type: pd.DataFrame]
Application
-----------
# Reading the data and applying a data type conversion for optimizing the memory usage
df = import_data(filepath, optimized=True, n_lines=100)
"""
# Validating the optimized flag for optimizing memory usage
if optimized:
# Reading only the first rows of the data
df_raw = pd.read_csv(path, sep=sep, nrows=n_lines, encoding=encoding, usecols=usecols)
start_mem = df_raw.memory_usage().sum() / 1024 ** 2
# Columns were the optimization is applicable
float64_cols = [col for col, dtype in df_raw.dtypes.items() if dtype == 'float64']
int64_cols = [col for col, dtype in df_raw.dtypes.items() if dtype == 'int64']
total_opt = len(float64_cols) + len(int64_cols)
if verbose:
print(f'This dataset has {df_raw.shape[1]} columns, which {total_opt} is/are applicable to optimization.\n')
# Optimizing data types: float64 to float32
for col in float64_cols:
df_raw[col] = df_raw[col].astype('float32')
# Optimizing data types: int64 to int32
for col in int64_cols:
df_raw[col] = df_raw[col].astype('int32')
# Looking at memory reduction
if verbose:
print('----------------------------------------------------')
print(f'Memory usage ({n_lines} lines): {start_mem:.4f} MB')
end_mem = df_raw.memory_usage().sum() / 1024 ** 2
print(f'Memory usage after optimization ({n_lines} lines): {end_mem:.4f} MB')
print('----------------------------------------------------')
mem_reduction = 100 * (1 - (end_mem / start_mem))
print(f'\nReduction of {mem_reduction:.2f}% on memory usage\n')
# Creating an object with new dtypes
dtypes = df_raw.dtypes
col_names = dtypes.index
types = [dtype.name for dtype in dtypes.values]
column_types = dict(zip(col_names, types))
# Trying to read the dataset with new types
try:
return pd.read_csv(path, sep=sep, dtype=column_types, encoding=encoding, usecols=usecols)
except ValueError as e1:
# Error cach during data reading with new data types
print(f'ValueError on data reading: {e1}')
print('The dataset will be read without optimization types.')
return pd.read_csv(path, sep=sep, encoding=encoding, usecols=usecols)
else:
# Reading the data without optimization
return pd.read_csv(path, sep=sep, encoding=encoding, usecols=usecols)
"""
-----------------------------------
----- 1. CUSTOM TRANSFORMERS ------
1.2 Classes
-----------------------------------
"""
class ColumnMapping(BaseEstimator, TransformerMixin):
"""
This class applies the map() function into a DataFrame for transforming a columns given a mapping dictionary
Parameters
----------
:param old_col_name: name of the columns where mapping will be applied [type: string]
:param mapping_dict: python dictionary with key/value mapping [type: dict]
:param new_col_name: name of the new column resulted by mapping [type: string, default: 'target]
:param drop: flag that guides the dropping of the old_target_name column [type: bool, default: True]
Returns
-------
:return X: pandas DataFrame object after mapping application [type: pd.DataFrame]
Application
-----------
# Transforming a DataFrame column given a mapping dictionary
mapper = ColumnMapping(old_col_name='col_1', mapping_dict=dictionary, new_col_name='col_2', drop=True)
df_mapped = mapper.fit_transform(df)
"""
def __init__(self, old_col_name, mapping_dict, new_col_name='target', drop=True):
self.old_col_name = old_col_name
self.mapping_dict = mapping_dict
self.new_col_name = new_col_name
self.drop = drop
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
# Applying mapping
X[self.new_col_name] = X[self.old_col_name].map(self.mapping_dict)
# Dropping the old columns (if applicable)
if self.drop:
X.drop(self.old_col_name, axis=1, inplace=True)
return X
class DropNullData(BaseEstimator, TransformerMixin):
"""
This class drops null data. It's possible to select just some attributes to be filled with different values
Parameters
----------
:param cols_dropna: columns to be filled. Leave None if all the columns will be filled [type: list, default: None]
Return
------
:return: X: DataFrame object with NaN data filled [type: pd.DataFrame]
Application
-----------
null_dropper = DropNulldata(cols_to_fill=['colA', 'colB', 'colC'], value_fill=-999)
X = null_dropper.fit_transform(X)
"""
def __init__(self, cols_dropna=None):
self.cols_dropna = cols_dropna
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
# Filling null data according to passed args
if self.cols_dropna is not None:
X[self.cols_dropna] = X[self.cols_dropna].dropna()
return X
else:
return X.dropna()
class DropDuplicates(BaseEstimator, TransformerMixin):
"""
This class filters a dataset based on a set of features passed as argument.
It's not necessary to pass anything as args.
Return
------
:return: df: pandas DataFrame dropping duplicates [type: pd.DataFrame]
Application
-----------
dup_dropper = DropDuplicates()
df_nodup = dup_dropper.fit_transform(df)
"""
def fit(self, df, y=None):
return self
def transform(self, df, y=None):
return df.drop_duplicates()
"""
-----------------------------------
------ 2. TEXT TRANSFORMERS -------
2.1 Functions
-----------------------------------
"""
# [RegEx] Pattern to find line breaks and carriage returns (\n or \r)
def re_breakline(text_list, text_sub=' '):
"""
Args:
----------
text_list: list object with text content to be prepared [type: list]
text_sub: string or pattern to substitute the regex pattern [type: string]
"""
return [re.sub('[\n\r]', text_sub, r) for r in text_list]
# [RegEx] Pattern to find websites or hyperlinks
def re_hiperlinks(text_list, text_sub=' link '):
"""
Args:
----------
text_list: list object with text content to be prepared [type: list]
text_sub: string or pattern to substitute the regex pattern [type: string]
"""
pattern = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
return [re.sub(pattern, text_sub, r) for r in text_list]
# [RegEx] Pattern to find dates in their most common formats (dd/mm/yyyy, dd/mm/yy, dd.mm.yyyy, dd.mm.yy)
def re_dates(text_list, text_sub=' data '):
"""
Args:
----------
text_list: list object with text content to be prepared [type: list]
text_sub: string or pattern to substitute the regex pattern [type: string]
"""
pattern = '([0-2][0-9]|(3)[0-1])(\/|\.)(((0)[0-9])|((1)[0-2]))(\/|\.)\d{2,4}'
return [re.sub(pattern, text_sub, r) for r in text_list]
# [RegEx] Pattern to find monetary values (R$ or $)
def re_money(text_list, text_sub=' dinheiro '):
"""
Args:
----------
text_list: list object with text content to be prepared [type: list]
text_sub: string or pattern to substitute the regex pattern [type: string]
"""
# Applying regex
pattern = '[R]{0,1}\$[ ]{0,}\d+(,|\.)\d+'
return [re.sub(pattern, text_sub, r) for r in text_list]
# [RegEx] Pattern to find numbers
def re_numbers(text_list, text_sub=' numero '):
"""
Args:
----------
    text_list: list object with text content to be prepared [type: list]
text_sub: string or pattern to substitute the regex pattern [type: string]
"""
# Applying regex
return [re.sub('[0-9]+', text_sub, r) for r in text_list]
# [RegEx] Pattern to find the word "não" (not) in its many written forms
def re_negation(text_list, text_sub=' negação '):
"""
Args:
----------
    text_list: list object with text content to be prepared [type: list]
text_sub: string or pattern to substitute the regex pattern [type: string]
"""
# Applying regex
return [re.sub('([nN][ãÃaA][oO]|[ñÑ]| [nN] )', text_sub, r) for r in text_list]
# [RegEx] Pattern to clean special characters
def re_special_chars(text_list, text_sub=' '):
"""
Args:
----------
    text_list: list object with text content to be prepared [type: list]
text_sub: string or pattern to substitute the regex pattern [type: string]
"""
# Applying regex
    return [re.sub(r'\W', text_sub, r) for r in text_list]
# [RegEx] Pattern to clean extra whitespaces
def re_whitespaces(text_list):
"""
Args:
----------
    text_list: list object with text content to be prepared [type: list]
"""
# Applying regex
    white_spaces = [re.sub(r'\s+', ' ', r) for r in text_list]
    white_spaces_end = [re.sub(r'[ \t]+$', '', r) for r in white_spaces]
return white_spaces_end
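# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example chaining the regex prep functions above on a tiny,
# hypothetical corpus; it assumes the `re` module is imported at the top of
# this file (as the functions above already require).
def _example_regex_prep():
    corpus = ['Comprei por R$ 50,00 no dia 10/01/2021!\nNão gostei: http://example.com']
    corpus = re_breakline(corpus)
    corpus = re_hiperlinks(corpus)
    corpus = re_dates(corpus)
    corpus = re_money(corpus)
    corpus = re_numbers(corpus)
    corpus = re_negation(corpus)
    corpus = re_special_chars(corpus)
    return re_whitespaces(corpus)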
# [StopWords] Function to remove stopwords and lowercase the text
def stopwords_removal(text, cached_stopwords=stopwords.words('portuguese')):
"""
Args:
----------
text: list object where the stopwords will be removed [type: list]
cached_stopwords: stopwords to be applied on the process [type: list, default: stopwords.words('portuguese')]
"""
return [c.lower() for c in text.split() if c.lower() not in cached_stopwords]
# [Stemming] Function to apply the stemming process to the words
def stemming_process(text, stemmer=RSLPStemmer()):
"""
Args:
----------
text: list object where the stopwords will be removed [type: list]
stemmer: type of stemmer to be applied [type: class, default: RSLPStemmer()]
"""
return [stemmer.stem(c) for c in text.split()]
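# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of the two functions above on a single hypothetical
# comment; it assumes the NLTK 'stopwords' and 'rslp' resources are already
# downloaded (e.g. via nltk.download('stopwords') and nltk.download('rslp')).
def _example_stopwords_and_stemming():
    comment = 'Produto muito bom e entrega rápida'
    no_stopwords = ' '.join(stopwords_removal(comment))
    return ' '.join(stemming_process(no_stopwords))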
"""
-----------------------------------
------ 2. TEXT TRANSFORMERS -------
2.2 Classes
-----------------------------------
"""
# [TEXT PREP] Class to apply a series of RegEx functions defined in a dictionary
class ApplyRegex(BaseEstimator, TransformerMixin):
def __init__(self, regex_transformers):
self.regex_transformers = regex_transformers
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
# Applying all regex functions in the regex_transformers dictionary
for regex_name, regex_function in self.regex_transformers.items():
X = regex_function(X)
return X
# [TEXT PREP] Class to apply stopwords removal on a corpus
class StopWordsRemoval(BaseEstimator, TransformerMixin):
def __init__(self, text_stopwords):
self.text_stopwords = text_stopwords
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return [' '.join(stopwords_removal(comment, self.text_stopwords)) for comment in X]
# [TEXT PREP] Class to apply the stemming process on a corpus
class StemmingProcess(BaseEstimator, TransformerMixin):
def __init__(self, stemmer):
self.stemmer = stemmer
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return [' '.join(stemming_process(comment, self.stemmer)) for comment in X]
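# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged end-to-end text prep pipeline built from the transformers
# above; the regex mapping, the sample corpus and the Pipeline wiring are
# assumptions for illustration, not the project's official pipeline. It also
# assumes the NLTK 'stopwords' and 'rslp' resources are available.
def _example_text_prep_pipeline():
    from sklearn.pipeline import Pipeline
    regex_transformers = {
        'break_line': re_breakline,
        'hiperlinks': re_hiperlinks,
        'dates': re_dates,
        'money': re_money,
        'numbers': re_numbers,
        'negation': re_negation,
        'special_chars': re_special_chars,
        'whitespaces': re_whitespaces
    }
    text_prep_pipeline = Pipeline([
        ('regex', ApplyRegex(regex_transformers)),
        ('stopwords', StopWordsRemoval(stopwords.words('portuguese'))),
        ('stemming', StemmingProcess(RSLPStemmer()))
    ])
    corpus = ['Não gostei do produto!\nChegou no dia 10/01/2021 e custou R$ 99,90']
    return text_prep_pipeline.fit_transform(corpus)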
"""
-----------------------------------
-------- 3. CLASSIFICATION --------
3.1 Binary Classification
-----------------------------------
"""
class BinaryClassification:
"""
    This class makes the work on binary classification models easier by bringing useful functions for training,
purposing search on hyperparameters space, evaluating metrics and much more
"""
def __init__(self):
self.classifiers_info = {}
def fit(self, classifiers, X, y, approach='', random_search=False, scoring='roc_auc', cv=5, verbose=5, n_jobs=-1):
"""
This function receives information from classifiers to be trained, the data to be used on training and other
parameters for fitting the model to the data.
Parameters
----------
:param classifiers: dictionary containing estimators and hyperparameters inner dict [type: dict]
:param X: object containing features already prepared for training the model [type: np.array]
:param y: object containing the model target variable [type: np.array]
        :param approach: string to be added on model's name as suffix for identifying purposes [type: string, default: '']
        :param random_search: guides the application of Randomized Search on training [type: bool, default: False]
:param scoring: scoring metric to be optimized on random search [type: string, default: 'roc_auc']
:param cv: K-folds used on random search cross-validation [type: int, default: 5]
:param verbose: verbose param from RandomizedSearchCV [type: int, default: 5]
        :param n_jobs: n_jobs param from RandomizedSearchCV [type: int, default: -1]
Return
------
        This method doesn't return anything, but it fills some class attributes like the self.classifiers_info dict
Application
-----------
# Creating dictionary object for storing models information
set_classifiers = {
'ModelName': {
'model': ClassifierEstimator(),
'params': clf_dict_params
}
}
        trainer = BinaryClassification()
trainer.fit(set_classifiers, X_train_prep, y_train, random_search=True, cv=5)
"""
        # Iterating through every model in the dictionary of classifiers
for model_name, model_info in classifiers.items():
clf_key = model_name + approach
#print(f'Training model {clf_key}\n')
# Creating an empty dict for storing model information
self.classifiers_info[clf_key] = {}
# Application of RandomizedSearchCV
if random_search:
rnd_search = RandomizedSearchCV(model_info['model'], model_info['params'], scoring=scoring, cv=cv,
verbose=verbose, random_state=42, n_jobs=n_jobs)
rnd_search.fit(X, y)
# Saving the best estimator into the model's dict
self.classifiers_info[clf_key]['estimator'] = rnd_search.best_estimator_
else:
self.classifiers_info[clf_key]['estimator'] = model_info['model'].fit(X, y)
def compute_train_performance(self, model_name, estimator, X, y, cv=5):
"""
This function applies cross validation to retrieve useful metrics for the classification model.
In practice, this function would be called by another one (usually with compute_test_performance as well)
Parameters
----------
:param model_name: key-string that identifies a model at self.classifiers_info dict [type: string]
        :param estimator: trained model estimator to be evaluated [type: sklearn estimator]
:param X: object containing features already prepared for training the model [type: np.array]
:param y: object containing the model target variable [type: np.array]
:param cv: k-folds for cross validation application on training evaluation
Return
------
:return train_performance: DataFrame containing model metrics calculated using cross validation
Application
-----------
# Evaluating training performance using cross-validation
df_performances = trainer.compute_train_performance('DecisionTrees', trained_model, X_train, y_train, cv=5)
"""
# Computing metrics using cross validation
t0 = time.time()
accuracy = cross_val_score(estimator, X, y, cv=cv, scoring='accuracy').mean()
precision = cross_val_score(estimator, X, y, cv=cv, scoring='precision').mean()
recall = cross_val_score(estimator, X, y, cv=cv, scoring='recall').mean()
f1 = cross_val_score(estimator, X, y, cv=cv, scoring='f1').mean()
# Probas for calculating AUC
try:
y_scores = cross_val_predict(estimator, X, y, cv=cv, method='decision_function')
except:
# Tree based models don't have 'decision_function()' method, but 'predict_proba()'
y_probas = cross_val_predict(estimator, X, y, cv=cv, method='predict_proba')
y_scores = y_probas[:, 1]
auc = roc_auc_score(y, y_scores)
# Saving scores on self.classifiers_info dictionary
self.classifiers_info[model_name]['train_scores'] = y_scores
# Creating a DataFrame with metrics
t1 = time.time()
delta_time = t1 - t0
train_performance = {}
train_performance['model'] = model_name
train_performance['approach'] = f'Treino {cv} K-folds'
train_performance['acc'] = round(accuracy, 4)
train_performance['precision'] = round(precision, 4)
train_performance['recall'] = round(recall, 4)
train_performance['f1'] = round(f1, 4)
train_performance['auc'] = round(auc, 4)
train_performance['total_time'] = round(delta_time, 3)
        return pd.DataFrame(train_performance, index=[0])
def compute_test_performance(self, model_name, estimator, X, y):
"""
This function retrieves metrics from the trained model on test data.
In practice, this function would be called by another one (usually with compute_train_performance as well)
Parameters
----------
:param model_name: key-string that identifies a model at self.classifiers_info dict [type: string]
        :param estimator: trained model estimator to be evaluated [type: sklearn estimator]
:param X: object containing features already prepared for training the model [type: np.array]
:param y: object containing the model target variable [type: np.array]
Return
------
:return test_performance: DataFrame containing model metrics calculated on test data
Application
-----------
# Evaluating test data performance
df_performances = trainer.compute_test_performance('DecisionTrees', trained_model, X_train, y_train)
"""
# Predicting data using the trained model and computing probabilities
t0 = time.time()
y_pred = estimator.predict(X)
y_proba = estimator.predict_proba(X)
y_scores = y_proba[:, 1]
# Retrieving metrics using test data
accuracy = accuracy_score(y, y_pred)
precision = precision_score(y, y_pred)
recall = recall_score(y, y_pred)
f1 = f1_score(y, y_pred)
auc = roc_auc_score(y, y_scores)
        # Saving probabilities on trained classifiers dictionary
self.classifiers_info[model_name]['test_scores'] = y_scores
# Creating a DataFrame with metrics
t1 = time.time()
delta_time = t1 - t0
test_performance = {}
test_performance['model'] = model_name
test_performance['approach'] = f'Teste'
test_performance['acc'] = round(accuracy, 4)
test_performance['precision'] = round(precision, 4)
test_performance['recall'] = round(recall, 4)
test_performance['f1'] = round(f1, 4)
test_performance['auc'] = round(auc, 4)
test_performance['total_time'] = round(delta_time, 3)
        return pd.DataFrame(test_performance, index=[0])
def evaluate_performance(self, X_train, y_train, X_test, y_test, cv=5, save=False, overwrite=True,
performances_filepath='model_performances.csv'):
"""
This function centralizes the evaluating metric process by calling train and test evaluation functions.
Parameters
----------
:param X_train: training data to be used on evaluation [np.array]
:param y_train: training target variable to be used on evaluation [type: np.array]
:param X_test: testing data to be used on evaluation [np.array]
:param y_test: testing target variable to be used on evaluation [type: np.array]
:param cv: K-folds used on cross validation step [type: int, default: 5]
:param save: flag that guides saving the final DataFrame with metrics [type: bool, default: False]
:param overwrite: flag that guides the overwriting of a saved metrics file [type: bool, default: True]
:param performances_filepath: path reference for saving model performances dataset [type: string,
default: 'model_performances.csv']
Return
------
:return df_performance: DataFrame containing model metrics calculated on training and test data
Application
-----------
        # Evaluating performance on training and testing
df_performance = trainer.evaluate_performance(X_train, y_train, X_test, y_test, save=True)
"""
# Iterating over each trained classifier at classifiers_info dictionary
df_performances = pd.DataFrame({})
for model_name, model_info in self.classifiers_info.items():
# Validating if the model was already trained (the key 'train_performance' will be at model_info dict if so)
if 'train_performance' in model_info.keys():
                df_performances = pd.concat([df_performances, model_info['train_performance']])
                df_performances = pd.concat([df_performances, model_info['test_performance']])
continue
# Returning the estimator for calling the evaluation functions
#print(f'Evaluating model {model_name}\n')
estimator = model_info['estimator']
# Retrieving training and testing metrics by calling inner functions
train_performance = self.compute_train_performance(model_name, estimator, X_train, y_train, cv=cv)
test_performance = self.compute_test_performance(model_name, estimator, X_test, y_test)
# Putting results on model's dictionary (classifiers_info)
self.classifiers_info[model_name]['train_performance'] = train_performance
self.classifiers_info[model_name]['test_performance'] = test_performance
            # Building a unique DataFrame with the retrieved performances
            model_performance = pd.concat([train_performance, test_performance])
            df_performances = pd.concat([df_performances, model_performance])
# Saving some attributes on model_info dictionary for further access
model_data = {
'X_train': X_train,
'y_train': y_train,
'X_test': X_test,
'y_test': y_test
}
model_info['model_data'] = model_data
# Saving the metrics file if applicable
if save:
# Adding information of measuring and execution time
cols_performance = list(df_performances.columns)
df_performances['anomesdia'] = datetime.now().strftime('%Y%m%d')
df_performances['anomesdia_datetime'] = datetime.now()
df_performances = df_performances.loc[:, ['anomesdia', 'anomesdia_datetime'] + cols_performance]
# Validating overwriting or append on data already saved
if overwrite:
df_performances.to_csv(performances_filepath, index=False)
else:
try:
# If overwrite is False, tries reading existing metrics data and applying append on it
log_performances = pd.read_csv(performances_filepath)
                    full_performances = pd.concat([log_performances, df_performances])
full_performances.to_csv(performances_filepath, index=False)
except FileNotFoundError:
print('Log de performances do modelo não existente no caminho especificado. Salvando apenas o atual.')
df_performances.to_csv(performances_filepath, index=False)
return df_performances
def feature_importance_analysis(self, features, specific_model=None, graph=True, ax=None, top_n=30,
palette='viridis', save=False, features_filepath='features_info.csv'):
"""
This function retrieves the feature importance from a given model. It can also build a bar chart
for top_n most important features and plot it on the notebook.
        Parameters
---------
:param features: list of model features used on training [type: list]
:param specific_model: information that guides the returning of feature importance for a specific model*
:param graph: flag that guides bar chart plotting at the end of execution [type: bool, default: True]
:param ax: axis for plotting the bar chart [type: matplotlib.axes, default: None]
:param top_n: parameter for showing up just top most important features [type: int, default: 30]
:param palette: color configuration for feature importance bar chart [type: string, default: 'viridis']
:param save: flag for saving the dataset returned [type: bool, default: False]
        :param features_filepath: path for saving the feature importance dataset [type: string, default: 'features_info.csv']
Returns
-------
:return: model_feature_importance: pandas DataFrame with feature importances extracted by trained models
"""
# Iterating over each trained classifiers on classifiers_info dictionary
feat_imp = pd.DataFrame({})
all_feat_imp = pd.DataFrame({})
for model_name, model_info in self.classifiers_info.items():
# Creating a pandas DataFrame with model feature importance
try:
importances = model_info['estimator'].feature_importances_
except:
# If the given model doesn't have the feature_importances_ method, just continue for the next
continue
# Preparing the dataset with useful information
feat_imp['feature'] = features
feat_imp['importance'] = importances
            feat_imp['anomesdia'] = datetime.now().strftime('%Y%m%d')
            feat_imp['anomesdia_datetime'] = datetime.now()
            feat_imp.sort_values(by='importance', ascending=False, inplace=True)
            feat_imp.reset_index(drop=True, inplace=True)
            # Saving the feature importance at model's dictionary (classifiers_info)
            self.classifiers_info[model_name]['feature_importances'] = feat_imp
            feat_imp['model'] = model_name
            all_feat_imp = pd.concat([all_feat_imp, feat_imp])
# Retrieving feature importance for a specific model
if specific_model is not None:
try:
model_feature_importance = self.classifiers_info[specific_model]['feature_importances']
if graph:
# Plotting the bar chart
sns.barplot(x='importance', y='feature', data=model_feature_importance.iloc[:top_n, :],
ax=ax, palette=palette)
format_spines(ax, right_border=False)
ax.set_title(f'Top {top_n} {specific_model} Features mais Relevantes', size=14, color='dimgrey')
# Saving features for a specific model
if save:
model_feature_importance['model'] = specific_model
order_cols = ['anomesdia', 'anomesdia_datetime', 'model', 'feature', 'importance']
model_feature_importance = model_feature_importance.loc[:, order_cols]
model_feature_importance.to_csv(features_filepath, index=False)
return model_feature_importance
except:
# Exception raised if the "specific_model" param doesn't match with any model's dictionary key
print(f'Classificador {specific_model} não existente nas chaves de classificadores treinados.')
print(f'Opções possíveis: {list(self.classifiers_info.keys())}')
return None
else:
# Validating feature importance saving if not passing specific_model param
if save:
                order_cols = ['anomesdia', 'anomesdia_datetime', 'model', 'feature', 'importance']
all_feat_imp = all_feat_imp.loc[:, order_cols]
all_feat_imp.to_csv(features_filepath, index=False)
return all_feat_imp
# Non-matching param combination (it can't be possible plotting bar chart for all models)
if graph and specific_model is None:
print('Por favor, escolha um modelo específico para visualizar o gráfico das feature importances')
return None
def plot_roc_curve(self, figsize=(16, 6)):
"""
This function iterates over each estimator in classifiers_info dictionary and plots the ROC Curve for
each one for training (first axis) and testing data (second axis)
        Parameters
----------
:param figsize: figure size for the plot [type: tuple, default: (16, 6)]
Returns
-------
This function doesn't return anything but the matplotlib plot for ROC Curve
Application
-----------
trainer.plot_roc_curve()
"""
# Creating matplotlib figure and axis for ROC Curve plot
fig, axs = plt.subplots(ncols=2, figsize=figsize)
# Iterating over trained models
for model_name, model_info in self.classifiers_info.items():
# Returning y data for the model (training and testing)
y_train = model_info['model_data']['y_train']
y_test = model_info['model_data']['y_test']
# Returning scores already calculated after performance evaluation
train_scores = model_info['train_scores']
test_scores = model_info['test_scores']
# Calculating false positives and true positives rate
train_fpr, train_tpr, train_thresholds = roc_curve(y_train, train_scores)
test_fpr, test_tpr, test_thresholds = roc_curve(y_test, test_scores)
# Returning the auc metric for training and testing already calculated after model evaluation
train_auc = model_info['train_performance']['auc'].values[0]
test_auc = model_info['test_performance']['auc'].values[0]
# Plotting graph (training data)
plt.subplot(1, 2, 1)
plt.plot(train_fpr, train_tpr, linewidth=2, label=f'{model_name} auc={train_auc}')
plt.plot([0, 1], [0, 1], 'k--')
plt.axis([-0.02, 1.02, -0.02, 1.02])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title(f'ROC Curve - Train Data')
plt.legend()
# Plotting graph (testing data)
plt.subplot(1, 2, 2)
plt.plot(test_fpr, test_tpr, linewidth=2, label=f'{model_name} auc={test_auc}')
plt.plot([0, 1], [0, 1], 'k--')
plt.axis([-0.02, 1.02, -0.02, 1.02])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title(f'ROC Curve - Test Data', size=12)
plt.legend()
plt.show()
def custom_confusion_matrix(self, model_name, y_true, y_pred, classes, cmap, normalize=False):
"""
This function is used for plotting and customizing a confusion matrix for a specific model. In practice,
this function can be called by a top level one for plotting matrix for many models.
Parameters
----------
:param model_name: key reference for extracting model's estimator from classifiers_dict [type: string]
:param y_true: label reference for the target variable [type: np.array]
:param y_pred: array of predictions given by the respective model [type: np.array]
:param classes: alias for classes [type: string]
:param cmap: this parameters guides the colorway for the matrix [type: matplotlib.colormap]
:param normalize: normalizes the entries for the matrix [type: bool, default: False]
Returns
-------
:return: This functions doesn't return any object besides of plotting the confusion matrix
Application
-----------
Please refer to the self.plot_confusion_matrix() function
"""
# Returning a confusion matrix given the labels and predictions passed as args
        conf_mx = confusion_matrix(y_true, y_pred)
        # Normalizing the entries if requested
        if normalize:
            conf_mx = conf_mx.astype('float') / conf_mx.sum(axis=1)[:, np.newaxis]
        # Plotting the matrix
        plt.imshow(conf_mx, interpolation='nearest', cmap=cmap)
plt.colorbar()
tick_marks = np.arange(len(classes))
        # Customizing axes
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
        # Customizing entries
fmt = '.2f' if normalize else 'd'
thresh = conf_mx.max() / 2.
for i, j in itertools.product(range(conf_mx.shape[0]), range(conf_mx.shape[1])):
            plt.text(j, i, format(conf_mx[i, j], fmt),
horizontalalignment='center',
color='white' if conf_mx[i, j] > thresh else 'black')
plt.ylabel('True Label')
plt.xlabel('Predicted Label')
plt.title(f'{model_name}\nConfusion Matrix', size=12)
def plot_confusion_matrix(self, classes, normalize=False, cmap=plt.cm.Blues):
"""
This function plots a confusion matrix for training and testing data for each classifier at
self.classifiers_dict dictionary
Parameters
----------
:param classes: labels for the target variable [type: string]
:param normalize: flag that guides the normalization of matrix values [type: bool, default: False]
:param cmap: param that colorizes the matrix [type: plt.cm, default: plt.cm.Blues]
Returns
-------
This function doesn't return anything but the matplotlib plot for confusion matrix
"""
        # Defining parameters for plotting
k = 1
nrows = len(self.classifiers_info.keys())
fig = plt.figure(figsize=(10, nrows * 4))
sns.set(style='white', palette='muted', color_codes=True)
# Iterating over each classifier
for model_name, model_info in self.classifiers_info.items():
# Returning data from each model
X_train = model_info['model_data']['X_train']
y_train = model_info['model_data']['y_train']
X_test = model_info['model_data']['X_test']
y_test = model_info['model_data']['y_test']
# Making predictions for training (cross validation) and testing for returning confusion matrix
train_pred = cross_val_predict(model_info['estimator'], X_train, y_train, cv=5)
test_pred = model_info['estimator'].predict(X_test)
# Plotting matrix (training data)
plt.subplot(nrows, 2, k)
self.custom_confusion_matrix(model_name + ' Train', y_train, train_pred, classes=classes, cmap=cmap,
normalize=normalize)
k += 1
# Plotting matrix (testing data)
plt.subplot(nrows, 2, k)
self.custom_confusion_matrix(model_name + ' Test', y_test, test_pred, classes=classes, cmap=plt.cm.Greens,
normalize=normalize)
k += 1
plt.tight_layout()
plt.show()
def plot_learning_curve(self, model_name, ax, ylim=None, cv=5, n_jobs=1, train_sizes=np.linspace(.1, 1.0, 10)):
"""
This function calculates and plots the learning curve for a trained model
Parameters
----------
:param model_name: Key reference for extracting an estimator from classifiers_dict dictionary [type: string]
:param ax: axis reference for plotting the learning curve [type: matplotlib.axis]
:param ylim: configuration of the limit on plot vertical axis [type: int, default: None]
:param cv: k-folds used on cross validation [type: int, default: 5]
:param n_jobs: number of cores used on retrieving the learning curve params [type: int, default: 1]
:param train_sizes: array that guides the steps bins used on learning curve [type: np.array,
default:np.linspace(.1, 1.0, 10)]
Returns
-------
This function doesn't return anything but the matplotlib plot for the learning curve
Application
-----------
# Plotting the learning curve for a specific model
fig, ax = plt.subplots(figsize=(16, 6))
trainer.plot_learning_curve(model_name='LightGBM', ax=ax)
"""
# Returning the model to be evaluated
try:
model = self.classifiers_info[model_name]
except:
print(f'Classificador {model_name} não foi treinado.')
print(f'Opções possíveis: {list(self.classifiers_info.keys())}')
return None
# Returning useful data for the model
X_train = model['model_data']['X_train']
y_train = model['model_data']['y_train']
# Calling the learning curve model for retrieving the scores for training and validation
train_sizes, train_scores, val_scores = learning_curve(model['estimator'], X_train, y_train, cv=cv,
n_jobs=n_jobs, train_sizes=train_sizes)
# Computing averages and standard deviation (training and validation)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
val_scores_mean = np.mean(val_scores, axis=1)
val_scores_std = np.std(val_scores, axis=1)
# Results on training data
ax.plot(train_sizes, train_scores_mean, 'o-', color='navy', label='Training Score')
ax.fill_between(train_sizes, (train_scores_mean - train_scores_std), (train_scores_mean + train_scores_std),
alpha=0.1, color='blue')
# Results on cross validation
ax.plot(train_sizes, val_scores_mean, 'o-', color='red', label='Cross Val Score')
ax.fill_between(train_sizes, (val_scores_mean - val_scores_std), (val_scores_mean + val_scores_std),
alpha=0.1, color='crimson')
# Customizing graph
ax.set_title(f'Model {model_name} - Learning Curve', size=14)
ax.set_xlabel('Training size (m)')
ax.set_ylabel('Score')
ax.grid(True)
ax.legend(loc='best')
def plot_score_distribution(self, model_name, shade=False):
"""
This function plots a kdeplot for training and testing data splitting by target class
Parameters
----------
:param model_name: key reference for the trained model [type: string]
:param shade: shade param for seaborn's kdeplot [type: bool, default: False]
Returns
-------
This function doesn't return anything but the matplotlib plot for the score distribution
Application
-----------
# Ploting scores distribution for a model
plot_score_distribution(model_name='LightGBM', shade=True)
"""
# Returning the model to be evaluated
try:
model = self.classifiers_info[model_name]
except:
print(f'Classificador {model_name} não foi treinado.')
print(f'Opções possíveis: {list(self.classifiers_info.keys())}')
return None
# Retrieving y array for training and testing data
y_train = model['model_data']['y_train']
y_test = model['model_data']['y_test']
# Retrieving training and testing scores
train_scores = model['train_scores']
test_scores = model['test_scores']
# Plotting scores distribution
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(16, 5))
sns.kdeplot(train_scores[y_train == 1], ax=axs[0], label='y=1', shade=shade, color='darkslateblue')
sns.kdeplot(train_scores[y_train == 0], ax=axs[0], label='y=0', shade=shade, color='crimson')
sns.kdeplot(test_scores[y_test == 1], ax=axs[1], label='y=1', shade=shade, color='darkslateblue')
sns.kdeplot(test_scores[y_test == 0], ax=axs[1], label='y=0', shade=shade, color='crimson')
# Customizing plots
format_spines(axs[0], right_border=False)
format_spines(axs[1], right_border=False)
axs[0].set_title('Score Distribution - Training Data', size=12, color='dimgrey')
axs[1].set_title('Score Distribution - Testing Data', size=12, color='dimgrey')
plt.suptitle(f'Score Distribution: a Probability Approach for {model_name}\n', size=14, color='black')
plt.show()
def plot_score_bins(self, model_name, bin_range):
"""
This function plots a score distribution based on quantity of each class in a specific bin_range set
Parameters
----------
:param model_name: key reference for the trained model [type: string]
:param bin_range: defines a range of splitting the bins array [type: float]
Returns
-------
This function doesn't return anything but the matplotlib plot for the score bins distribution
Application
-----------
# Ploting scores distribution for a model in another approach
plot_score_bins(model_name='LightGBM', bin_range=0.1)
"""
# Returning the model to be evaluated
try:
model = self.classifiers_info[model_name]
except:
print(f'Classificador {model_name} não foi treinado.')
print(f'Opções possíveis: {list(self.classifiers_info.keys())}')
return None
# Creating the bins array
bins = np.arange(0, 1.01, bin_range)
bins_labels = [str(round(list(bins)[i - 1], 2)) + ' a ' + str(round(list(bins)[i], 2)) for i in range(len(bins))
if i > 0]
# Retrieving the train scores and creating a DataFrame
train_scores = model['train_scores']
y_train = model['model_data']['y_train']
df_train_scores = pd.DataFrame({})
df_train_scores['scores'] = train_scores
df_train_scores['target'] = y_train
df_train_scores['faixa'] = pd.cut(train_scores, bins, labels=bins_labels)
# Computing the distribution for each bin
        df_train_rate = pd.crosstab(df_train_scores['faixa'], df_train_scores['target'])
from pathlib import Path
import pytest
import numpy as np
import pandas as pd
from jinja2 import Template
from ploomber.clients import SQLAlchemyClient
from ploomber import testing
def test_can_check_nulls(tmp_directory):
client = SQLAlchemyClient('sqlite:///' + str(Path(tmp_directory, 'db.db')))
    df = pd.DataFrame({'no_nas': [1, 2, 1], 'nas': [1, np.nan, 1]})
#!/bin/python3
import math
import os
import random
import re
import sys
import pandas as pd
import numpy as np
pd.set_option('display.max_rows', 10)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
#
# Complete the 'case1' function below.
#
# The function accepts STRING_ARRAY fp_data as parameter.
#
def case1(financial_data):
# Print First 5 rows of MSFT
# Print Last 5 rows of MSFT
# Print Describe MSFT
print(financial_data.head())
print(financial_data.tail())
print(financial_data.describe())
return None
def case2(financial_data):
# Resample to monthly data
# Display the first 5 rows
monthly = financial_data.resample('M').mean()
print(monthly.head(5))
return None
def case3(financial_data):
# Create a variable daily_close and copy Adj Close from financial_data
# Print daily returns
daily_close = financial_data['Adj Close']
daily_returns = pd.DataFrame({'Adj Close': daily_close.pct_change()})
print(daily_returns.iloc[1:, :])
return None
def case4(financial_data):
# Calculate the cumulative daily returns
# Print it
cum_daily_return = (1 + financial_data['Adj Close'].pct_change()).cumprod()
cum_daily_return = pd.DataFrame({'Adj Close': cum_daily_return})
print(cum_daily_return.iloc[1:, :])
return None
def case5(financial_data):
# Resample the cumulative daily return to cumulative monthly return
cum_daily_return = (1 + financial_data['Adj Close'].pct_change()).cumprod()
cum_monthly_return = cum_daily_return.resample("M").mean()
cum_monthly_return = pd.DataFrame({'Adj Close': cum_monthly_return})
print(cum_monthly_return)
return None
def case6(financial_data):
# Isolate the adjusted closing prices and store it in a variable
# Calculate the moving average for a window of 20
adj_close = financial_data['Adj Close']
moving_avg = adj_close.rolling(window=20).mean()
moving_avg = pd.DataFrame({'Adj Close': moving_avg})
print(moving_avg)
return None
def case7(financial_data):
# Calculate the volatility for a period of 100 don't forget to multiply by square root
# don't forget that you need to use pct_change
returns = financial_data['Adj Close'].pct_change()
volatility = returns.rolling(100).std()
volatility = volatility * np.sqrt(100)
volatility = pd.DataFrame({'Adj Close': volatility})
print(volatility)
return None
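# Illustrative sketch (not part of the original exercise): a synthetic price
# series exercising the returns / rolling-volatility pattern used in case6 and
# case7. The random data and the helper name are hypothetical.
def example_rolling_volatility(window=100):
    rng = np.random.default_rng(42)
    dates = pd.date_range('2015-01-01', periods=500, freq='B')
    prices = pd.Series(100 * np.exp(np.cumsum(rng.normal(0, 0.01, len(dates)))),
                       index=dates, name='Adj Close')
    returns = prices.pct_change()
    volatility = returns.rolling(window).std() * np.sqrt(window)
    return pd.DataFrame({'Adj Close': volatility})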
def case8(financial_data):
# Initialize the short rolling window (window=50)
# Initialize the long rolling window (window=100)
short_window = 50
long_window = 100
# You will create a signals dataframe
# using the index of financial_data
signals = pd.DataFrame(index=financial_data.index)
# You will assign 0 to the column signal of the dataframe signals
signals['signal'] = 0.0
# Create short simple moving average over the short window
signals['short_mavg'] = financial_data['Close'].rolling(window=short_window,
min_periods=1).mean()
# Create long simple moving average over the long window
signals['long_mavg'] = financial_data['Close'].rolling(window=long_window,
min_periods=1).mean()
    # You will now populate the value 1 when the short window moving average
    # is higher than the long window moving average, else 0
    signals.iloc[short_window:, signals.columns.get_loc('signal')] = np.where(
        signals['short_mavg'][short_window:] > signals['long_mavg'][short_window:],
        1.0, 0.0)
# Generate trading orders by inserting in a new column orders
# 1 if it is a buy order -1 if it is a sell order
# you should just use the diff command on the column signal
signals['orders'] = signals['signal'].diff()
# Print the dataframe signals
print(signals)
return None
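# Illustrative sketch (not part of the original exercise): the same moving
# average crossover logic as case8, applied to a synthetic 'Close' series so
# it can run without the grader's financial_data input. The helper name and
# the random data are hypothetical.
def example_crossover_signals(short_window=50, long_window=100):
    rng = np.random.default_rng(0)
    dates = pd.date_range('2015-01-01', periods=400, freq='B')
    close = pd.Series(100 + np.cumsum(rng.normal(0, 1, len(dates))), index=dates)
    signals = pd.DataFrame(index=dates)
    signals['signal'] = 0.0
    signals['short_mavg'] = close.rolling(window=short_window, min_periods=1).mean()
    signals['long_mavg'] = close.rolling(window=long_window, min_periods=1).mean()
    signals.iloc[short_window:, signals.columns.get_loc('signal')] = np.where(
        signals['short_mavg'][short_window:] > signals['long_mavg'][short_window:], 1.0, 0.0)
    signals['orders'] = signals['signal'].diff()
    return signals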
def case9(financial_data):
# You will need to use the dataframe signals
short_window = 50
long_window = 100
signals = pd.DataFrame(index=financial_data.index)
signals['signal'] = 0.0
signals['short_mavg'] = financial_data['Close'].rolling(window=short_window,
min_periods=1).mean()
signals['long_mavg'] = financial_data['Close'].rolling(window=long_window,
min_periods=1).mean()
    signals.iloc[short_window:, signals.columns.get_loc('signal')] = np.where(
        signals['short_mavg'][short_window:] > signals['long_mavg'][short_window:],
        1.0, 0.0)
signals['orders'] = signals['signal'].diff()
# You are going to set your initial amount of money you want
# to invest --- here it is 10,000
initial_money = 10000.0
# You are going to create a new dataframe positions
# Remember the index is still the same as signals
positions = | pd.DataFrame(index=signals.index) | pandas.DataFrame |
import copy
import itertools
import re
import operator
from datetime import datetime, timedelta
from collections import defaultdict
import numpy as np
from pandas.core.base import PandasObject
from pandas.core.common import (_possibly_downcast_to_dtype, isnull,
_NS_DTYPE, _TD_DTYPE, ABCSeries, is_list_like,
ABCSparseSeries, _infer_dtype_from_scalar,
is_null_datelike_scalar, _maybe_promote,
is_timedelta64_dtype, is_datetime64_dtype,
array_equivalent, _maybe_convert_string_to_object,
is_categorical, needs_i8_conversion, is_datetimelike_v_numeric)
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import maybe_convert_indices, length_of_indexer
from pandas.core.categorical import Categorical, maybe_to_categorical
import pandas.core.common as com
from pandas.sparse.array import _maybe_to_sparse, SparseArray
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.computation.expressions as expressions
from pandas.util.decorators import cache_readonly
from pandas.tslib import Timestamp, Timedelta
from pandas import compat
from pandas.compat import range, map, zip, u
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
from pandas.lib import BlockPlacement
class Block(PandasObject):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['_mgr_locs', 'values', 'ndim']
is_numeric = False
is_float = False
is_integer = False
is_complex = False
is_datetime = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_sparse = False
_can_hold_na = False
_downcast_dtype = None
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
_ftype = 'dense'
_holder = None
def __init__(self, values, placement, ndim=None, fastpath=False):
if ndim is None:
ndim = values.ndim
elif values.ndim != ndim:
raise ValueError('Wrong number of dimensions')
self.ndim = ndim
self.mgr_locs = placement
self.values = values
if len(self.mgr_locs) != len(self.values):
raise ValueError('Wrong number of items passed %d,'
' placement implies %d' % (
len(self.values), len(self.mgr_locs)))
@property
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@property
def _is_single_block(self):
return self.ndim == 1
@property
def is_view(self):
""" return a boolean if I am possibly a view """
return self.values.base is not None
@property
def is_datelike(self):
""" return True if I am a non-datelike """
return self.is_datetime or self.is_timedelta
def is_categorical_astype(self, dtype):
"""
validate that we have a astypeable to categorical,
returns a boolean if we are a categorical
"""
if com.is_categorical_dtype(dtype):
if dtype == com.CategoricalDtype():
return True
# this is a pd.Categorical, but is not
# a valid type for astypeing
raise TypeError("invalid type {0} for astype".format(dtype))
return False
def to_dense(self):
return self.values.view()
@property
def fill_value(self):
return np.nan
@property
def mgr_locs(self):
return self._mgr_locs
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return self.dtype
def make_block_same_class(self, values, placement, copy=False, fastpath=True,
**kwargs):
"""
Wrap given values in a block of same type as self.
`kwargs` are used in SparseBlock override.
"""
if copy:
values = values.copy()
return make_block(values, placement, klass=self.__class__,
fastpath=fastpath, **kwargs)
@mgr_locs.setter
def mgr_locs(self, new_mgr_locs):
if not isinstance(new_mgr_locs, BlockPlacement):
new_mgr_locs = BlockPlacement(new_mgr_locs)
self._mgr_locs = new_mgr_locs
def __unicode__(self):
# don't want to print out all of the items here
name = com.pprint_thing(self.__class__.__name__)
if self._is_single_block:
result = '%s: %s dtype: %s' % (
name, len(self), self.dtype)
else:
shape = ' x '.join([com.pprint_thing(s) for s in self.shape])
result = '%s: %s, %s, dtype: %s' % (
name, com.pprint_thing(self.mgr_locs.indexer), shape,
self.dtype)
return result
def __len__(self):
return len(self.values)
def __getstate__(self):
return self.mgr_locs.indexer, self.values
def __setstate__(self, state):
self.mgr_locs = BlockPlacement(state[0])
self.values = state[1]
self.ndim = self.values.ndim
def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
def reshape_nd(self, labels, shape, ref_items):
"""
Parameters
----------
labels : list of new axis labels
shape : new shape
ref_items : new ref_items
return a new block that is transformed to a nd block
"""
return _block2d_to_blocknd(
values=self.get_values().T,
placement=self.mgr_locs,
shape=shape,
labels=labels,
ref_items=ref_items)
def getitem_block(self, slicer, new_mgr_locs=None):
"""
Perform __getitem__-like, return result as block.
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
if isinstance(slicer, tuple):
axis0_slicer = slicer[0]
else:
axis0_slicer = slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is allowed")
return self.make_block_same_class(new_values, new_mgr_locs)
@property
def shape(self):
return self.values.shape
@property
def itemsize(self):
return self.values.itemsize
@property
def dtype(self):
return self.values.dtype
@property
def ftype(self):
return "%s:%s" % (self.dtype, self._ftype)
def merge(self, other):
return _merge_blocks([self, other])
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
if fill_value is None:
fill_value = self.fill_value
new_values = com.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
return make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)
def get(self, item):
loc = self.items.get_loc(item)
return self.values[loc]
def iget(self, i):
return self.values[i]
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
self.values[locs] = values
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
def apply(self, func, **kwargs):
""" apply the function to my values; return a block if we are not one """
result = func(self.values, **kwargs)
if not isinstance(result, Block):
result = make_block(values=_block_shape(result), placement=self.mgr_locs,)
return result
def fillna(self, value, limit=None, inplace=False, downcast=None):
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
mask = isnull(self.values)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillna' "
"is currently limited to 2")
mask[mask.cumsum(self.ndim-1) > limit] = False
value = self._try_fill(value)
blocks = self.putmask(mask, value, inplace=inplace)
return self._maybe_downcast(blocks, downcast)
def _maybe_downcast(self, blocks, downcast=None):
# no need to downcast our float
# unless indicated
if downcast is None and self.is_float:
return blocks
elif downcast is None and (self.is_timedelta or self.is_datetime):
return blocks
result_blocks = []
for b in blocks:
result_blocks.extend(b.downcast(downcast))
return result_blocks
def downcast(self, dtypes=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return [self]
values = self.values
# single block handling
if self._is_single_block:
# try to cast all non-floats here
if dtypes is None:
dtypes = 'infer'
nv = _possibly_downcast_to_dtype(values, dtypes)
return [make_block(nv, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
# ndim > 1
if dtypes is None:
return [self]
if not (dtypes == 'infer' or isinstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as "
"its argument")
# item-by-item
# this is expensive as it splits the blocks items-by-item
blocks = []
for i, rl in enumerate(self.mgr_locs):
if dtypes == 'infer':
dtype = 'infer'
else:
raise AssertionError("dtypes as dict is not supported yet")
dtype = dtypes.get(item, self._downcast_dtype)
if dtype is None:
nv = _block_shape(values[i], ndim=self.ndim)
else:
nv = _possibly_downcast_to_dtype(values[i], dtype)
nv = _block_shape(nv, ndim=self.ndim)
blocks.append(make_block(nv,
ndim=self.ndim, fastpath=True,
placement=[rl]))
return blocks
def astype(self, dtype, copy=False, raise_on_error=True, values=None, **kwargs):
return self._astype(dtype, copy=copy, raise_on_error=raise_on_error,
values=values, **kwargs)
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None, **kwargs):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
"""
# may need to convert to categorical
# this is only called for non-categoricals
if self.is_categorical_astype(dtype):
return make_block(Categorical(self.values, **kwargs),
ndim=self.ndim,
placement=self.mgr_locs)
# astype processing
dtype = np.dtype(dtype)
if self.dtype == dtype:
if copy:
return self.copy()
return self
if klass is None:
if dtype == np.object_:
klass = ObjectBlock
try:
# force the copy here
if values is None:
# _astype_nansafe works fine with 1-d only
values = com._astype_nansafe(self.values.ravel(), dtype, copy=True)
values = values.reshape(self.values.shape)
newb = make_block(values,
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True, dtype=dtype, klass=klass)
except:
if raise_on_error is True:
raise
newb = self.copy() if copy else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError("cannot set astype for copy = [%s] for dtype "
"(%s [%s]) with smaller itemsize that current "
"(%s [%s])" % (copy, self.dtype.name,
self.itemsize, newb.dtype.name,
newb.itemsize))
return newb
def convert(self, copy=True, **kwargs):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we are not an ObjectBlock here! """
return [self.copy()] if copy else [self]
def _can_hold_element(self, value):
raise NotImplementedError()
def _try_cast(self, value):
raise NotImplementedError()
def _try_cast_result(self, result, dtype=None):
""" try to cast the result to our original type,
we may have roundtripped thru object in the mean-time """
if dtype is None:
dtype = self.dtype
if self.is_integer or self.is_bool or self.is_datetime:
pass
elif self.is_float and result.dtype == self.dtype:
# protect against a bool/object showing up here
if isinstance(dtype, compat.string_types) and dtype == 'infer':
return result
if not isinstance(dtype, type):
dtype = dtype.type
if issubclass(dtype, (np.bool_, np.object_)):
if issubclass(dtype, np.bool_):
if isnull(result).all():
return result.astype(np.bool_)
else:
result = result.astype(np.object_)
result[result == 1] = True
result[result == 0] = False
return result
else:
return result.astype(np.object_)
return result
# may need to change the dtype here
return _possibly_downcast_to_dtype(result, dtype)
def _try_operate(self, values):
""" return a version to operate on as the input """
return values
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def _try_coerce_and_cast_result(self, result, dtype=None):
result = self._try_coerce_result(result)
result = self._try_cast_result(result, dtype=dtype)
return result
def _try_fill(self, value):
return value
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
if not self.is_object and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
return values
# block actions ####
def copy(self, deep=True):
values = self.values
if deep:
values = values.copy()
return make_block(values, ndim=self.ndim,
klass=self.__class__, fastpath=True,
placement=self.mgr_locs)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
""" replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility."""
mask = com.mask_missing(self.values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
mask[filtered_out.nonzero()[0]] = False
if not mask.any():
if inplace:
return [self]
return [self.copy()]
return self.putmask(mask, value, inplace=inplace)
def setitem(self, indexer, value):
""" set the value inplace; return a new block (of a possibly different
dtype)
indexer is a direct slice/positional indexer; value must be a
compatible shape
"""
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce args
values, value = self._try_coerce_args(self.values, value)
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = com._maybe_promote(arr_value.dtype)
values = values.astype(dtype)
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
values = transf(values)
l = len(values)
# length checking
# boolean with truth values == len of the value is ok too
if isinstance(indexer, (np.ndarray, list)):
if is_list_like(value) and len(indexer) != len(value):
if not (isinstance(indexer, np.ndarray) and
indexer.dtype == np.bool_ and
len(indexer[indexer]) == len(value)):
raise ValueError("cannot set using a list-like indexer "
"with a different length than the value")
# slice
elif isinstance(indexer, slice):
if is_list_like(value) and l:
if len(value) != length_of_indexer(indexer, values):
raise ValueError("cannot set using a slice indexer with a "
"different length than the value")
try:
def _is_scalar_indexer(indexer):
# return True if we are all scalar indexers
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return all([ np.isscalar(idx) for idx in indexer ])
return False
def _is_empty_indexer(indexer):
# return a boolean if we have an empty indexer
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
return False
# empty indexers
# 8669 (empty)
if _is_empty_indexer(indexer):
pass
# setting a single element for each dim and with a rhs that could be say a list
# GH 6043
elif _is_scalar_indexer(indexer):
values[indexer] = value
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif len(arr_value.shape) and arr_value.shape[0] == values.shape[0] and np.prod(arr_value.shape) == np.prod(values.shape):
values[indexer] = value
values = values.astype(arr_value.dtype)
# set
else:
values[indexer] = value
# coerce and try to infer the dtypes of the result
if np.isscalar(value):
dtype, _ = _infer_dtype_from_scalar(value)
else:
dtype = 'infer'
values = self._try_coerce_and_cast_result(values, dtype)
block = make_block(transf(values),
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True)
# may have to soft convert_objects here
if block.is_object and not self.is_object:
block = block.convert(numeric=False)
return block
except (ValueError, TypeError) as detail:
raise
except Exception as detail:
pass
return [self]
def putmask(self, mask, new, align=True, inplace=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
# may need to align the new
if hasattr(new, 'reindex_axis'):
new = new.values.T
# may need to align the mask
if hasattr(mask, 'reindex_axis'):
mask = mask.values.T
# if we are passed a scalar None, convert it here
if not is_list_like(new) and isnull(new) and not self.is_object:
new = self.fill_value
if self._can_hold_element(new):
new = self._try_cast(new)
# pseudo-broadcast
if isinstance(new, np.ndarray) and new.ndim == self.ndim - 1:
new = np.repeat(new, self.shape[-1]).reshape(self.shape)
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.any():
# need to go column by column
new_blocks = []
if self.ndim > 1:
for i, ref_loc in enumerate(self.mgr_locs):
m = mask[i]
v = new_values[i]
# need a new block
if m.any():
n = new[i] if isinstance(
new, np.ndarray) else np.array(new)
# type of the new block
dtype, _ = com._maybe_promote(n.dtype)
                        # we need to explicitly astype here to make a copy
n = n.astype(dtype)
nv = _putmask_smart(v, m, n)
else:
nv = v if inplace else v.copy()
# Put back the dimension that was taken from it and make
# a block out of the result.
block = make_block(values=nv[np.newaxis],
placement=[ref_loc],
fastpath=True)
new_blocks.append(block)
else:
nv = _putmask_smart(new_values, mask, new)
new_blocks.append(make_block(values=nv,
placement=self.mgr_locs,
fastpath=True))
return new_blocks
if inplace:
return [self]
return [make_block(new_values,
placement=self.mgr_locs, fastpath=True)]
def interpolate(self, method='pad', axis=0, index=None,
values=None, inplace=False, limit=None,
fill_value=None, coerce=False, downcast=None, **kwargs):
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
# timedelta subclasses IntBlock
if (self.is_bool or self.is_integer) and not self.is_timedelta:
if inplace:
return self
else:
return self.copy()
# a fill na type method
try:
m = com._clean_fill_method(method)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate_with_fill(method=m,
axis=axis,
inplace=inplace,
limit=limit,
fill_value=fill_value,
coerce=coerce,
downcast=downcast)
# try an interp method
try:
m = com._clean_interp_method(method, **kwargs)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate(method=m,
index=index,
values=values,
axis=axis,
limit=limit,
fill_value=fill_value,
inplace=inplace,
downcast=downcast,
**kwargs)
raise ValueError("invalid method '{0}' to interpolate.".format(method))
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, coerce=False,
downcast=None):
""" fillna but using the interpolate machinery """
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
fill_value = self._try_fill(fill_value)
values = self.values if inplace else self.values.copy()
values = self._try_operate(values)
values = com.interpolate_2d(values,
method=method,
axis=axis,
limit=limit,
fill_value=fill_value,
dtype=self.dtype)
values = self._try_coerce_result(values)
blocks = [make_block(values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def _interpolate(self, method=None, index=None, values=None,
fill_value=None, axis=0, limit=None,
inplace=False, downcast=None, **kwargs):
""" interpolate using scipy wrappers """
data = self.values if inplace else self.values.copy()
# only deal with floats
if not self.is_float:
if not self.is_integer:
return self
data = data.astype(np.float64)
if fill_value is None:
fill_value = self.fill_value
if method in ('krogh', 'piecewise_polynomial', 'pchip'):
if not index.is_monotonic:
raise ValueError("{0} interpolation requires that the "
"index be monotonic.".format(method))
# process 1-d slices in the axis direction
def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in apply_along_axis?
# i.e. not an arg to com.interpolate_1d
return com.interpolate_1d(index, x, method=method, limit=limit,
fill_value=fill_value,
bounds_error=False, **kwargs)
# interp each column independently
interp_values = np.apply_along_axis(func, axis, data)
blocks = [make_block(interp_values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = self.fill_value
new_values = com.take_nd(self.get_values(), indexer, axis=axis,
allow_fill=False)
else:
fill_value = fill_tuple[0]
new_values = com.take_nd(self.get_values(), indexer, axis=axis,
allow_fill=True, fill_value=fill_value)
if new_mgr_locs is None:
if axis == 0:
slc = lib.indexer_as_slice(indexer)
if slc is not None:
new_mgr_locs = self.mgr_locs[slc]
else:
new_mgr_locs = self.mgr_locs[indexer]
else:
new_mgr_locs = self.mgr_locs
if new_values.dtype != self.dtype:
return make_block(new_values, new_mgr_locs)
else:
return self.make_block_same_class(new_values, new_mgr_locs)
def get_values(self, dtype=None):
return self.values
def diff(self, n, axis=1):
""" return block for the diff of the values """
new_values = com.diff(self.values, n, axis=axis)
return [make_block(values=new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
def shift(self, periods, axis=0):
""" shift the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(self.values)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if np.prod(new_values.shape):
new_values = np.roll(new_values, com._ensure_platform_int(periods), axis=axis)
axis_indexer = [ slice(None) ] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None,periods)
else:
axis_indexer[axis] = slice(periods,None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
def eval(self, func, other, raise_on_error=True, try_cast=False):
"""
evaluate the block; return result block from the result
Parameters
----------
func : how to combine self, other
other : a ndarray/object
raise_on_error : if True, raise when I can't perform the function,
False by default (and just return the data that we had coming in)
Returns
-------
a new block, the result of the func
"""
values = self.values
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim:
is_transposed = True
else:
if values.shape == other.shape[::-1]:
is_transposed = True
elif values.shape[0] == other.shape[-1]:
is_transposed = True
else:
                    # this is a broadcast error here
raise ValueError("cannot broadcast shape [%s] with block "
"values [%s]" % (values.T.shape,
other.shape))
transf = (lambda x: x.T) if is_transposed else (lambda x: x)
# coerce/transpose the args if needed
values, other = self._try_coerce_args(transf(values), other)
# get the result, may need to transpose the other
def get_result(other):
return self._try_coerce_result(func(values, other))
# error handler if we have an issue operating with the function
def handle_error():
if raise_on_error:
raise TypeError('Could not operate %s with block values %s'
% (repr(other), str(detail)))
else:
# return the values
result = np.empty(values.shape, dtype='O')
result.fill(np.nan)
return result
# get the result
try:
result = get_result(other)
# if we have an invalid shape/broadcast error
# GH4576, so raise instead of allowing to pass through
except ValueError as detail:
raise
except Exception as detail:
result = handle_error()
# technically a broadcast error in numpy can 'work' by returning a
# boolean False
        if not isinstance(result, np.ndarray):
# differentiate between an invalid ndarray-ndarray comparison
# and an invalid type comparison
if isinstance(values, np.ndarray) and is_list_like(other):
raise ValueError('Invalid broadcasting comparison [%s] '
'with block values' % repr(other))
raise TypeError('Could not compare [%s] with block values'
% repr(other))
# transpose if needed
result = transf(result)
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return [make_block(result, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
def where(self, other, cond, align=True, raise_on_error=True,
try_cast=False):
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
align : boolean, perform alignment on other/cond
        raise_on_error : boolean, default True
            if True, raise when the function cannot be applied to the block
            values; if False, return an all-NaN array of the same shape
Returns
-------
a new block(s), the result of the func
"""
values = self.values
# see if we can align other
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim or values.shape == other.shape[::-1]:
# if its symmetric are ok, no reshaping needed (GH 7506)
if (values.shape[0] == np.array(values.shape)).all():
pass
                # pseudo broadcast (it's a 2d vs 1d, say, and where needs it in a
# specific direction)
elif (other.ndim >= 1 and values.ndim - 1 == other.ndim and
values.shape[0] != other.shape[0]):
other = _block_shape(other).T
else:
values = values.T
is_transposed = True
# see if we can align cond
if not hasattr(cond, 'shape'):
raise ValueError(
"where must have a condition that is ndarray like")
if hasattr(cond, 'reindex_axis'):
cond = cond.values
# may need to undo transpose of values
if hasattr(values, 'ndim'):
if values.ndim != cond.ndim or values.shape == cond.shape[::-1]:
values = values.T
is_transposed = not is_transposed
other = _maybe_convert_string_to_object(other)
# our where function
def func(c, v, o):
if c.ravel().all():
return v
v, o = self._try_coerce_args(v, o)
try:
return self._try_coerce_result(
expressions.where(c, v, o, raise_on_error=True)
)
except Exception as detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values '
'[%s]' % (repr(o), str(detail)))
else:
# return the values
result = np.empty(v.shape, dtype='float64')
result.fill(np.nan)
return result
# see if we can operate on the entire block, or need item-by-item
# or if we are a single block (ndim == 1)
result = func(cond, values, other)
if self._can_hold_na or self.ndim == 1:
if not isinstance(result, np.ndarray):
raise TypeError('Could not compare [%s] with block values'
% repr(other))
if is_transposed:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return make_block(result,
ndim=self.ndim, placement=self.mgr_locs)
# might need to separate out blocks
axis = cond.ndim - 1
cond = cond.swapaxes(axis, 0)
mask = np.array([cond[i].all() for i in range(cond.shape[0])],
dtype=bool)
result_blocks = []
for m in [mask, ~mask]:
if m.any():
r = self._try_cast_result(
result.take(m.nonzero()[0], axis=axis))
result_blocks.append(make_block(r.T,
placement=self.mgr_locs[m]))
return result_blocks
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape: return False
return array_equivalent(self.values, other.values)
class NonConsolidatableMixIn(object):
""" hold methods for the nonconsolidatable blocks """
_can_consolidate = False
_verify_integrity = False
_validate_ndim = False
_holder = None
def __init__(self, values, placement,
ndim=None, fastpath=False,):
# Placement must be converted to BlockPlacement via property setter
# before ndim logic, because placement may be a slice which doesn't
# have a length.
self.mgr_locs = placement
# kludgetastic
if ndim is None:
if len(self.mgr_locs) != 1:
ndim = 1
else:
ndim = 2
self.ndim = ndim
if not isinstance(values, self._holder):
raise TypeError("values must be {0}".format(self._holder.__name__))
self.values = values
def get_values(self, dtype=None):
""" need to to_dense myself (and always return a ndim sized object) """
values = self.values.to_dense()
if values.ndim == self.ndim - 1:
values = values.reshape((1,) + values.shape)
return values
def iget(self, col):
if self.ndim == 2 and isinstance(col, tuple):
col, loc = col
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
else:
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values
def should_store(self, value):
return isinstance(value, self._holder)
def set(self, locs, values, check=False):
assert locs.tolist() == [0]
self.values = values
def get(self, item):
if self.ndim == 1:
loc = self.items.get_loc(item)
return self.values[loc]
else:
return self.values
def _slice(self, slicer):
""" return a slice of my values (but densify first) """
return self.get_values()[slicer]
def _try_cast_result(self, result, dtype=None):
return result
class NumericBlock(Block):
__slots__ = ()
is_numeric = True
_can_hold_na = True
class FloatOrComplexBlock(NumericBlock):
__slots__ = ()
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape: return False
left, right = self.values, other.values
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
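    # Illustrative sketch, not part of the original source: unlike the base
    # Block.equals, the comparison above treats NaNs in matching positions as
    # equal, so a float block holding [1.0, nan] compares equal to itself:
    #
    #     left = np.array([1.0, np.nan])
    #     ((left == left) | (np.isnan(left) & np.isnan(left))).all()   # -> True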
class FloatBlock(FloatOrComplexBlock):
__slots__ = ()
is_float = True
_downcast_dtype = 'int64'
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, (np.floating, np.integer)) and not issubclass(
tipo, (np.datetime64, np.timedelta64))
return isinstance(element, (float, int, np.float_, np.int_)) and not isinstance(
element, (bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64))
def _try_cast(self, element):
try:
return float(element)
except: # pragma: no cover
return element
def to_native_types(self, slicer=None, na_rep='', float_format=None, decimal='.',
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
formatter = None
if float_format and decimal != '.':
formatter = lambda v : (float_format % v).replace('.',decimal,1)
elif decimal != '.':
formatter = lambda v : ('%g' % v).replace('.',decimal,1)
elif float_format:
formatter = lambda v : float_format % v
if formatter is None and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
if formatter:
imask = (~mask).ravel()
values.flat[imask] = np.array(
[formatter(val) for val in values.ravel()[imask]])
return values
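    # Illustrative sketch, not part of the original source: with a non-default
    # decimal separator and no float_format, the '%g' formatter above is used
    # and only the first '.' is swapped, e.g. for decimal=',':
    #
    #     ('%g' % 1234.5).replace('.', ',', 1)   # -> '1234,5'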
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return (issubclass(value.dtype.type, np.floating) and
value.dtype == self.dtype)
class ComplexBlock(FloatOrComplexBlock):
__slots__ = ()
is_complex = True
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, (np.floating, np.integer, np.complexfloating))
return (isinstance(element, (float, int, complex, np.float_, np.int_)) and
                not isinstance(element, (bool, np.bool_)))
def _try_cast(self, element):
try:
return complex(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.complexfloating)
class IntBlock(NumericBlock):
__slots__ = ()
is_integer = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, np.integer) and not issubclass(tipo, (np.datetime64, np.timedelta64))
return com.is_integer(element)
def _try_cast(self, element):
try:
return int(element)
except: # pragma: no cover
return element
def should_store(self, value):
return com.is_integer_dtype(value) and value.dtype == self.dtype
class TimeDeltaBlock(IntBlock):
__slots__ = ()
is_timedelta = True
_can_hold_na = True
is_numeric = False
@property
def fill_value(self):
return tslib.iNaT
def _try_fill(self, value):
""" if we are a NaT, return the actual fill value """
if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all():
value = tslib.iNaT
elif isinstance(value, Timedelta):
value = value.value
elif isinstance(value, np.timedelta64):
pass
elif com.is_integer(value):
            # an integer is interpreted as seconds and converted to nanoseconds
value = np.timedelta64(int(value * 1e9))
elif isinstance(value, timedelta):
value = np.timedelta64(value)
return value
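    # Illustrative note, not part of the original source: _try_fill normalizes
    # the fill value to the block's nanosecond representation, e.g. a NaT-like
    # value becomes tslib.iNaT while an integer is read as seconds:
    #
    #     np.timedelta64(int(5 * 1e9))   # -> 5 seconds expressed in nanoseconds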
def _try_coerce_args(self, values, other):
""" Coerce values and other to float64, with null values converted to
NaN. values is always ndarray-like, other may not be """
def masker(v):
mask = isnull(v)
v = v.astype('float64')
v[mask] = np.nan
return v
values = masker(values)
if is_null_datelike_scalar(other):
other = np.nan
elif isinstance(other, (np.timedelta64, Timedelta, timedelta)):
other = _coerce_scalar_to_timedelta_type(other, unit='s', box=False).item()
if other == tslib.iNaT:
other = np.nan
elif lib.isscalar(other):
other = np.float64(other)
else:
other = masker(other)
return values, other
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def _try_coerce_result(self, result):
""" reverse of try_coerce_args / try_operate """
if isinstance(result, np.ndarray):
mask = isnull(result)
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('m8[ns]')
result[mask] = tslib.iNaT
elif isinstance(result, np.integer):
result = lib.Timedelta(result)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.timedelta64)
def to_native_types(self, slicer=None, na_rep=None, quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
rvalues = np.empty(values.shape, dtype=object)
if na_rep is None:
na_rep = 'NaT'
rvalues[mask] = na_rep
imask = (~mask).ravel()
#### FIXME ####
# should use the core.format.Timedelta64Formatter here
# to figure what format to pass to the Timedelta
# e.g. to not show the decimals say
rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all')
for val in values.ravel()[imask]],
dtype=object)
return rvalues
def get_values(self, dtype=None):
# return object dtypes as Timedelta
if dtype == object:
return lib.map_infer(self.values.ravel(), lib.Timedelta
).reshape(self.values.shape)
return self.values
class BoolBlock(NumericBlock):
__slots__ = ()
is_bool = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, np.integer)
return isinstance(element, (int, bool))
def _try_cast(self, element):
try:
return bool(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
to_replace_values = np.atleast_1d(to_replace)
if not np.can_cast(to_replace_values, bool):
return self
return super(BoolBlock, self).replace(to_replace, value,
inplace=inplace, filter=filter,
regex=regex)
class ObjectBlock(Block):
__slots__ = ()
is_object = True
_can_hold_na = True
def __init__(self, values, ndim=2, fastpath=False,
placement=None):
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object)
super(ObjectBlock, self).__init__(values, ndim=ndim,
fastpath=fastpath,
placement=placement)
@property
def is_bool(self):
""" we can be a bool if we have only bool values but are of type
object
"""
return lib.is_bool_array(self.values.ravel())
def convert(self, datetime=True, numeric=True, timedelta=True, coerce=False,
copy=True, by_item=True):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
# attempt to create new type blocks
blocks = []
if by_item and not self._is_single_block:
for i, rl in enumerate(self.mgr_locs):
values = self.iget(i)
values = com._possibly_convert_objects(
values.ravel(),
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy
).reshape(values.shape)
values = _block_shape(values, ndim=self.ndim)
newb = make_block(values,
ndim=self.ndim, placement=[rl])
blocks.append(newb)
else:
values = com._possibly_convert_objects(
self.values.ravel(),
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy
).reshape(self.values.shape)
blocks.append(make_block(values,
ndim=self.ndim, placement=self.mgr_locs))
return blocks
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
# GH6026
if check:
try:
if (self.values[locs] == values).all():
return
except:
pass
try:
self.values[locs] = values
except (ValueError):
# broadcasting error
# see GH6171
new_shape = list(values.shape)
new_shape[0] = len(self.items)
self.values = np.empty(tuple(new_shape),dtype=self.dtype)
self.values.fill(np.nan)
self.values[locs] = values
def _maybe_downcast(self, blocks, downcast=None):
if downcast is not None:
return blocks
# split and convert the blocks
result_blocks = []
for blk in blocks:
result_blocks.extend(blk.convert(datetime=True,
numeric=False))
return result_blocks
def _can_hold_element(self, element):
return True
def _try_cast(self, element):
return element
def should_store(self, value):
return not (issubclass(value.dtype.type,
(np.integer, np.floating, np.complexfloating,
np.datetime64, np.bool_)) or com.is_categorical_dtype(value))
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
blk = [self]
to_rep_is_list = com.is_list_like(to_replace)
value_is_list = com.is_list_like(value)
both_lists = to_rep_is_list and value_is_list
either_list = to_rep_is_list or value_is_list
if not either_list and com.is_re(to_replace):
blk[0], = blk[0]._replace_single(to_replace, value,
inplace=inplace, filter=filter,
regex=True)
elif not (either_list or regex):
blk = super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter, regex=regex)
elif both_lists:
for to_rep, v in zip(to_replace, value):
blk[0], = blk[0]._replace_single(to_rep, v, inplace=inplace,
filter=filter, regex=regex)
elif to_rep_is_list and regex:
for to_rep in to_replace:
blk[0], = blk[0]._replace_single(to_rep, value,
inplace=inplace,
filter=filter, regex=regex)
else:
blk[0], = blk[0]._replace_single(to_replace, value,
inplace=inplace, filter=filter,
regex=regex)
return blk
def _replace_single(self, to_replace, value, inplace=False, filter=None,
regex=False):
# to_replace is regex compilable
to_rep_re = regex and com.is_re_compilable(to_replace)
# regex is regex compilable
regex_re = com.is_re_compilable(regex)
# only one will survive
if to_rep_re and regex_re:
raise AssertionError('only one of to_replace and regex can be '
'regex compilable')
# if regex was passed as something that can be a regex (rather than a
# boolean)
if regex_re:
to_replace = regex
regex = regex_re or to_rep_re
# try to get the pattern attribute (compiled re) or it's a string
try:
pattern = to_replace.pattern
except AttributeError:
pattern = to_replace
# if the pattern is not empty and to_replace is either a string or a
# regex
if regex and pattern:
rx = re.compile(to_replace)
else:
# if the thing to replace is not a string or compiled regex call
# the superclass method -> to_replace is some kind of object
result = super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter,
regex=regex)
if not isinstance(result, list):
result = [result]
return result
new_values = self.values if inplace else self.values.copy()
# deal with replacing values with objects (strings) that match but
# whose replacement is not a string (numeric, nan, object)
if isnull(value) or not isinstance(value, compat.string_types):
def re_replacer(s):
try:
return value if rx.search(s) is not None else s
except TypeError:
return s
else:
# value is guaranteed to be a string here, s can be either a string
            # or null; if it's null it gets returned unchanged
def re_replacer(s):
try:
return rx.sub(value, s)
except TypeError:
return s
f = np.vectorize(re_replacer, otypes=[self.dtype])
if filter is None:
filt = slice(None)
else:
filt = self.mgr_locs.isin(filter).nonzero()[0]
new_values[filt] = f(new_values[filt])
return [self if inplace else
make_block(new_values,
fastpath=True, placement=self.mgr_locs)]
class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock):
__slots__ = ()
is_categorical = True
_can_hold_na = True
_holder = Categorical
def __init__(self, values, placement,
fastpath=False, **kwargs):
# coerce to categorical if we can
super(CategoricalBlock, self).__init__(maybe_to_categorical(values),
fastpath=True, placement=placement,
**kwargs)
@property
def is_view(self):
""" I am never a view """
return False
def to_dense(self):
return self.values.to_dense().view()
def convert(self, copy=True, **kwargs):
return [self.copy() if copy else self]
@property
def shape(self):
return (len(self.mgr_locs), len(self.values))
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return np.object_
def _slice(self, slicer):
""" return a slice of my values """
# slice the category
# return same dims as we currently have
return self.values._slice(slicer)
def fillna(self, value, limit=None, inplace=False, downcast=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
values = self.values if inplace else self.values.copy()
return [self.make_block_same_class(values=values.fillna(value=value,
limit=limit),
placement=self.mgr_locs)]
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
values = self.values if inplace else self.values.copy()
return self.make_block_same_class(values=values.fillna(fill_value=fill_value,
method=method,
limit=limit),
placement=self.mgr_locs)
def shift(self, periods, axis=0):
return self.make_block_same_class(values=self.values.shift(periods),
placement=self.mgr_locs)
def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = None
else:
fill_value = fill_tuple[0]
# axis doesn't matter; we are really a single-dim object
        # but are passed the axis depending on the calling routine
# if its REALLY axis 0, then this will be a reindex and not a take
new_values = self.values.take_nd(indexer, fill_value=fill_value)
# if we are a 1-dim object, then always place at 0
if self.ndim == 1:
new_mgr_locs = [0]
else:
if new_mgr_locs is None:
new_mgr_locs = self.mgr_locs
return self.make_block_same_class(new_values, new_mgr_locs)
def putmask(self, mask, new, align=True, inplace=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
new_values[mask] = new
return [self.make_block_same_class(values=new_values, placement=self.mgr_locs)]
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None):
"""
Coerce to the new type (if copy=True, return a new copy)
        raise an exception if raise_on_error == True
"""
if self.is_categorical_astype(dtype):
values = self.values
else:
values = np.asarray(self.values).astype(dtype, copy=False)
if copy:
values = values.copy()
return make_block(values,
ndim=self.ndim,
placement=self.mgr_locs)
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
# Categorical is always one dimension
values = values[slicer]
mask = isnull(values)
values = np.array(values, dtype='object')
values[mask] = na_rep
# we are expected to return a 2-d ndarray
return values.reshape(1,len(values))
class DatetimeBlock(Block):
__slots__ = ()
is_datetime = True
_can_hold_na = True
def __init__(self, values, placement,
fastpath=False, **kwargs):
if values.dtype != _NS_DTYPE:
values = tslib.cast_to_nanoseconds(values)
super(DatetimeBlock, self).__init__(values,
fastpath=True, placement=placement,
**kwargs)
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return element.dtype == _NS_DTYPE or element.dtype == np.int64
return (com.is_integer(element) or
isinstance(element, datetime) or
isnull(element))
def _try_cast(self, element):
try:
return int(element)
except:
return element
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def _try_coerce_args(self, values, other):
""" Coerce values and other to dtype 'i8'. NaN and NaT convert to
the smallest i8, and will correctly round-trip to NaT if converted
back in _try_coerce_result. values is always ndarray-like, other
may not be """
values = values.view('i8')
if is_null_datelike_scalar(other):
other = tslib.iNaT
elif isinstance(other, datetime):
other = lib.Timestamp(other).asm8.view('i8')
elif hasattr(other, 'dtype') and com.is_integer_dtype(other):
other = other.view('i8')
else:
other = np.array(other, dtype='i8')
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
if isinstance(result, np.ndarray):
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('M8[ns]')
elif isinstance(result, (np.integer, np.datetime64)):
result = lib.Timestamp(result)
return result
@property
def fill_value(self):
return tslib.iNaT
def _try_fill(self, value):
""" if we are a NaT, return the actual fill value """
if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all():
value = tslib.iNaT
return value
def fillna(self, value, limit=None,
inplace=False, downcast=None):
# straight putmask here
values = self.values if inplace else self.values.copy()
mask = isnull(self.values)
value = self._try_fill(value)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillna' "
"is currently limited to 2")
mask[mask.cumsum(self.ndim-1)>limit]=False
np.putmask(values, mask, value)
return [self if inplace else
make_block(values,
fastpath=True, placement=self.mgr_locs)]
def to_native_types(self, slicer=None, na_rep=None, date_format=None,
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
from pandas.core.format import _get_format_datetime64_from_values
format = _get_format_datetime64_from_values(values, date_format)
result = tslib.format_array_from_datetime(values.view('i8').ravel(),
tz=None,
format=format,
na_rep=na_rep).reshape(values.shape)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.datetime64)
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
if values.dtype != _NS_DTYPE:
# Workaround for numpy 1.6 bug
values = tslib.cast_to_nanoseconds(values)
self.values[locs] = values
def get_values(self, dtype=None):
# return object dtype as Timestamps
if dtype == object:
return lib.map_infer(self.values.ravel(), lib.Timestamp)\
.reshape(self.values.shape)
return self.values
class SparseBlock(NonConsolidatableMixIn, Block):
""" implement as a list of sparse arrays of the same dtype """
__slots__ = ()
is_sparse = True
is_numeric = True
_can_hold_na = True
_ftype = 'sparse'
_holder = SparseArray
@property
def shape(self):
return (len(self.mgr_locs), self.sp_index.length)
@property
def itemsize(self):
return self.dtype.itemsize
@property
def fill_value(self):
#return np.nan
return self.values.fill_value
@fill_value.setter
def fill_value(self, v):
# we may need to upcast our fill to match our dtype
if issubclass(self.dtype.type, np.floating):
v = float(v)
self.values.fill_value = v
@property
def sp_values(self):
return self.values.sp_values
@sp_values.setter
def sp_values(self, v):
# reset the sparse values
self.values = SparseArray(v, sparse_index=self.sp_index,
kind=self.kind, dtype=v.dtype,
fill_value=self.values.fill_value,
copy=False)
@property
def sp_index(self):
return self.values.sp_index
@property
def kind(self):
return self.values.kind
def __len__(self):
try:
return self.sp_index.length
except:
return 0
def copy(self, deep=True):
return self.make_block_same_class(values=self.values,
sparse_index=self.sp_index,
kind=self.kind, copy=deep,
placement=self.mgr_locs)
def make_block_same_class(self, values, placement,
sparse_index=None, kind=None, dtype=None,
fill_value=None, copy=False, fastpath=True):
""" return a new block """
if dtype is None:
dtype = self.dtype
if fill_value is None:
fill_value = self.values.fill_value
# if not isinstance(values, SparseArray) and values.ndim != self.ndim:
# raise ValueError("ndim mismatch")
if values.ndim == 2:
nitems = values.shape[0]
if nitems == 0:
# kludgy, but SparseBlocks cannot handle slices, where the
# output is 0-item, so let's convert it to a dense block: it
# won't take space since there's 0 items, plus it will preserve
# the dtype.
return make_block(np.empty(values.shape, dtype=dtype),
placement, fastpath=True,)
elif nitems > 1:
raise ValueError("Only 1-item 2d sparse blocks are supported")
else:
values = values.reshape(values.shape[1])
new_values = SparseArray(values, sparse_index=sparse_index,
kind=kind or self.kind, dtype=dtype,
fill_value=fill_value, copy=copy)
return make_block(new_values, ndim=self.ndim,
fastpath=fastpath, placement=placement)
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
values = com.interpolate_2d(
self.values.to_dense(), method, axis, limit, fill_value)
return self.make_block_same_class(values=values,
placement=self.mgr_locs)
def fillna(self, value, limit=None, inplace=False, downcast=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
if issubclass(self.dtype.type, np.floating):
value = float(value)
values = self.values if inplace else self.values.copy()
return [self.make_block_same_class(values=values.get_values(value),
fill_value=value,
placement=self.mgr_locs)]
def shift(self, periods, axis=0):
""" shift the block by periods """
N = len(self.values.T)
indexer = np.zeros(N, dtype=int)
if periods > 0:
indexer[periods:] = np.arange(N - periods)
else:
indexer[:periods] = np.arange(-periods, N)
new_values = self.values.to_dense().take(indexer)
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(new_values)
if periods > 0:
new_values[:periods] = fill_value
else:
new_values[periods:] = fill_value
return [self.make_block_same_class(new_values, placement=self.mgr_locs)]
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
# taking on the 0th axis always here
if fill_value is None:
fill_value = self.fill_value
return self.make_block_same_class(self.values.take(indexer),
fill_value=fill_value,
placement=self.mgr_locs)
def sparse_reindex(self, new_index):
""" sparse reindex and return a new block
current reindex only works for float64 dtype! """
values = self.values
values = values.sp_index.to_int_index().reindex(
values.sp_values.astype('float64'), values.fill_value, new_index)
return self.make_block_same_class(values, sparse_index=new_index,
placement=self.mgr_locs)
def make_block(values, placement, klass=None, ndim=None,
dtype=None, fastpath=False):
if klass is None:
dtype = dtype or values.dtype
vtype = dtype.type
if isinstance(values, SparseArray):
klass = SparseBlock
elif issubclass(vtype, np.floating):
klass = FloatBlock
elif (issubclass(vtype, np.integer) and
issubclass(vtype, np.timedelta64)):
klass = TimeDeltaBlock
elif (issubclass(vtype, np.integer) and
not issubclass(vtype, np.datetime64)):
klass = IntBlock
elif dtype == np.bool_:
klass = BoolBlock
elif issubclass(vtype, np.datetime64):
klass = DatetimeBlock
elif issubclass(vtype, np.complexfloating):
klass = ComplexBlock
elif is_categorical(values):
klass = CategoricalBlock
else:
klass = ObjectBlock
return klass(values, ndim=ndim, fastpath=fastpath,
placement=placement)
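# Illustrative sketch, not part of the original source: make_block dispatches
# purely on the values' dtype (or an explicit klass), so plain ndarrays come
# back wrapped in the matching Block subclass:
#
#     make_block(np.zeros((1, 3)), placement=[0])                  # FloatBlock
#     make_block(np.zeros((1, 3), dtype='int64'), placement=[0])   # IntBlock
#     make_block(np.zeros((1, 3), dtype=bool), placement=[0])      # BoolBlock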
# TODO: flexible with index=None and/or items=None
class BlockManager(PandasObject):
"""
Core internal data structure to implement DataFrame
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Attributes
----------
shape
ndim
axes
values
items
Methods
-------
set_axis(axis, new_labels)
copy(deep=True)
get_dtype_counts
get_ftype_counts
get_dtypes
get_ftypes
apply(func, axes, block_filter_fn)
get_bool_data
get_numeric_data
get_slice(slice_like, axis)
get(label)
iget(loc)
get_scalar(label_tup)
take(indexer, axis)
reindex_axis(new_labels, axis)
reindex_indexer(new_labels, indexer, axis)
delete(label)
insert(loc, label, value)
set(label, value)
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated',
'_is_consolidated', '_blknos', '_blklocs']
def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True):
self.axes = [_ensure_index(ax) for ax in axes]
self.blocks = tuple(blocks)
for block in blocks:
if block.is_sparse:
if len(block.mgr_locs) != 1:
raise AssertionError("Sparse block refers to multiple items")
else:
if self.ndim != block.ndim:
raise AssertionError(('Number of Block dimensions (%d) must '
'equal number of axes (%d)')
% (block.ndim, self.ndim))
if do_integrity_check:
self._verify_integrity()
self._consolidate_check()
self._rebuild_blknos_and_blklocs()
def make_empty(self, axes=None):
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
axes = [_ensure_index([])] + [
_ensure_index(a) for a in self.axes[1:]
]
# preserve dtype if possible
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes)
def __nonzero__(self):
return True
# Python3 compat
__bool__ = __nonzero__
@property
def shape(self):
return tuple(len(ax) for ax in self.axes)
@property
def ndim(self):
return len(self.axes)
def set_axis(self, axis, new_labels):
new_labels = _ensure_index(new_labels)
old_len = len(self.axes[axis])
new_len = len(new_labels)
if new_len != old_len:
raise ValueError('Length mismatch: Expected axis has %d elements, '
'new values have %d elements' % (old_len, new_len))
self.axes[axis] = new_labels
def rename_axis(self, mapper, axis, copy=True):
"""
Rename one of axes.
Parameters
----------
mapper : unary callable
axis : int
copy : boolean, default True
"""
obj = self.copy(deep=copy)
obj.set_axis(axis, _transform_index(self.axes[axis], mapper))
return obj
def add_prefix(self, prefix):
f = (str(prefix) + '%s').__mod__
return self.rename_axis(f, axis=0)
def add_suffix(self, suffix):
f = ('%s' + str(suffix)).__mod__
return self.rename_axis(f, axis=0)
@property
def _is_single_block(self):
if self.ndim == 1:
return True
if len(self.blocks) != 1:
return False
blk = self.blocks[0]
return (blk.mgr_locs.is_slice_like and
blk.mgr_locs.as_slice == slice(0, len(self), 1))
def _rebuild_blknos_and_blklocs(self):
"""
Update mgr._blknos / mgr._blklocs.
"""
new_blknos = np.empty(self.shape[0], dtype=np.int64)
new_blklocs = np.empty(self.shape[0], dtype=np.int64)
new_blknos.fill(-1)
new_blklocs.fill(-1)
for blkno, blk in enumerate(self.blocks):
rl = blk.mgr_locs
new_blknos[rl.indexer] = blkno
new_blklocs[rl.indexer] = np.arange(len(rl))
if (new_blknos == -1).any():
raise AssertionError("Gaps in blk ref_locs")
self._blknos = new_blknos
self._blklocs = new_blklocs
# make items read only for now
def _get_items(self):
return self.axes[0]
items = property(fget=_get_items)
def _get_counts(self, f):
""" return a dict of the counts of the function in BlockManager """
self._consolidate_inplace()
counts = dict()
for b in self.blocks:
v = f(b)
counts[v] = counts.get(v, 0) + b.shape[0]
return counts
def get_dtype_counts(self):
return self._get_counts(lambda b: b.dtype.name)
def get_ftype_counts(self):
return self._get_counts(lambda b: b.ftype)
def get_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return com.take_1d(dtypes, self._blknos, allow_fill=False)
def get_ftypes(self):
ftypes = np.array([blk.ftype for blk in self.blocks])
return com.take_1d(ftypes, self._blknos, allow_fill=False)
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
axes_array = [ax for ax in self.axes]
extra_state = {
'0.14.1': {
'axes': axes_array,
'blocks': [dict(values=b.values,
mgr_locs=b.mgr_locs.indexer)
for b in self.blocks]
}
}
# First three elements of the state are to maintain forward
# compatibility with 0.13.1.
return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
def unpickle_block(values, mgr_locs):
# numpy < 1.7 pickle compat
if values.dtype == 'M8[us]':
values = values.astype('M8[ns]')
return make_block(values, placement=mgr_locs)
if (isinstance(state, tuple) and len(state) >= 4
and '0.14.1' in state[3]):
state = state[3]['0.14.1']
self.axes = [_ensure_index(ax) for ax in state['axes']]
self.blocks = tuple(
unpickle_block(b['values'], b['mgr_locs'])
for b in state['blocks'])
else:
# discard anything after 3rd, support beta pickling format for a
# little while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [_ensure_index(ax) for ax in ax_arrays]
if len(bitems) == 1 and self.axes[0].equals(bitems[0]):
# This is a workaround for pre-0.14.1 pickles that didn't
# support unpickling multi-block frames/panels with non-unique
# columns/items, because given a manager with items ["a", "b",
# "a"] there's no way of knowing which block's "a" is where.
#
# Single-block case can be supported under the assumption that
# block items corresponded to manager items 1-to-1.
all_mgr_locs = [slice(0, len(bitems[0]))]
else:
all_mgr_locs = [self.axes[0].get_indexer(blk_items)
for blk_items in bitems]
self.blocks = tuple(
unpickle_block(values, mgr_locs)
for values, mgr_locs in zip(bvalues, all_mgr_locs))
self._post_setstate()
def _post_setstate(self):
self._is_consolidated = False
self._known_consolidated = False
self._rebuild_blknos_and_blklocs()
def __len__(self):
return len(self.items)
def __unicode__(self):
output = com.pprint_thing(self.__class__.__name__)
for i, ax in enumerate(self.axes):
if i == 0:
output += u('\nItems: %s') % ax
else:
output += u('\nAxis %d: %s') % (i, ax)
for block in self.blocks:
output += u('\n%s') % com.pprint_thing(block)
return output
def _verify_integrity(self):
mgr_shape = self.shape
tot_items = sum(len(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
if not block.is_sparse and block.shape[1:] != mgr_shape[1:]:
construction_error(tot_items, block.shape[1:], self.axes)
if len(self.items) != tot_items:
raise AssertionError('Number of manager items must equal union of '
'block items\n# manager items: {0}, # '
'tot_items: {1}'.format(len(self.items),
tot_items))
def apply(self, f, axes=None, filter=None, do_integrity_check=False, **kwargs):
"""
iterate over the blocks, collect and create a new block manager
Parameters
----------
f : the callable or function name to operate on at the block level
axes : optional (if not supplied, use self.axes)
filter : list, if supplied, only call the block if the filter is in
the block
do_integrity_check : boolean, default False. Do the block manager integrity check
Returns
-------
Block Manager (new object)
"""
result_blocks = []
# filter kwarg is used in replace-* family of methods
if filter is not None:
filter_locs = set(self.items.get_indexer_for(filter))
if len(filter_locs) == len(self.items):
# All items are included, as if there were no filtering
filter = None
else:
kwargs['filter'] = filter_locs
if f == 'where' and kwargs.get('align', True):
align_copy = True
align_keys = ['other', 'cond']
elif f == 'putmask' and kwargs.get('align', True):
align_copy = False
align_keys = ['new', 'mask']
elif f == 'eval':
align_copy = False
align_keys = ['other']
elif f == 'fillna':
# fillna internally does putmask, maybe it's better to do this
# at mgr, not block level?
align_copy = False
align_keys = ['value']
else:
align_keys = []
aligned_args = dict((k, kwargs[k]) for k in align_keys
if hasattr(kwargs[k], 'reindex_axis'))
for b in self.blocks:
if filter is not None:
if not b.mgr_locs.isin(filter_locs).any():
result_blocks.append(b)
continue
if aligned_args:
b_items = self.items[b.mgr_locs.indexer]
for k, obj in aligned_args.items():
axis = getattr(obj, '_info_axis_number', 0)
kwargs[k] = obj.reindex_axis(b_items, axis=axis,
copy=align_copy)
applied = getattr(b, f)(**kwargs)
if isinstance(applied, list):
result_blocks.extend(applied)
else:
result_blocks.append(applied)
if len(result_blocks) == 0:
return self.make_empty(axes or self.axes)
bm = self.__class__(result_blocks, axes or self.axes,
do_integrity_check=do_integrity_check)
bm._consolidate_inplace()
return bm
def isnull(self, **kwargs):
return self.apply('apply', **kwargs)
def where(self, **kwargs):
return self.apply('where', **kwargs)
def eval(self, **kwargs):
return self.apply('eval', **kwargs)
def setitem(self, **kwargs):
return self.apply('setitem', **kwargs)
def putmask(self, **kwargs):
return self.apply('putmask', **kwargs)
def diff(self, **kwargs):
return self.apply('diff', **kwargs)
def interpolate(self, **kwargs):
return self.apply('interpolate', **kwargs)
def shift(self, **kwargs):
return self.apply('shift', **kwargs)
def fillna(self, **kwargs):
return self.apply('fillna', **kwargs)
def downcast(self, **kwargs):
return self.apply('downcast', **kwargs)
def astype(self, dtype, **kwargs):
return self.apply('astype', dtype=dtype, **kwargs)
def convert(self, **kwargs):
return self.apply('convert', **kwargs)
def replace(self, **kwargs):
return self.apply('replace', **kwargs)
def replace_list(self, src_list, dest_list, inplace=False, regex=False):
""" do a list replace """
# figure out our mask a-priori to avoid repeated replacements
values = self.as_matrix()
def comp(s):
if isnull(s):
return isnull(values)
return _possibly_compare(values, getattr(s, 'asm8', s),
operator.eq)
masks = [comp(s) for i, s in enumerate(src_list)]
result_blocks = []
for blk in self.blocks:
# its possible to get multiple result blocks here
# replace ALWAYS will return a list
rb = [blk if inplace else blk.copy()]
for i, (s, d) in enumerate(zip(src_list, dest_list)):
new_rb = []
for b in rb:
if b.dtype == np.object_:
result = b.replace(s, d, inplace=inplace,
regex=regex)
if isinstance(result, list):
new_rb.extend(result)
else:
new_rb.append(result)
else:
# get our mask for this element, sized to this
# particular block
m = masks[i][b.mgr_locs.indexer]
if m.any():
new_rb.extend(b.putmask(m, d, inplace=True))
else:
new_rb.append(b)
rb = new_rb
result_blocks.extend(rb)
bm = self.__class__(result_blocks, self.axes)
bm._consolidate_inplace()
return bm
def reshape_nd(self, axes, **kwargs):
""" a 2d-nd reshape operation on a BlockManager """
return self.apply('reshape_nd', axes=axes, **kwargs)
def is_consolidated(self):
"""
        Return True if the blocks are consolidated, i.e. no two blocks share
        the same ftype
"""
if not self._known_consolidated:
self._consolidate_check()
return self._is_consolidated
def _consolidate_check(self):
ftypes = [blk.ftype for blk in self.blocks]
self._is_consolidated = len(ftypes) == len(set(ftypes))
self._known_consolidated = True
@property
def is_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return len(self.blocks) > 1
@property
def is_numeric_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return all([block.is_numeric for block in self.blocks])
@property
def is_datelike_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return any([block.is_datelike for block in self.blocks])
@property
def is_view(self):
""" return a boolean if we are a single block and are a view """
if len(self.blocks) == 1:
return self.blocks[0].is_view
# It is technically possible to figure out which blocks are views
# e.g. [ b.values.base is not None for b in self.blocks ]
# but then we have the case of possibly some blocks being a view
# and some blocks not. setting in theory is possible on the non-view
# blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
# complicated
return False
def get_bool_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_bool], copy)
def get_numeric_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_numeric], copy)
def combine(self, blocks, copy=True):
""" return a new manager with the blocks """
if len(blocks) == 0:
return self.make_empty()
# FIXME: optimization potential
indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks]))
inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
new_items = self.items.take(indexer)
new_blocks = []
for b in blocks:
b = b.copy(deep=copy)
b.mgr_locs = com.take_1d(inv_indexer, b.mgr_locs.as_array, axis=0,
allow_fill=False)
new_blocks.append(b)
new_axes = list(self.axes)
new_axes[0] = new_items
return self.__class__(new_blocks, new_axes, do_integrity_check=False)
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(slobj)
else:
slicer = [slice(None)] * (axis + 1)
slicer[axis] = slobj
slicer = tuple(slicer)
new_blocks = [blk.getitem_block(slicer) for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
bm = self.__class__(new_blocks, new_axes, do_integrity_check=False,
fastpath=True)
bm._consolidate_inplace()
return bm
def __contains__(self, item):
return item in self.items
@property
def nblocks(self):
return len(self.blocks)
def copy(self, deep=True):
"""
Make deep or shallow copy of BlockManager
Parameters
----------
        deep : boolean or string, default True
If False, return shallow copy (do not copy data)
If 'all', copy data and a deep copy of the index
Returns
-------
copy : BlockManager
"""
# this preserves the notion of view copying of axes
if deep:
if deep == 'all':
copy = lambda ax: ax.copy(deep=True)
else:
copy = lambda ax: ax.view()
new_axes = [ copy(ax) for ax in self.axes]
else:
new_axes = list(self.axes)
return self.apply('copy', axes=new_axes, deep=deep,
do_integrity_check=False)
def as_matrix(self, items=None):
if len(self.blocks) == 0:
return np.empty(self.shape, dtype=float)
if items is not None:
mgr = self.reindex_axis(items, axis=0)
else:
mgr = self
if self._is_single_block or not self.is_mixed_type:
return mgr.blocks[0].get_values()
else:
return mgr._interleave()
def _interleave(self):
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
dtype = _interleaved_dtype(self.blocks)
result = np.empty(self.shape, dtype=dtype)
if result.shape[0] == 0:
# Workaround for numpy 1.7 bug:
#
# >>> a = np.empty((0,10))
# >>> a[slice(0,0)]
# array([], shape=(0, 10), dtype=float64)
# >>> a[[]]
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# IndexError: index 0 is out of bounds for axis 0 with size 0
return result
itemmask = np.zeros(self.shape[0])
for blk in self.blocks:
rl = blk.mgr_locs
result[rl.indexer] = blk.get_values(dtype)
itemmask[rl.indexer] = 1
if not itemmask.all():
raise AssertionError('Some items were not contained in blocks')
return result
def xs(self, key, axis=1, copy=True, takeable=False):
if axis < 1:
raise AssertionError('Can only take xs across axis >= 1, got %d'
% axis)
# take by position
if takeable:
loc = key
else:
loc = self.axes[axis].get_loc(key)
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = loc
slicer = tuple(slicer)
new_axes = list(self.axes)
# could be an array indexer!
if isinstance(loc, (slice, np.ndarray)):
new_axes[axis] = new_axes[axis][loc]
else:
new_axes.pop(axis)
new_blocks = []
if len(self.blocks) > 1:
# we must copy here as we are mixed type
for blk in self.blocks:
newb = make_block(values=blk.values[slicer],
klass=blk.__class__, fastpath=True,
placement=blk.mgr_locs)
new_blocks.append(newb)
elif len(self.blocks) == 1:
block = self.blocks[0]
vals = block.values[slicer]
if copy:
vals = vals.copy()
new_blocks = [make_block(values=vals, placement=block.mgr_locs,
klass=block.__class__, fastpath=True,)]
return self.__class__(new_blocks, new_axes)
def fast_xs(self, loc):
"""
get a cross sectional for a given location in the
items ; handle dups
        return the result; it *could* be a view in the case of a
single block
"""
if len(self.blocks) == 1:
return self.blocks[0].values[:, loc]
items = self.items
# non-unique (GH4726)
if not items.is_unique:
result = self._interleave()
if self.ndim == 2:
result = result.T
return result[loc]
# unique
dtype = _interleaved_dtype(self.blocks)
n = len(items)
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
# Such assignment may incorrectly coerce NaT to None
# result[blk.mgr_locs] = blk._slice((slice(None), loc))
for i, rl in enumerate(blk.mgr_locs):
result[rl] = blk._try_coerce_result(blk.iget((i, loc)))
return result
def consolidate(self):
"""
Join together blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
bm = self.__class__(self.blocks, self.axes)
bm._is_consolidated = False
bm._consolidate_inplace()
return bm
def _consolidate_inplace(self):
if not self.is_consolidated():
self.blocks = tuple(_consolidate(self.blocks))
self._is_consolidated = True
self._known_consolidated = True
self._rebuild_blknos_and_blklocs()
def get(self, item, fastpath=True):
"""
Return values for selected item (ndarray or BlockManager).
"""
if self.items.is_unique:
            if not isnull(item):
import os
import random
import math
import numpy as np
import pandas as pd
import itertools
from functools import lru_cache
##########################
## Compliance functions ##
##########################
def delayed_ramp_fun(Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current date
tau : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start-tau_days)/pd.Timedelta('1D')
def ramp_fun(Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current date
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start)/pd.Timedelta('1D')
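# Illustrative worked example, not part of the original source (numbers are
# made up): with Nc_old = 1.0, Nc_new = 11.0 and l = 5 days, two days after
# t_start the ramp returns 1.0 + (11.0 - 1.0) / 5 * 2 = 5.0. delayed_ramp_fun
# behaves the same way but only starts climbing tau_days after t_start.
# Neither function clamps at Nc_new, so callers are expected to switch to
# Nc_new once the ramp window of l days has passed.
#
#     t_start = pd.Timestamp('2020-03-15')
#     ramp_fun(1.0, 11.0, t_start + pd.Timedelta('2D'), t_start, 5)   # -> 5.0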
###############################
## Mobility update functions ##
###############################
def load_all_mobility_data(agg, dtype='fractional', beyond_borders=False):
"""
Function that fetches all available mobility data and adds it to a DataFrame with dates as indices and numpy matrices as values. Make sure to regularly update the mobility data with the notebook notebooks/preprocessing/Quick-update_mobility-matrices.ipynb to get the data for the most recent days. Also returns the average mobility over all available data, which might NOT always be desirable as a back-up mobility.
Input
-----
agg : str
Denotes the spatial aggregation at hand. Either 'prov', 'arr' or 'mun'
dtype : str
Choose the type of mobility data to return. Either 'fractional' (default), staytime (all available hours for region g spent in h), or visits (all unique visits from region g to h)
beyond_borders : boolean
If true, also include mobility abroad and mobility from foreigners
Returns
-------
all_mobility_data : pd.DataFrame
DataFrame with datetime objects as indices ('DATE') and np.arrays ('place') as value column
average_mobility_data : np.array
average mobility matrix over all available dates
"""
### Validate input ###
if agg not in ['mun', 'arr', 'prov']:
raise ValueError(
"spatial stratification '{0}' is not legitimate. Possible spatial "
"stratifications are 'mun', 'arr', or 'prov'".format(agg)
)
if dtype not in ['fractional', 'staytime', 'visits']:
raise ValueError(
"data type '{0}' is not legitimate. Possible mobility matrix "
"data types are 'fractional', 'staytime', or 'visits'".format(dtype)
)
### Load all available data ###
# Define absolute location of this file
abs_dir = os.path.dirname(__file__)
# Define data location for this particular aggregation level
data_location = f'../../../data/interim/mobility/{agg}/{dtype}'
# Iterate over all available interim mobility data
all_available_dates=[]
all_available_places=[]
directory=os.path.join(abs_dir, f'{data_location}')
for csv in os.listdir(directory):
# take YYYYMMDD information from processed CSVs. NOTE: this supposes a particular data name format!
datum = csv[-12:-4]
# Create list of datetime objects
all_available_dates.append(pd.to_datetime(datum, format="%Y%m%d"))
# Load the CSV as a np.array
if beyond_borders:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').values
else:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').drop(index='Foreigner', columns='ABROAD').values
if dtype=='fractional':
# make sure the rows sum up to 1 nicely again after dropping a row and a column
place = place / place.sum(axis=1)
# Create list of places
all_available_places.append(place)
# Create new empty dataframe with available dates. Load mobility later
df = pd.DataFrame({'DATE' : all_available_dates, 'place' : all_available_places}).set_index('DATE')
all_mobility_data = df.copy()
# Take average of all available mobility data
average_mobility_data = df['place'].values.mean()
return all_mobility_data, average_mobility_data
class make_mobility_update_function():
"""
Output the time-dependent mobility function with the data loaded in cache
Input
-----
proximus_mobility_data : DataFrame
Pandas DataFrame with dates as indices and matrices as values. Output of mobility.get_proximus_mobility_data.
proximus_mobility_data_avg : np.array
Average mobility matrix over all matrices
"""
def __init__(self, proximus_mobility_data, proximus_mobility_data_avg):
self.proximus_mobility_data = proximus_mobility_data
self.proximus_mobility_data_avg = proximus_mobility_data_avg
@lru_cache()
# Define mobility_update_func
def __call__(self, t, default_mobility=None):
"""
        Time-dependent function which returns a mobility matrix of type dtype for every date.
        Note: only works with datetime input (no integer time steps).
Input
-----
t : timestamp
current date as datetime object
states : str
formal necessity
param : str
formal necessity
default_mobility : np.array or None
If None (default), returns average mobility over all available dates. Else, return user-defined mobility
Returns
-------
place : np.array
square matrix with mobility of type dtype (fractional, staytime or visits), dimension depending on agg
"""
t = pd.Timestamp(t.date())
try: # if there is data available for this date (if the key exists)
place = self.proximus_mobility_data['place'][t]
except:
if default_mobility: # If there is no data available and a user-defined input is given
                place = default_mobility
else: # No data and no user input: fall back on average mobility
place = self.proximus_mobility_data_avg
return place
def mobility_wrapper_func(self, t, states, param, default_mobility=None):
t = pd.Timestamp(t.date())
if t <= pd.Timestamp('2020-03-17'):
place = self.__call__(t, default_mobility=default_mobility)
return np.eye(place.shape[0])
else:
return self.__call__(t, default_mobility=default_mobility)
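# Minimal wiring sketch, not part of the original source: the DataFrame and
# average matrix returned by load_all_mobility_data can be passed straight to
# make_mobility_update_function, and mobility_wrapper_func is then used as the
# time-dependent parameter function. The aggregation level and date below are
# chosen for illustration only.
#
#     all_mob, avg_mob = load_all_mobility_data(agg='prov', dtype='fractional')
#     mobility_update = make_mobility_update_function(all_mob, avg_mob)
#     P = mobility_update.mobility_wrapper_func(pd.Timestamp('2020-04-01'),
#                                               states=None, param=None)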
###################
## VOC functions ##
###################
class make_VOC_function():
"""
    Class that returns a time-dependent parameter function for COVID-19 SEIRD model parameter alpha (variant fraction).
    The current implementation includes the alpha - delta strains.
    If the class is initialized without arguments, a logistic model fitted to prevalence data of the alpha-gamma variant is used. The class can also be initialized with the alpha-gamma prevalence data provided by Prof. <NAME>.
    A logistic model fitted to prevalence data of the delta variant is always used.
Input
-----
*df_abc: pd.dataFrame (optional)
        Alpha, Beta, Gamma prevalence dataset by <NAME>, obtained using:
`from covid19model.data import VOC`
`df_abc = VOC.get_abc_data()`
`VOC_function = make_VOC_function(df_abc)`
Output
------
__class__ : function
Default variant function
"""
def __init__(self, *df_abc):
self.df_abc = df_abc
self.data_given = False
if self.df_abc != ():
self.df_abc = df_abc[0] # First entry in list of optional arguments (dataframe)
self.data_given = True
@lru_cache()
def VOC_abc_data(self,t):
return self.df_abc.iloc[self.df_abc.index.get_loc(t, method='nearest')]['baselinesurv_f_501Y.V1_501Y.V2_501Y.V3']
@lru_cache()
def VOC_abc_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-02-14')
k = 0.07
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
@lru_cache()
def VOC_delta_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-06-25')
k = 0.11
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
# Default VOC function includes British and Indian variants
def __call__(self, t, states, param):
# Convert time to timestamp
t = pd.Timestamp(t.date())
# Introduction Indian variant
t1 = pd.Timestamp('2021-05-01')
# Construct alpha
if t <= t1:
if self.data_given:
return np.array([1-self.VOC_abc_data(t), self.VOC_abc_data(t), 0])
else:
return np.array([1-self.VOC_abc_logistic(t), self.VOC_abc_logistic(t), 0])
else:
return np.array([0, 1-self.VOC_delta_logistic(t), self.VOC_delta_logistic(t)])
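# Illustrative note, not part of the original source: without prevalence data
# the default __call__ above returns a three-entry fraction vector (roughly
# [pre-alpha, alpha/gamma, delta]) that sums to 1. At the logistic midpoint
# t_sig = 2021-02-14 the alpha/gamma fraction is exactly 0.5, and before
# 2021-05-01 the delta entry is always 0:
#
#     VOC_function = make_VOC_function()
#     VOC_function(pd.Timestamp('2021-02-14'), states=None, param=None)
#     # -> array([0.5, 0.5, 0.0])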
###########################
## Vaccination functions ##
###########################
from covid19model.data.model_parameters import construct_initN
class make_vaccination_function():
"""
    Class that returns a two-fold time-dependent parameter function for the vaccination strategy by default. First, first-dose data by Sciensano are used; beyond the available data, a hypothetical vaccination scheme is used. If spatial data is given, the output consists of vaccination data per NIS code.
Input
-----
df : pd.dataFrame
*either* Sciensano public dataset, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_sciensano_COVID19_data(update=False)`
*or* public spatial vaccination data, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_public_spatial_vaccination_data(update=False,agg='arr')`
spatial : Boolean
True if df is spatially explicit (inferred from the presence of a 'NIS' index level). None by default.
Output
------
__class__ : function
Default vaccination function
"""
def __init__(self, df, age_classes=pd.IntervalIndex.from_tuples([(0,12),(12,18),(18,25),(25,35),(35,45),(45,55),(55,65),(65,75),(75,85),(85,120)], closed='left')):
age_stratification_size = len(age_classes)
# Assign inputs to object
self.df = df
self.age_agg = age_stratification_size
# Check if spatial data is provided
self.spatial = None
if 'NIS' in self.df.index.names:
self.spatial = True
self.space_agg = len(self.df.index.get_level_values('NIS').unique().values)
# infer aggregation (prov, arr or mun)
if self.space_agg == 11:
self.agg = 'prov'
elif self.space_agg == 43:
self.agg = 'arr'
elif self.space_agg == 581:
self.agg = 'mun'
else:
raise Exception(f"Space is {self.space_agg}-fold stratified. This is not recognized as being stratification at Belgian province, arrondissement, or municipality level.")
# Check if dose data is provided
self.doses = None
if 'dose' in self.df.index.names:
self.doses = True
self.dose_agg = len(self.df.index.get_level_values('dose').unique().values)
# Define start- and enddate
self.df_start = pd.Timestamp(self.df.index.get_level_values('date').min())
self.df_end = pd.Timestamp(self.df.index.get_level_values('date').max())
# Perform age conversion
# Define dataframe with desired format
iterables=[]
for index_name in self.df.index.names:
if index_name != 'age':
iterables += [self.df.index.get_level_values(index_name).unique()]
else:
iterables += [age_classes]
index = pd.MultiIndex.from_product(iterables, names=self.df.index.names)
self.new_df = pd.Series(index=index)
# Four possibilities exist: can this be sped up?
if self.spatial:
if self.doses:
# Shorten?
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, NIS, slice(None), dose)]
self.new_df.loc[(date, NIS, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
data = self.df.loc[(date,NIS)]
self.new_df.loc[(date, NIS)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
if self.doses:
for date in self.df.index.get_level_values('date').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, slice(None), dose)]
self.new_df.loc[(date, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
else:
for date in self.df.index.get_level_values('date').unique():
data = self.df.loc[(date)]
self.new_df.loc[(date)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
self.df = self.new_df
def convert_age_stratified_vaccination_data(self, data, age_classes, agg=None, NIS=None):
"""
A function to convert the sciensano vaccination data to the desired model age groups
Parameters
----------
data: pd.Series
A series of age-stratified vaccination incidences. Index must be of type pd.IntervalIndex.
age_classes : pd.IntervalIndex
Desired age groups of the vaccination dataframe.
agg: str
Spatial aggregation: prov, arr or mun
NIS : str
NIS code of the considered spatial element
Returns
-------
out: pd.Series
Converted data.
"""
# Pre-allocate new series
out = pd.Series(index = age_classes, dtype=float)
# Extract demographics
if agg:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).loc[NIS,:].values
demographics = construct_initN(None, agg).loc[NIS,:].values
else:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).values
demographics = construct_initN(None, agg).values
# Loop over desired intervals
for idx,interval in enumerate(age_classes):
result = []
for age in range(interval.left, interval.right):
try:
result.append(demographics[age]/data_n_individuals[data.index.get_level_values('age').contains(age)]*data.iloc[np.where(data.index.get_level_values('age').contains(age))[0][0]])
except:
result.append(0)
out.iloc[idx] = sum(result)
return out
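# Worked illustration of the loop above (made-up numbers, not Belgian demographics):
# if the data contain a single 0-20 bin with 100 administered doses and the model uses
# bins 0-12 and 12-18, every model age-year receives
#   demographics[age] / data_n_individuals[0-20 bin] * 100
# doses, and these shares are summed per model bin, so the 100 doses are redistributed
# in proportion to how many 0-11 and 12-17 year-olds live inside the 0-20 data bin.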
@lru_cache()
def get_data(self,t):
if self.spatial:
if self.doses:
try:
# Only includes doses A, B and C (so not boosters!) for now
data = np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
data[:,:,:-1] = np.array(self.df.loc[t,:,:,:].values).reshape( (self.space_agg, self.age_agg, self.dose_agg) )
return data
except:
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.space_agg, self.age_agg) )
except:
return np.zeros([self.space_agg, self.age_agg])
else:
if self.doses:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.age_agg, self.dose_agg) )
except:
return np.zeros([self.age_agg, self.dose_agg])
else:
try:
return np.array(self.df.loc[t,:].values)
except:
return np.zeros(self.age_agg)
def unidose_2021_vaccination_campaign(self, states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal):
# Compute the number of vaccine eligible individuals
VE = states['S'] + states['R']
# Initialize N_vacc
N_vacc = np.zeros(self.age_agg)
# Start vaccination loop
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses = 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx]] = daily_doses
daily_doses = 0
else:
N_vacc[vacc_order[idx]] = VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
def booster_campaign(self, states, daily_doses, vacc_order, stop_idx, refusal):
# Compute the number of booster eligible individuals
VE = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] \
+ states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Initialize N_vacc
N_vacc = np.zeros([self.age_agg,self.dose_agg])
# Booster vaccination strategy without refusal
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses= 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx],3] = daily_doses
daily_doses= 0
else:
N_vacc[vacc_order[idx],3] = VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
# Default vaccination strategy = Sciensano data + hypothetical scheme after end of data collection for unidose model only (for now)
def __call__(self, t, states, param, initN, daily_doses=60000, delay_immunity = 21, vacc_order = [8,7,6,5,4,3,2,1,0], stop_idx=9, refusal = [0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3]):
"""
time-dependent function for the Belgian vaccination strategy
First, all available first-dose data from Sciensano are used. Then, the user can specify a custom vaccination strategy of "daily_doses" first doses per day,
administered in the order specified by the vector "vacc_order" with a refusal propensity of "refusal" in every age group.
This vaccination strategy does not distinguish between vaccination doses, individuals are transferred to the vaccination circuit after some time delay after the first dose.
For use with the model `COVID19_SEIRD` and `COVID19_SEIRD_spatial_vacc` in `~src/models/models.py`
Parameters
----------
t : int
Simulation time
states: dict
Dictionary containing values of model states
param : dict
Model parameter dictionary
initN : list or np.array
Demographics according to the epidemiological model age bins
daily_doses : int
Number of doses administered per day. Default is 60000 doses/day.
delay_immunity : int
Time delay between first dose vaccination and start of immunity. Default is 21 days.
vacc_order : array
Vector containing vaccination prioritization preference. Default is old to young. Must be equal in length to the number of age bins in the model.
stop_idx : int
Index of the age group at which the vaccination campaign is halted. An index of 9 corresponds to vaccinating all age groups; an index of 8 corresponds to skipping the last age group in vacc_order.
refusal: array
Vector containing the fraction of individuals refusing a vaccine per age group. Default is 30% in every age group. Must be equal in length to the number of age bins in the model.
Return
------
N_vacc : np.array
Number of individuals to be vaccinated at simulation time "t" per age, or per [patch,age]
"""
# Convert time to suitable format
t = pd.Timestamp(t.date())
# Convert delay to a timedelta
delay = pd.Timedelta(str(int(delay_immunity))+'D')
# Compute vaccinated individuals after spring-summer 2021 vaccination campaign
check_time = pd.Timestamp('2021-10-01')
# Only for the non-spatial multi-dose vaccination model
if not self.spatial:
if self.doses:
if t == check_time:
self.fully_vaccinated_0 = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] + \
states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Use data
if t <= self.df_end + delay:
return self.get_data(t-delay)
# Projection into the future
else:
if self.spatial:
if self.doses:
# No projection implemented
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
# No projection implemented
return np.zeros([self.space_agg,self.age_agg])
else:
if self.doses:
return self.booster_campaign(states, daily_doses, vacc_order, stop_idx, refusal)
else:
return self.unidose_2021_vaccination_campaign(states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal)
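# Minimal standalone sketch of the greedy allocation loop used in
# unidose_2021_vaccination_campaign and booster_campaign above: doses are handed out
# following vacc_order, each group receives at most its pool of willing eligible
# individuals, and any remainder spills over to the next group. The function name and
# the absence of a stop_idx are simplifications for illustration only.
def _illustrate_greedy_dose_allocation(eligible, refusers, daily_doses, vacc_order):
    """Return per-age-group doses for one day (toy version of the loop above)."""
    N_vacc = np.zeros(len(eligible))
    for group in vacc_order:
        if daily_doses <= 0:
            break
        available = max(eligible[group] - refusers[group], 0)  # willing & eligible pool
        allocated = min(available, daily_doses)
        N_vacc[group] = allocated
        daily_doses -= allocated
    return N_vacc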
###################################
## Google social policy function ##
###################################
class make_contact_matrix_function():
"""
Class that returns a contact matrix based on 4 prevention parameters by default, with several other policy functions defined as well.
Input
-----
Nc_all : dictionary
contact matrices for home, schools, work, transport, leisure and others
df_google : pd.DataFrame
Google community mobility data
Output
------
__class__ : default function
Default output function, based on contact_matrix_4prev
"""
def __init__(self, df_google, Nc_all):
self.df_google = df_google.astype(float)
self.Nc_all = Nc_all
# Compute start and end dates of the dataframe
self.df_google_start = df_google.index.get_level_values('date')[0]
self.df_google_end = df_google.index.get_level_values('date')[-1]
# Check if provincial data is provided
self.provincial = None
if 'NIS' in self.df_google.index.names:
self.provincial = True
self.space_agg = len(self.df_google.index.get_level_values('NIS').unique().values)
@lru_cache() # cache results so that repeated calls with the same parameters are not recomputed
def __call__(self, t, prev_home=1, prev_schools=1, prev_work=1, prev_rest = 1,
school=None, work=None, transport=None, leisure=None, others=None, home=None):
"""
t : timestamp
current date
prev_... : float [0,1]
prevention parameter to estimate
school, work, transport, leisure, others : float [0,1]
level of opening of these sectors
if None, it is calculated from google mobility data
only school cannot be None!
"""
if school is None:
raise ValueError(
"Please indicate to what extent schools are open")
places_var = [work, transport, leisure, others]
places_names = ['work', 'transport', 'leisure', 'others']
GCMR_names = ['work', 'transport', 'retail_recreation', 'grocery']
if self.provincial:
if t < pd.Timestamp('2020-03-17'):
return np.ones(self.space_agg)[:,np.newaxis,np.newaxis]*self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[(t, slice(None)),:]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google.loc[(self.df_google_end - pd.Timedelta(days=14)): self.df_google_end, slice(None)].mean(level='NIS')/100
# Sort NIS codes from low to high
row.sort_index(level='NIS', ascending=True,inplace=True)
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]].values
else:
try:
test=len(place)
except:
place = place*np.ones(self.space_agg)
values_dict.update({places_names[idx]: place})
# Schools:
try:
test=len(school)
except:
school = school*np.ones(self.space_agg)
# Construct contact matrix
CM = (prev_home*np.ones(self.space_agg)[:, np.newaxis,np.newaxis]*self.Nc_all['home'] +
(prev_schools*school)[:, np.newaxis,np.newaxis]*self.Nc_all['schools'] +
(prev_work*values_dict['work'])[:,np.newaxis,np.newaxis]*self.Nc_all['work'] +
(prev_rest*values_dict['transport'])[:,np.newaxis,np.newaxis]*self.Nc_all['transport'] +
(prev_rest*values_dict['leisure'])[:,np.newaxis,np.newaxis]*self.Nc_all['leisure'] +
(prev_rest*values_dict['others'])[:,np.newaxis,np.newaxis]*self.Nc_all['others'])
else:
if t < pd.Timestamp('2020-03-17'):
return self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[t]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google[-14:-1].mean()/100
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]]
values_dict.update({places_names[idx]: place})
# Construct contact matrix
CM = (prev_home*self.Nc_all['home'] +
prev_schools*school*self.Nc_all['schools'] +
prev_work*values_dict['work']*self.Nc_all['work'] +
prev_rest*values_dict['transport']*self.Nc_all['transport'] +
prev_rest*values_dict['leisure']*self.Nc_all['leisure'] +
prev_rest*values_dict['others']*self.Nc_all['others'])
return CM
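# In words, the __call__ method above composes the effective contact matrix as a
# prevention-weighted sum of location-specific matrices,
#   CM = prev_home*Nc_home + prev_schools*school*Nc_schools + prev_work*work*Nc_work
#        + prev_rest*(transport*Nc_transport + leisure*Nc_leisure + others*Nc_others),
# where each opening fraction defaults to 1 minus the Google mobility reduction for
# that category (and is broadcast over NIS patches in the provincial case).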
def all_contact(self):
return self.Nc_all['total']
def all_contact_no_schools(self):
return self.Nc_all['total'] - self.Nc_all['schools']
def ramp_fun(self, Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start)/pd.Timedelta('1D') )
def delayed_ramp_fun(self, Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
tau : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start-tau_days)/pd.Timedelta('1D') )
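# Worked example of the two ramps above: with l = 10 and t = t_start + 5 days,
# ramp_fun returns Nc_old + 0.5*(Nc_new - Nc_old), i.e. the matrix halfway between the
# pre- and post-policy contact matrices; delayed_ramp_fun is the same linear
# interpolation, except the clock only starts running tau_days after t_start.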
####################
## National model ##
####################
def policies_all(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home):
'''
Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array (9x9)
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-03') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-06-01') # Start of lockdown relaxation
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-10-01') # Flanders releases all measures
t23 = pd.Timestamp('2021-11-01') # Start of autumn break
t24 = pd.Timestamp('2021-11-07') # End of autumn break
t25 = pd.Timestamp('2021-12-26') # Start of Christmas break
t26 = pd.Timestamp('2022-01-06') # End of Christmas break
t27 = pd.Timestamp('2022-02-28') # Start of Spring Break
t28 = pd.Timestamp('2022-03-06') # End of Spring Break
t29 = pd.Timestamp('2022-04-04') # Start of Easter Break
t30 = pd.Timestamp('2022-04-17') # End of Easter Break
t31 = pd.Timestamp('2022-07-01') # Start of summer holidays
t32 = pd.Timestamp('2022-09-01') # End of summer holidays
t33 = pd.Timestamp('2022-09-21') # Opening of universities
t34 = pd.Timestamp('2022-10-31') # Start of autumn break
t35 = pd.Timestamp('2022-11-06') # End of autumn break
if t <= t1:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t1 < t <= t1 + l1_days:
t = pd.Timestamp(t.date())
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
return self.ramp_fun(policy_old, policy_new, t, t1, l1)
elif t1 + l1_days < t <= t2:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
elif t2 < t <= t3:
l = (t3 - t2)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t2, l)
elif t3 < t <= t4:
l = (t4 - t3)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t3, l)
elif t4 < t <= t5:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
elif t5 < t <= t6:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
# Second wave
elif t6 < t <= t7:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0.7)
elif t7 < t <= t8:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t8 < t <= t8 + l2_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
return self.ramp_fun(policy_old, policy_new, t, t8, l2)
elif t8 + l2_days < t <= t9:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t9 < t <= t10:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t10 < t <= t11:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t11 < t <= t12:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t12 < t <= t13:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t13 < t <= t14:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t14 < t <= t15:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t15 < t <= t16:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t16 < t <= t17:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t17 < t <= t18:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t18 < t <= t19:
l = (t19 - t18)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=1)
return self.ramp_fun(policy_old, policy_new, t, t18, l)
elif t19 < t <= t20:
l = (t20 - t19)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t19, l)
elif t20 < t <= t21:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0.7)
elif t21 < t <= t22:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.70*prev_rest_relaxation, school=1)
elif t22 < t <= t23:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t23 < t <= t24:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
elif t24 < t <= t25:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t25 < t <= t26:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t26 < t <= t27:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t27 < t <= t28:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
leisure=1.1, work=0.9, transport=1, others=1, school=0)
elif t28 < t <= t29:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t29 < t <= t30:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t30 < t <= t31:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t31 < t <= t32:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t32 < t <= t33:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=0.8)
elif t33 < t <= t34:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t34 < t <= t35:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.9, leisure=1.1, transport=1, others=1, school=0)
else:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
def policies_all_WAVE4(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home, date_measures, scenario):
'''
Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-03') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-06-01') # Start of lockdown relaxation
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-10-01') # Flanders releases all measures
t23 = pd.Timestamp('2021-11-01') # Start of autumn break
t24 = pd.Timestamp('2021-11-07') # End of autumn break
# Fourth WAVE
t25 = pd.Timestamp('2021-11-22') # Start of mandatory telework + start easing in leisure restrictions
t26 = pd.Timestamp('2021-12-18') # Start of Christmas break for schools
t27 = pd.Timestamp('2021-12-26') # Start of Christmas break for general population
t28 = pd.Timestamp('2022-01-06') # End of Christmas break
t29 = pd.Timestamp('2022-01-28') # End of measures
t30 = pd.Timestamp('2022-02-28') # Start of Spring Break
t31 = pd.Timestamp('2022-03-06') # End of Spring Break
t32 = pd.Timestamp('2022-04-04') # Start of Easter Break
t33 = pd.Timestamp('2022-04-17') # End of Easter Break
t34 = pd.Timestamp('2022-07-01') # Start of summer holidays
t35 = pd.Timestamp('2022-09-01') # End of summer holidays
t36 = pd.Timestamp('2022-09-21') # Opening of universities
t37 = pd.Timestamp('2022-10-31') # Start of autumn break
t38 = pd.Timestamp('2022-11-06') # End of autumn break
scenarios_work = [1, 0.7, 0.7, 0.7, 0.7]
scenarios_schools = [1, 1, 1, 1, 1]
scenarios_leisure = [1, 1, 0.75, 0.50, 0.25]
if t <= t1:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t1 < t <= t1 + l1_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
return self.ramp_fun(policy_old, policy_new, t, t1, l1)
elif t1 + l1_days < t <= t2:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
elif t2 < t <= t3:
l = (t3 - t2)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t2, l)
elif t3 < t <= t4:
l = (t4 - t3)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t3, l)
elif t4 < t <= t5:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
elif t5 < t <= t6:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
# Second wave
elif t6 < t <= t7:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0.7)
elif t7 < t <= t8:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t8 < t <= t8 + l2_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
return self.ramp_fun(policy_old, policy_new, t, t8, l2)
elif t8 + l2_days < t <= t9:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t9 < t <= t10:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t10 < t <= t11:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t11 < t <= t12:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t12 < t <= t13:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t13 < t <= t14:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t14 < t <= t15:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t15 < t <= t16:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t16 < t <= t17:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t17 < t <= t18:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t18 < t <= t19:
l = (t19 - t18)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=1)
return self.ramp_fun(policy_old, policy_new, t, t18, l)
elif t19 < t <= t20:
l = (t20 - t19)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t19, l)
elif t20 < t <= t21:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0.7)
elif t21 < t <= t22:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.70*prev_rest_relaxation, school=1)
elif t22 < t <= t23:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t23 < t <= t24:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
elif t24 < t <= t25:
# End of autumn break --> Date of measures
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t25 < t <= t25 + pd.Timedelta(5, unit='D'):
# Date of measures --> End easing in leisure restrictions
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, work=scenarios_work[scenario], school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, work=scenarios_work[scenario], leisure=scenarios_leisure[scenario], school=scenarios_schools[scenario])
return self.ramp_fun(policy_old, policy_new, t, t25, 5)
elif t25 + pd.Timedelta(5, unit='D') < t <= t26:
# End easing in leisure restrictions --> Early schools closure before Christmas holiday
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, work=scenarios_work[scenario], leisure=scenarios_leisure[scenario], school=scenarios_schools[scenario])
elif t26 < t <= t27:
# Early schools closure before Christmas holiday --> Christmas holiday
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=scenarios_work[scenario], leisure=scenarios_leisure[scenario], school=0)
elif t27 < t <= t28:
# Christmas holiday
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=scenarios_work[scenario]-0.2, leisure=scenarios_leisure[scenario], transport=scenarios_work[scenario]-0.2, school=0)
elif t28 < t <= t29:
# Christmas holiday --> End of measures
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
leisure=scenarios_leisure[scenario], work=scenarios_work[scenario], school=1)
elif t29 < t <= t30:
# End of Measures --> Spring break
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
leisure=1, work=1, transport=1, others=1, school=1)
elif t30 < t <= t31:
# Spring Break
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=0.7, others=1, school=0)
elif t31 < t <= t32:
# Spring Break --> Easter
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t32 < t <= t33:
# Easter
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=1, others=1, school=0)
elif t33 < t <= t34:
# Easter --> Summer
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t34 < t <= t35:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=1, others=1, school=0)
elif t35 < t <= t36:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=0.7)
elif t36 < t <= t37:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t37 < t <= t38:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=1, others=1, school=0)
else:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
###################
## Spatial model ##
###################
def policies_all_spatial(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home):
'''
Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array (9x9)
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-07') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-05-07') # Start of relaxations
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-11-01') # Start of autumn break
t23 = pd.Timestamp('2021-11-07') # End of autumn break
t24 = pd.Timestamp('2021-12-26') # Start of Christmas break
t25 = pd.Timestamp('2022-01-06') # End of Christmas break
t26 = pd.Timestamp('2022-02-28') # Start of Spring Break
t27 = pd.Timestamp('2022-03-06') # End of Spring Break
t28 = pd.Timestamp('2022-04-04') # Start of Easter Break
t29 = pd.Timestamp('2022-04-17') # End of Easter Break
t30 = pd.Timestamp('2022-07-01') # Start of summer holidays
t31 = pd.Timestamp('2022-09-01') # End of summer holidays
t32 = pd.Timestamp('2022-09-21') # Opening of universities
t33 = pd.Timestamp('2022-10-31') # Start of autumn break
t34 = pd.Timestamp('2022-11-06') # End of autumn break
spatial_summer_lockdown_2020 = tuple(np.array([prev_rest_lockdown, prev_rest_lockdown, # F
prev_rest_lockdown, # W
prev_rest_lockdown, # Bxl
prev_rest_lockdown, prev_rest_lockdown, # F
prev_rest_relaxation, prev_rest_relaxation, # W
prev_rest_lockdown, # F
0.7*prev_rest_relaxation, 0.7*prev_rest_relaxation])) # W
co_F = 0.60
co_W = 0.50
co_Bxl = 0.45
spatial_summer_2021 = tuple(np.array([co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, # W
co_Bxl*prev_rest_relaxation, # Bxl
co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation, # W
co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation])) # W
co_F = 1.00
co_W = 0.50
co_Bxl = 0.45
relaxation_flanders_2021 = tuple(np.array([co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, # W
co_Bxl*prev_rest_relaxation, # Bxl
co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation, # W
co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation])) # W
if t <= t1:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1) #self.Nc_all['total']
elif t1 < t <= t1 + l1_days:
t = pd.Timestamp(t.date())
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1) #self.Nc_all['total']
policy_new = self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
return self.ramp_fun(policy_old, policy_new, t, t1, l1)
elif t1 + l1_days < t <= t2:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
elif t2 < t <= t3:
l = (t3 - t2)/pd.Timedelta(days=1)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t2, l)
# 2020
elif t3 < t <= t4:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_relaxation, school=0)
elif t4 < t <= t5:
return self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_lockdown_2020, school=0)
elif t5 < t <= t6:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
# Second wave
elif t6 < t <= t7:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0.8)
elif t7 < t <= t8:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t8 < t <= t8 + l2_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
return self.ramp_fun(policy_old, policy_new, t, t8, l2)
elif t8 + l2_days < t <= t9:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t9 < t <= t10:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t10 < t <= t11:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t11 < t <= t12:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t12 < t <= t13:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t13 < t <= t14:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t14 < t <= t15:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t15 < t <= t16:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t16 < t <= t17:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t17 < t <= t18:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t18 < t <= t19:
l = (t19 - t18)/pd.Timedelta(days=1)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_2021, school=0)
return self.ramp_fun(policy_old, policy_new, t, t18, l)
elif t19 < t <= t20:
return self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_2021, school=0)
elif t20 < t <= t21:
return self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_2021, school=0.8)
elif t21 < t <= t22:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, school=1)
elif t22 < t <= t23:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, school=0)
elif t23 < t <= t24:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, school=1)
elif t24 < t <= t25:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t25 < t <= t26:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=1, leisure=1, transport=1, others=1, school=1)
elif t26 < t <= t27:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
leisure=1.1, work=0.9, transport=1, others=1, school=0)
elif t27 < t <= t28:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=1, leisure=1, transport=1, others=1, school=1)
elif t28 < t <= t29:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t29 < t <= t30:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t30 < t <= t31:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t31 < t <= t32:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=0.8)
elif t32 < t <= t33:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t33 < t <= t34:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.9, leisure=1.1, transport=1, others=1, school=0)
else:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
def policies_all_spatial_WAVE4(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home, date_measures, scenario):
'''
Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
# standard libraries
import os
# third-party libraries
import pandas as pd
# local imports
from .. import count_data
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class TestCsvToDf:
"""
Tests converting a csv with various headers into a processible DataFrame
"""
def test_timestamp(self):
"""
Check if a csv w/ a timestamp is properly converted to the desired DataFrame
"""
data = os.path.join(THIS_DIR, 'test_timestamp.csv')
element_id = 'tagID'
timestamp = 'timestamp'
lat = 'lat'
lon = 'lon'
test_df = count_data.csv_to_df(data, element_id=element_id, timestamp=timestamp, lat=lat, lon=lon)
assert pd.util.hash_pandas_object(test_df).sum() == -6761865716520410554
def test_timestamp_ba(self):
"""
Check if a csv w/ a timestamp and grouped counts is properly converted to the desired DataFrame
"""
data = os.path.join(THIS_DIR, 'test_timestamp_ba.csv')
element_id = 'tagID'
timestamp = 'timestamp'
boardings = 'boardings'
alightings = 'alightings'
lat = 'lat'
lon = 'lon'
test_df = count_data.csv_to_df(data, element_id=element_id, timestamp=timestamp,
boardings=boardings, alightings=alightings, lat=lat, lon=lon)
assert pd.util.hash_pandas_object(test_df).sum() == 7008548250528393651
def test_session(self):
"""
Check if a csv w/ session times is properly converted to the desired DataFrame
"""
data = os.path.join(THIS_DIR, 'test_session.csv')
element_id = 'MacPIN'
session_start = 'SessionStart_Epoch'
session_end = 'SessionEnd_Epoch'
lat = 'GPS_LAT'
lon = 'GPS_LONG'
test_df = count_data.csv_to_df(data, element_id=element_id, session_start=session_start, session_end=session_end, lat=lat, lon=lon)
assert pd.util.hash_pandas_object(test_df).sum() == 7098407329788286247
def test_session_ba(self):
"""
Check if a csv w/ session times and grouped counts is properly converted to the desired DataFrame
"""
data = os.path.join(THIS_DIR, 'test_session_ba.csv')
element_id = 'MacPIN'
session_start = 'SessionStart_Epoch'
session_end = 'SessionEnd_Epoch'
boardings = 'boardings'
alightings = 'alightings'
lat = 'GPS_LAT'
lon = 'GPS_LONG'
test_df = count_data.csv_to_df(data, element_id=element_id, session_start=session_start, session_end=session_end,
boardings=boardings, alightings=alightings, lat=lat, lon=lon)
assert pd.util.hash_pandas_object(test_df).sum() == 2589903708124850504
class TestStandardizeDatetime:
"""
Tests ensuring all times are datetime format
"""
def test_no_change_needed(self):
"""
Tests if all timestamps are already datetime and no change is needed
"""
test_times = ['2018-02-22 20:08:00', '2018-02-09 18:05:00', '2018-02-09 18:26:00']
test_df = pd.DataFrame(test_times, columns=['timestamp'])
test_df['timestamp'] = pd.to_datetime(test_df['timestamp'])
processed_df = count_data.standardize_datetime(test_df)
assert processed_df['timestamp'].dtype == 'datetime64[ns]'
def test_timestamp_epoch(self):
"""
Tests if timestamp is an epoch time
"""
test_times = ['1519330080', '1518199500', '1518200760']
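# these epoch strings are the UTC equivalents of the datetimes used above:
# 1519330080 -> 2018-02-22 20:08:00, 1518199500 -> 2018-02-09 18:05:00,
# 1518200760 -> 2018-02-09 18:26:00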
test_df = pd.DataFrame(test_times, columns=['timestamp'])
processed_df = count_data.standardize_datetime(test_df)
assert processed_df['timestamp'].dtype == 'datetime64[ns]'
def test_session_epoch(self):
"""
Tests if session times are epoch times
"""
test_times = [['1519330080', '1518199500'], ['1518200760', '1519330080'], ['1518199500', '1518200760']]
test_df = pd.DataFrame(test_times, columns=['session_start', 'session_end'])
processed_df = count_data.standardize_datetime(test_df)
assert processed_df['session_start'].dtype == 'datetime64[ns]'
assert processed_df['session_end'].dtype == 'datetime64[ns]'
class TestStandardizeEpoch:
"""
Tests ensuring all times are unix epoch
"""
def test_no_change_needed(self):
"""
Tests if all timestamps are already epochs and no change is needed
"""
test_times = [1519330080, 1518199500, 1518200760]
test_df = pd.DataFrame(test_times, columns=['timestamp'])
processed_df = count_data.standardize_epoch(test_df)
assert processed_df['timestamp'].dtype == 'int64'
def test_timestamp_datetime(self):
"""
Tests if timestamp is a datetime
"""
test_times = ['2018-02-22 20:08:00', '2018-02-09 18:05:00', '2018-02-09 18:26:00']
test_df = pd.DataFrame(test_times, columns=['timestamp'])
test_df['timestamp'] = pd.to_datetime(test_df['timestamp'])
processed_df = count_data.standardize_epoch(test_df)
assert processed_df['timestamp'].dtype == 'int64'
def test_session_datetime(self):
"""
Tests if session times are datetimes
"""
test_times = [['2018-02-22 20:08:00', '2018-02-09 18:05:00'], ['2018-02-09 18:26:00', '2018-02-22 20:08:00'],
['2018-02-09 18:05:00', '2018-02-09 18:26:00']]
test_df = pd.DataFrame(test_times, columns=['session_start', 'session_end'])
test_df['session_start'] = pd.to_datetime(test_df['session_start'])
test_df['session_end'] = pd.to_datetime(test_df['session_end'])
processed_df = count_data.standardize_epoch(test_df)
assert processed_df['session_start'].dtype == 'int64'
assert processed_df['session_end'].dtype == 'int64'
class TestSessionLengthFilter:
"""
Tests limiting the length of sessions to be included in candidate sessions
"""
def test_filter_sessions(self):
"""
Tests if dataframes with sessions are correctly filtered
"""
session_max = 100
test_sessions = [[1519330080, 1519330090], [151899500, 1518209500], [1518200760, 1518200770]]
filtered_sessions = [[1519330080, 1519330090], [1518200760, 1518200770]]
test_df = pd.DataFrame(test_sessions, columns=['session_start', 'session_end'])
filtered_df = pd.DataFrame(filtered_sessions, columns=['session_start', 'session_end'])
filtered_test_df = count_data.session_length_filter(test_df, session_max)
assert filtered_test_df.equals(filtered_df)
def test_no_sessions(self):
"""
Tests if dataframes with single timestamps are correctly not changed
"""
session_max = 100
test_timestamps = [1519330080, 1518199500, 1518200760]
test_df = pd.DataFrame(test_timestamps, columns=['timestamp'])
filtered_test_df = count_data.session_length_filter(test_df, session_max)
assert filtered_test_df.equals(test_df)
class TestTimeRangeJoinNp:
"""
Tests range joining two dataframes based on time
"""
def test_d1timestamp_d2session_np(self):
"""
Tests with data1 having a timestamp and data2 having session times
"""
time_range = 100
data1_list = [[1519330080, 'bob1'], [1519330030, 'bob1'], [1518200760, 'sue1']]
data2_list = [[1519330050, 1519330150, 'bob2'], [1518200780, 1518200980, 'sue2'], [1529200760, 1529200790, 'earl2']]
target_list = [[1519330080, 'bob1', 1519330050, 1519330150, 'bob2'],
[1519330030, 'bob1', 1519330050, 1519330150, 'bob2'],
[1518200760, 'sue1', 1518200780, 1518200980, 'sue2']]
data1 = pd.DataFrame(data1_list, columns=['timestamp1', 'name1'])
data2 = pd.DataFrame(data2_list, columns=['session_start2', 'session_end2', 'name2'])
target = pd.DataFrame(target_list, columns=['timestamp1', 'name1', 'session_start2', 'session_end2', 'name2'])
df_range_join = count_data.time_range_join_np(data1, data2, time_range)
assert df_range_join.equals(target)
def test_d1session_d2timestamp_np(self):
"""
Tests with data1 having session times and data2 having a timestamp
"""
time_range = 100
data1_list = [[1519330050, 1519330150, 'bob1'], [1518200780, 1518200980, 'sue1'], [1529200760, 1529200790, 'earl1']]
data2_list = [[1519330080, 'bob2'], [1519330030, 'bob2'], [1518200760, 'sue2']]
target_list = [[1519330050, 1519330150, 'bob1', 1519330080, 'bob2'],
[1519330050, 1519330150, 'bob1', 1519330030, 'bob2'],
[1518200780, 1518200980, 'sue1', 1518200760, 'sue2']]
data1 = pd.DataFrame(data1_list, columns=['session_start1', 'session_end1', 'name1'])
data2 = pd.DataFrame(data2_list, columns=['timestamp2', 'name2'])
target = pd.DataFrame(target_list, columns=['session_start1', 'session_end1', 'name1', 'timestamp2', 'name2'])
df_range_join = count_data.time_range_join_np(data1, data2, time_range)
assert df_range_join.equals(target)
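# A minimal sketch of what a time-range join has to accomplish (not the count_data
# implementation): cross-join the two frames, then keep pairs whose timestamp falls
# within `time_range` seconds of the other frame's session window. Column names follow
# the d1-timestamp / d2-session test case above; pd.merge(how='cross') needs pandas>=1.2.
def _sketch_time_range_join(data1, data2, time_range):
    merged = data1.merge(data2, how='cross')
    in_range = ((merged['timestamp1'] >= merged['session_start2'] - time_range) &
                (merged['timestamp1'] <= merged['session_end2'] + time_range))
    return merged[in_range].reset_index(drop=True)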
class TestTimeRangeJoinSql:
"""
Tests range joining two dataframes based on time
"""
def test_d1timestamp_d2session_sql(self):
"""
Tests with data1 having a timestamp and data2 having session times
"""
time_range = 100
data1_list = [[1519330080, 'bob1'], [1519330030, 'bob1'], [1518200760, 'sue1']]
data2_list = [[1519330050, 1519330150, 'bob2'], [1518200780, 1518200980, 'sue2'], [1529200760, 1529200790, 'earl2']]
target_list = [[1519330080, 'bob1', 1519330050, 1519330150, 'bob2'],
[1519330030, 'bob1', 1519330050, 1519330150, 'bob2'],
[1518200760, 'sue1', 1518200780, 1518200980, 'sue2']]
data1 = pd.DataFrame(data1_list, columns=['timestamp1', 'name1'])
data2 = pd.DataFrame(data2_list, columns=['session_start2', 'session_end2', 'name2'])
target = pd.DataFrame(target_list, columns=['timestamp1', 'name1', 'session_start2', 'session_end2', 'name2'])
df_range_join = count_data.time_range_join_sql(data1, data2, time_range)
assert df_range_join.equals(target)
def test_d1session_d2timestamp_sql(self):
"""
Tests with data1 having session times and data2 having a timestamp
"""
time_range = 100
data1_list = [[1519330050, 1519330150, 'bob1'], [1518200780, 1518200980, 'sue1'], [1529200760, 1529200790, 'earl1']]
data2_list = [[1519330080, 'bob2'], [1519330030, 'bob2'], [1518200760, 'sue2']]
target_list = [[1519330050, 1519330150, 'bob1', 1519330080, 'bob2'],
[1519330050, 1519330150, 'bob1', 1519330030, 'bob2'],
[1518200780, 1518200980, 'sue1', 1518200760, 'sue2']]
data1 = pd.DataFrame(data1_list, columns=['session_start1', 'session_end1', 'name1'])
data2 = pd.DataFrame(data2_list, columns=['timestamp2', 'name2'])
target = pd.DataFrame(target_list, columns=['session_start1', 'session_end1', 'name1', 'timestamp2', 'name2'])
df_range_join = count_data.time_range_join_sql(data1, data2, time_range)
assert df_range_join.equals(target)
class TestHaversineDistFilter:
"""
Tests filtering using haversine distance
"""
def test_distance_filter(self):
dist_max = 3000
test_locations = [[44.49, -123.51, 44.51, -123.49], [44.0, -123.0, 43.0, -124.0]]
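# the first lat/lon pair is ~2.7 km apart and the second ~140 km apart, so with
# dist_max = 3000 (assumed to be metres) only the first row should survive the filter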
target_list = [[44.49, -123.51, 44.51, -123.49]]
dataframe = pd.DataFrame(test_locations, columns=['lat1', 'lon1', 'lat2', 'lon2'])
target = pd.DataFrame(target_list, columns=['lat1', 'lon1', 'lat2', 'lon2'])
filtered_df = count_data.haversine_dist_filter(dataframe, dist_max)
assert filtered_df.equals(target)
class TestPairwiseFilter:
"""
Tests the creation of candidate pairs of identifiers based on spatiotemporal filters
"""
def test_pairwise_filter(self):
data1_list = [['bob1', 1519330050, 44.4999, -123.5001], ['bob1', 1519330080, 44.5001, -123.4999], ['sue1', 1519330150, 43.0, -124.0]]
data2_list = [['bob2', 1519330040, 1519330070, 44.50, -123.50], ['jake2', 1519333150, 1519333320, 44.0, -123.0]]
target_list = [['bob1', 1519330050, 44.4999, -123.5001, 'bob2', 1519330040, 1519330070, 44.50, -123.50],
['bob1', 1519330080, 44.5001, -123.4999, 'bob2', 1519330040, 1519330070, 44.50, -123.50]]
data1 = pd.DataFrame(data1_list, columns=['element_id', 'timestamp', 'lat', 'lon'])
import os
import pandas as pd
import numpy as np
import pytest
from pyam import IamDataFrame, read_datapackage
from pyam.testing import assert_iamframe_equal
from conftest import TEST_DATA_DIR
FILTER_ARGS = dict(scenario='scen_a')
def test_io_csv(test_df):
# write to csv
file = 'testing_io_write_read.csv'
test_df.to_csv(file)
# read from csv
import_df = IamDataFrame(file)
# assert that `data` tables are equal and delete file
pd.testing.assert_frame_equal(test_df.data, import_df.data)
os.remove(file)
@pytest.mark.parametrize("meta_args", [
[{}, {}],
[dict(include_meta='foo'), dict(meta_sheet_name='foo')]
])
def test_io_xlsx(test_df, meta_args):
# add column to `meta`
test_df.set_meta(['a', 'b'], 'string')
# write to xlsx (direct file name and ExcelWriter, see bug report #300)
file = 'testing_io_write_read.xlsx'
for f in [file, pd.ExcelWriter(file)]:
test_df.to_excel(f, **meta_args[0])
if isinstance(f, pd.ExcelWriter):
f.close()
# read from xlsx
import_df = IamDataFrame(file, **meta_args[1])
# assert that IamDataFrame instances are equal and delete file
assert_iamframe_equal(test_df, import_df)
os.remove(file)
@pytest.mark.parametrize("args", [{}, dict(sheet_name='meta')])
def test_load_meta(test_df, args):
file = os.path.join(TEST_DATA_DIR, 'testing_metadata.xlsx')
test_df.load_meta(file, **args)
obs = test_df.meta
dct = {'model': ['model_a'] * 2, 'scenario': ['scen_a', 'scen_b'],
'category': ['imported', np.nan], 'exclude': [False, False]}
exp = pd.DataFrame(dct).set_index(['model', 'scenario'])
| pd.testing.assert_series_equal(obs['exclude'], exp['exclude']) | pandas.testing.assert_series_equal |
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
import datetime as dt
import os
from typing import Union
import numpy as np
import pandas as pd
import pytest
import pytz
from gs_quant.target.common import XRef, PricingLocation, Currency as CurrEnum
from numpy.testing import assert_allclose
from pandas.testing import assert_series_equal
from pandas.tseries.offsets import CustomBusinessDay
from pytz import timezone
from testfixtures import Replacer
from testfixtures.mock import Mock
import gs_quant.timeseries.measures as tm
import gs_quant.timeseries.measures_rates as tm_rates
from gs_quant.api.gs.assets import GsTemporalXRef, GsAssetApi, GsIdType, IdList, GsAsset
from gs_quant.api.gs.data import GsDataApi, MarketDataResponseFrame
from gs_quant.api.gs.data import QueryType
from gs_quant.data.core import DataContext
from gs_quant.data.dataset import Dataset
from gs_quant.data.fields import Fields
from gs_quant.errors import MqError, MqValueError, MqTypeError
from gs_quant.markets.securities import AssetClass, Cross, Index, Currency, SecurityMaster, Stock, \
Swap, CommodityNaturalGasHub
from gs_quant.session import GsSession, Environment
from gs_quant.test.timeseries.utils import mock_request
from gs_quant.timeseries import Returns
from gs_quant.timeseries.measures import BenchmarkType
_index = [pd.Timestamp('2019-01-01')]
_test_datasets = ('TEST_DATASET',)
def mock_empty_market_data_response():
df = MarketDataResponseFrame()
df.dataset_ids = ()
return df
def map_identifiers_default_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-LIBOR-BBA" in ids:
return {"USD-LIBOR-BBA": "MAPDB7QNB2TZVQ0E"}
elif "EUR-EURIBOR-TELERATE" in ids:
return {"EUR-EURIBOR-TELERATE": "MAJNQPFGN1EBDHAE"}
elif "GBP-LIBOR-BBA" in ids:
return {"GBP-LIBOR-BBA": "MAFYB8Z4R1377A19"}
elif "JPY-LIBOR-BBA" in ids:
return {"JPY-LIBOR-BBA": "MABMVE27EM8YZK33"}
elif "EUR OIS" in ids:
return {"EUR OIS": "MARFAGXDQRWM07Y2"}
def map_identifiers_swap_rate_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m" in ids:
return {"USD-3m": "MAAXGV0GZTW4GFNC"}
elif "EUR-6m" in ids:
return {"EUR-6m": "MA5WM2QWRVMYKDK0"}
elif "KRW" in ids:
return {"KRW": 'MAJ6SEQH3GT0GA2Z'}
def map_identifiers_inflation_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "CPI-UKRPI" in ids:
return {"CPI-UKRPI": "MAQ7ND0MBP2AVVQW"}
elif "CPI-CPXTEMU" in ids:
return {"CPI-CPXTEMU": "MAK1FHKH5P5GJSHH"}
def map_identifiers_cross_basis_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m/JPY-3m" in ids:
return {"USD-3m/JPY-3m": "MA99N6C1KF9078NM"}
elif "EUR-3m/USD-3m" in ids:
return {"EUR-3m/USD-3m": "MAXPKTXW2D4X6MFQ"}
elif "GBP-3m/USD-3m" in ids:
return {"GBP-3m/USD-3m": "MA8BZHQV3W32V63B"}
def get_data_policy_rate_expectation_mocker(
start: Union[dt.date, dt.datetime] = None,
end: Union[dt.date, dt.datetime] = None,
as_of: dt.datetime = None,
since: dt.datetime = None,
fields: Union[str, Fields] = None,
asset_id_type: str = None,
**kwargs) -> pd.DataFrame:
if 'meetingNumber' in kwargs:
if kwargs['meetingNumber'] == 0:
return mock_meeting_spot()
elif 'meeting_date' in kwargs:
if kwargs['meeting_date'] == dt.date(2019, 10, 24):
return mock_meeting_spot()
return mock_meeting_expectation()
def test_parse_meeting_date():
assert tm.parse_meeting_date(5) == ''
assert tm.parse_meeting_date('') == ''
assert tm.parse_meeting_date('test') == ''
assert tm.parse_meeting_date('2019-09-01') == dt.date(2019, 9, 1)
def test_currency_to_default_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
asset_id_list = ["MAZ7RWC904JYHYPS", "MAJNQPFGN1EBDHAE", "MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8",
"MA4B66MW5E27U8P32SB"]
correct_mapping = ["MAPDB7QNB2TZVQ0E", "MAJNQPFGN1EBDHAE", "MAFYB8Z4R1377A19", "MABMVE27EM8YZK33",
"MA4J1YB8XZP2BPT8", "MA4B66MW5E27U8P32SB"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_default_swap_rate_asset(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_swap_rate_mocker)
asset_id_list = ['MAZ7RWC904JYHYPS', 'MAJNQPFGN1EBDHAE', 'MAJ6SEQH3GT0GA2Z']
correct_mapping = ['MAAXGV0GZTW4GFNC', 'MA5WM2QWRVMYKDK0', 'MAJ6SEQH3GT0GA2Z']
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_swap_rate_asset(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_inflation_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_inflation_mocker)
asset_id_list = ["MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
correct_mapping = ["MAQ7ND0MBP2AVVQW", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_inflation_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.currency_to_inflation_benchmark_rate('MA66CZBQJST05XKG') == 'MA66CZBQJST05XKG'
def test_cross_to_basis(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_cross_basis_mocker)
asset_id_list = ["MAYJPCVVF2RWXCES", "MA4B66MW5E27U8P32SB", "nobbid"]
correct_mapping = ["MA99N6C1KF9078NM", "MA4B66MW5E27U8P32SB", "nobbid"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_basis(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.cross_to_basis('MAYJPCVVF2RWXCES') == 'MAYJPCVVF2RWXCES'
def test_currency_to_tdapi_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA25DW5ZGC1BSC8Y', 'NOK')
bbid_mock.return_value = 'NOK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAFRSWPAF5QPNTP2' == correct_id
bbid_mock.return_value = 'CHF'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAW25BGQJH9P6DPT' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAA9MVX15AJNQCVG' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA6QCAP9B7ABS9HA' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAEE219J5ZP0ZKRK' == correct_id
bbid_mock.return_value = 'SEK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAETMVTPNP3199A5' == correct_id
bbid_mock.return_value = 'HKD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MABRNGY8XRFVC36N' == correct_id
bbid_mock.return_value = 'NZD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAH16NHE1HBN0FBZ' == correct_id
bbid_mock.return_value = 'AUD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAY8147CRK0ZP53B' == correct_id
bbid_mock.return_value = 'CAD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MANJ8SS88WJ6N28Q' == correct_id
bbid_mock.return_value = 'KRW'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAP55AXG5SQVS6C5' == correct_id
bbid_mock.return_value = 'INR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA20JHJXN1PD5HGE' == correct_id
bbid_mock.return_value = 'CNY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA4K1D8HH2R0RQY5' == correct_id
bbid_mock.return_value = 'SGD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA5CQFHYBPH9E5BS' == correct_id
bbid_mock.return_value = 'DKK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAF131NKWVRESFYA' == correct_id
asset = Currency('MA890', 'PLN')
bbid_mock.return_value = 'PLN'
assert 'MA890' == tm_rates._currency_to_tdapi_swap_rate_asset(asset)
replace.restore()
def test_currency_to_tdapi_basis_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA890', 'NOK')
bbid_mock.return_value = 'NOK'
assert 'MA890' == tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAQB1PGEJFCET3GG' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAGRG2VT11GQ2RQ9' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAHCYNB3V75JC5Q8' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAXVRBEZCJVH0C4V' == correct_id
replace.restore()
def test_check_clearing_house():
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house('lch')
assert tm_rates._ClearingHouse.CME == tm_rates._check_clearing_house(tm_rates._ClearingHouse.CME)
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house(None)
invalid_ch = ['NYSE']
for ch in invalid_ch:
with pytest.raises(MqError):
tm_rates._check_clearing_house(ch)
def test_get_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
assert dict(csaTerms='USD-1') == tm_rates._get_swap_csa_terms('USD', fed_funds_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_swap_csa_terms('EUR', estr_index)
assert {} == tm_rates._get_swap_csa_terms('EUR', euribor_index)
assert {} == tm_rates._get_swap_csa_terms('USD', usd_libor_index)
def test_get_basis_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
sofr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.SOFR.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
eonia_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EONIA.value]
assert dict(csaTerms='USD-1') == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, sofr_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_basis_swap_csa_terms('EUR', estr_index, eonia_index)
assert {} == tm_rates._get_basis_swap_csa_terms('EUR', eonia_index, euribor_index)
assert {} == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, usd_libor_index)
def test_match_floating_tenors():
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='6m')
assert swap_args == tm_rates._match_floating_tenors(swap_args)
def test_get_term_struct_date(mocker):
today = datetime.datetime.today()
biz_day = CustomBusinessDay()
assert today == tm_rates._get_term_struct_date(tenor=today, index=today, business_day=biz_day)
date_index = datetime.datetime(2020, 7, 31, 0, 0)
assert date_index == tm_rates._get_term_struct_date(tenor='2020-07-31', index=date_index, business_day=biz_day)
assert date_index == tm_rates._get_term_struct_date(tenor='0b', index=date_index, business_day=biz_day)
assert datetime.datetime(2021, 7, 30, 0, 0) == tm_rates._get_term_struct_date(tenor='1y', index=date_index,
business_day=biz_day)
def test_cross_stored_direction_for_fx_vol(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_stored_direction_for_fx_vol(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross_for_fx_forecast(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_usd_based_cross(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_to_usd_based_cross(Cross('FUN', 'EURUSD'))
replace.restore()
def test_cross_stored_direction(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_stored_direction_for_fx_vol(Cross('FUN', 'EURUSD'))
replace.restore()
def test_get_tdapi_rates_assets(mocker):
mock_asset_1 = GsAsset(asset_class='Rate', id='MAW25BGQJH9P6DPT', type_='Swap', name='Test_asset')
mock_asset_2 = GsAsset(asset_class='Rate', id='MAA9MVX15AJNQCVG', type_='Swap', name='Test_asset')
mock_asset_3 = GsAsset(asset_class='Rate', id='MANQHVYC30AZFT7R', type_='BasisSwap', name='Test_asset')
replace = Replacer()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1]
assert 'MAW25BGQJH9P6DPT' == tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict(asset_parameters_termination_date='10y', asset_parameters_effective_date='0b')
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = []
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict()
assert ['MAW25BGQJH9P6DPT', 'MAA9MVX15AJNQCVG'] == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
    # This test case checks matching the SOFR maturity with the LIBOR leg and flipping the legs to get the right asset
kwargs = dict(type='BasisSwap', asset_parameters_termination_date='10y',
asset_parameters_payer_rate_option=BenchmarkType.LIBOR,
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=BenchmarkType.SOFR,
asset_parameters_receiver_designated_maturity='1y',
asset_parameters_clearing_house='lch', asset_parameters_effective_date='Spot',
asset_parameters_notional_currency='USD',
pricing_location='NYC')
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_3]
assert 'MANQHVYC30AZFT7R' == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
def test_get_swap_leg_defaults():
result_dict = dict(currency=CurrEnum.JPY, benchmark_type='JPY-LIBOR-BBA', floating_rate_tenor='6m',
pricing_location=PricingLocation.TKO)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.JPY)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.USD, benchmark_type='USD-LIBOR-BBA', floating_rate_tenor='3m',
pricing_location=PricingLocation.NYC)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.USD)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.EUR, benchmark_type='EUR-EURIBOR-TELERATE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.EUR)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.SEK, benchmark_type='SEK-STIBOR-SIDE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.SEK)
assert result_dict == defaults
def test_check_forward_tenor():
valid_tenors = [datetime.date(2020, 1, 1), '1y', 'imm2', 'frb2', '1m', '0b']
for tenor in valid_tenors:
assert tenor == tm_rates._check_forward_tenor(tenor)
invalid_tenors = ['5yr', 'imm5', 'frb0']
for tenor in invalid_tenors:
with pytest.raises(MqError):
tm_rates._check_forward_tenor(tenor)
def mock_commod(_cls, _q):
d = {
'price': [30, 30, 30, 30, 35.929686, 35.636039, 27.307498, 23.23177, 19.020833, 18.827291, 17.823749, 17.393958,
17.824999, 20.307603, 24.311249, 25.160103, 25.245728, 25.736873, 28.425206, 28.779789, 30.519996,
34.896348, 33.966973, 33.95489, 33.686348, 34.840307, 32.674163, 30.261665, 30, 30, 30]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-05-01', periods=31, freq='H', tz=timezone('UTC')))
df.dataset_ids = _test_datasets
return df
def mock_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
11.6311,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 9))
df.dataset_ids = _test_datasets
return df
def mock_fair_price(_cls, _q):
d = {
'fairPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_natgas_forward_price(_cls, _q):
d = {
'forwardPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_fair_price_swap(_cls, _q):
d = {'fairPrice': [2.880]}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)]))
df.dataset_ids = _test_datasets
return df
def mock_implied_volatility(_cls, _q):
d = {
'impliedVolatility': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_missing_bucket_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"J20",
"K20",
"M20",
]
}
return pd.DataFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 8))
def mock_fx_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
d = {
'strikeReference': ['delta', 'spot', 'forward'],
'relativeStrike': [25, 100, 100],
'impliedVolatility': [5, 1, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-01-01', periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_fx_forecast(_cls, _q):
d = {
'fxForecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_delta(_cls, _q):
d = {
'relativeStrike': [25, -25, 0],
'impliedVolatility': [1, 5, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_empty(_cls, _q):
d = {
'strikeReference': [],
'relativeStrike': [],
'impliedVolatility': []
}
df = MarketDataResponseFrame(data=d, index=[])
df.dataset_ids = _test_datasets
return df
def mock_fx_switch(_cls, _q, _n):
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_empty)
replace.restore()
return Cross('MA1889', 'ABC/XYZ')
def mock_curr(_cls, _q):
d = {
'swapAnnuity': [1, 2, 3],
'swapRate': [1, 2, 3],
'basisSwapRate': [1, 2, 3],
'swaptionVol': [1, 2, 3],
'atmFwdRate': [1, 2, 3],
'midcurveVol': [1, 2, 3],
'capFloorVol': [1, 2, 3],
'spreadOptionVol': [1, 2, 3],
'inflationSwapRate': [1, 2, 3],
'midcurveAtmFwdRate': [1, 2, 3],
'capFloorAtmFwdRate': [1, 2, 3],
'spreadOptionAtmFwdRate': [1, 2, 3],
'strike': [0.25, 0.5, 0.75]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_cross(_cls, _q):
d = {
'basis': [1, 2, 3],
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq(_cls, _q):
d = {
'relativeStrike': [0.75, 0.25, 0.5],
'impliedVolatility': [5, 1, 2],
'impliedCorrelation': [5, 1, 2],
'realizedCorrelation': [3.14, 2.71828, 1.44],
'averageImpliedVolatility': [5, 1, 2],
'averageImpliedVariance': [5, 1, 2],
'averageRealizedVolatility': [5, 1, 2],
'impliedVolatilityByDeltaStrike': [5, 1, 2],
'fundamentalMetric': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
idx = [pd.Timestamp(datetime.datetime.now(pytz.UTC))]
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=idx)
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.datetime.now(pytz.UTC).date() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_err(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
raise MqValueError('error while getting last')
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_empty(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame()
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_norm(_cls, _q):
d = {
'relativeStrike': [-4.0, 4.0, 0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_spot(_cls, _q):
d = {
'relativeStrike': [0.75, 1.25, 1.0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_inc(_cls, _q):
d = {
'relativeStrike': [0.25, 0.75],
'impliedVolatility': [5, 1]
}
df = MarketDataResponseFrame(data=d, index=_index * 2)
df.dataset_ids = _test_datasets
return df
def mock_meeting_expectation():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2020, 1, 29)],
'endingDate': [dt.date(2020, 1, 29)],
'meetingNumber': [2],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2020, 1, 23)],
'value': [-0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_spot():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2019, 10, 30)],
'endingDate': [dt.date(2019, 12, 18)],
'meetingNumber': [0],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2019, 10, 24)],
'value': [-0.004522570525]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_absolute():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2', 'MARFAGXDQRWM07Y2'],
'location': ['NYC', 'NYC'],
'rateType': ['Meeting Forward', 'Meeting Forward'],
'startingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'endingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'meetingNumber': [0, 2],
'valuationDate': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 10, 24), datetime.date(2020, 1, 23)],
'value': [-0.004522570525, -0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_ois_spot():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Spot'],
'startingDate': [datetime.date(2019, 12, 6)],
'endingDate': [datetime.date(2019, 12, 7)],
'meetingNumber': [-1],
'valuationDate': [datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 12, 6)],
'value': [-0.00455]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_esg(_cls, _q):
d = {
"esNumericScore": [2, 4, 6],
"esNumericPercentile": [81.2, 75.4, 65.7],
"esPolicyScore": [2, 4, 6],
"esPolicyPercentile": [81.2, 75.4, 65.7],
"esScore": [2, 4, 6],
"esPercentile": [81.2, 75.4, 65.7],
"esProductImpactScore": [2, 4, 6],
"esProductImpactPercentile": [81.2, 75.4, 65.7],
"gScore": [2, 4, 6],
"gPercentile": [81.2, 75.4, 65.7],
"esMomentumScore": [2, 4, 6],
"esMomentumPercentile": [81.2, 75.4, 65.7],
"gRegionalScore": [2, 4, 6],
"gRegionalPercentile": [81.2, 75.4, 65.7],
"controversyScore": [2, 4, 6],
"controversyPercentile": [81.2, 75.4, 65.7],
"esDisclosurePercentage": [49.2, 55.7, 98.4]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_index_positions_data(
asset_id,
start_date,
end_date,
fields=None,
position_type=None
):
return [
{'underlyingAssetId': 'MA3',
'netWeight': 0.1,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA1',
'netWeight': 0.6,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA2',
'netWeight': 0.3,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
}
]
def mock_rating(_cls, _q):
d = {
'rating': ['Buy', 'Sell', 'Buy', 'Neutral'],
'convictionList': [1, 0, 0, 0]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_gsdeer_gsfeer(_cls, assetId, start_date):
d = {
'gsdeer': [1, 1.2, 1.1],
'gsfeer': [2, 1.8, 1.9],
'year': [2000, 2010, 2020],
'quarter': ['Q1', 'Q2', 'Q3']
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
return df
def mock_factor_profile(_cls, _q):
d = {
'growthScore': [0.238, 0.234, 0.234, 0.230],
'financialReturnsScore': [0.982, 0.982, 0.982, 0.982],
'multipleScore': [0.204, 0.192, 0.190, 0.190],
'integratedScore': [0.672, 0.676, 0.676, 0.674]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_commodity_forecast(_cls, _q):
d = {
'forecastPeriod': ['3m', '3m', '3m', '3m'],
'forecastType': ['spotReturn', 'spotReturn', 'spotReturn', 'spotReturn'],
'commodityForecast': [1700, 1400, 1500, 1600]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def test_skew():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_norm)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.NORMALIZED, 4)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_spot)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock.return_value = mock_empty_market_data_response()
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert actual.empty
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_inc)
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
replace.restore()
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', None, 25)
def test_skew_fx():
replace = Replacer()
cross = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = cross
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_delta)
mock = cross
actual = tm.skew(mock, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.DELTA, 25, real_time=True)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.SPOT, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.FORWARD, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', None, 25)
replace.restore()
def test_implied_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol)
idx = pd.date_range(end=datetime.datetime.now(pytz.UTC).date(), periods=4, freq='D')
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_NEUTRAL)
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL)
replace.restore()
def test_implied_vol_no_last():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
idx = pd.date_range(end=datetime.date.today() - datetime.timedelta(days=1), periods=3, freq='D')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_err)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_empty)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace.restore()
def test_implied_vol_fx():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
# for different delta strikes
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_vol)
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL, 25)
expected = pd.Series([5, 1, 2, 3], index=pd.date_range('2019-01-01', periods=4, freq='D'), name='impliedVolatility')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_PUT, 25)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_NEUTRAL)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
    # a missing strike, the unsupported NORMALIZED reference, and SPOT/FORWARD strikes other than 100 all raise
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 25)
replace.restore()
def test_fx_forecast():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
actual = tm.fx_forecast(mock, '12m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), | pd.Series(actual) | pandas.Series |
# coding: utf-8
"""
Aurelio_Amerio_Higgs_v4.py
In this analysis, several MLP models are applied to the Kaggle Higgs dataset
in order to distinguish signal from noise.
----------------------------------------------------------------------
author: <NAME> (<EMAIL>)
Student ID: QT08313
Date: 03/08/2018
----------------------------------------------------------------------
"""
# # Import data and preprocess it
# In[49]:
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
use_all_features = True
use_full_data = True
test_sml_size = 3000
# In[50]:
#file paths
train_sig_path_sml = "data/train_sml_sig.csv"
train_bg_path_sml = "data/train_sml_bg.csv"
train_sig_path = "data/train_sig.csv"
train_bg_path = "data/train_bg.csv"
test_sig_path = "data/test_sig.csv"
test_bg_path = "data/test_bg.csv"
#read csv
train_sig_sml = pd.read_csv(train_sig_path_sml, header=0)
train_bg_sml = pd.read_csv(train_bg_path_sml, header=0)
train_sig = pd.read_csv(train_sig_path, header=0)
train_bg = pd.read_csv(train_bg_path, header=0)
test_sig = pd.read_csv(test_sig_path, header=0)
test_bg = pd.read_csv(test_bg_path, header=0)
#merge sig and bg dataframes
all_data_train_sml = train_bg_sml.append(train_sig_sml)
all_data_train = train_bg.append(train_sig)
all_data_test = test_bg.append(test_sig)
#one-hot encode the labels -> this gives two new features: Label_b and Label_s
all_data_train_sml_one_hot = | pd.get_dummies(all_data_train_sml) | pandas.get_dummies |
# This is the machine learning pipeline for generating the confusion matrices, classification reports, errors and AUC.
# Outputs include the following:
# 1. auc_lgr.png
# 2. auc_rf.png
# 3. lgr_clssification.csv
# 4. lgr_train_confusion.csv
# 5. lgr_test_confusion.csv
# 6. rf_clssification.csv
# 7. rf_train_confusion.csv
# 8. rf_test_confusion.csv
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import roc_auc_score
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.utils import resample
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import GridSearchCV
import time
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve
# Import cleaned data
cleaned = pd.read_csv('data/cleaned_train_data.csv')
cleaned_test = pd.read_csv('data/cleaned_test_data.csv')
# Separate majority and minority classes
majority = cleaned[cleaned.C_SEV==2]
minority = cleaned[cleaned.C_SEV==1]
# Downsample majority class
maj_downsampled = resample(majority,
replace=True, # sample with replacement
                           n_samples=2559, # to match the minority class size
                           random_state=407) # reproducible results
# Combine the minority class with the downsampled majority class
resampled = | pd.concat([minority, maj_downsampled]) | pandas.concat |
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
import math
import time
import ruptures as rpt
from datetime import datetime
data = pd.read_csv("test_anom2.csv")
print(data.head())
data.set_index(np.arange(len(data.index)), inplace=True)
def check_if_shift_v0(data, column_name, start_index, end_index, check_period):
""" using median to see if it changes significantly in shift """
period_before = data[column_name][start_index - check_period: start_index]
period_in_the_middle = data[column_name][start_index:end_index]
period_after = data[column_name][end_index: end_index + check_period]
period_before_median = abs(np.nanmedian(period_before))
period_in_the_middle_median = abs(np.nanmedian(period_in_the_middle))
period_after_median = abs(np.nanmedian(period_after))
upper_threshold = period_in_the_middle_median * 2
down_threshold = period_in_the_middle_median / 2
if (upper_threshold < period_before_median and upper_threshold < period_after_median) or\
(down_threshold > period_before_median and down_threshold > period_after_median):
return True
else:
return False
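# Minimal usage sketch (hypothetical data, not part of the original script): a
# series that sits around 10, jumps to ~40 for 20 samples, then returns to 10
# is flagged, because the median of the middle window is more than double the
# medians of the windows before and after it.
#
# demo = pd.DataFrame({'value': [10] * 100 + [40] * 20 + [10] * 100})
# check_if_shift_v0(demo, 'value', start_index=100, end_index=120, check_period=50)  # -> True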
def prepare_data_to_test(data, data_name: str):
""" datetime type """
data["time"] = | pd.to_datetime(data.time) | pandas.to_datetime |
import argparse
import os
import numpy as np
import pandas as pd
from skimage import io
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('input',
type=str,
help='original dataset directory')
parser.add_argument('output',
type=str,
help='patched dataset directory')
parser.add_argument('--format',
type=str,
default='png',
help='file format')
parser.add_argument('--ph',
type=int,
default=300,
help='patch height')
parser.add_argument('--pw',
type=int,
default=300,
help='patch width')
parser.add_argument('--sy',
type=int,
default=150,
help='stride for axis y')
parser.add_argument('--sx',
type=int,
default=150,
help='stride for axis x')
args = parser.parse_args()
return args
def main():
# parse command line arguments
args = parse_args()
# create output path if not exists
os.makedirs(os.path.join(args.output), exist_ok=True)
os.makedirs(os.path.join(args.output, args.format), exist_ok=True)
os.makedirs(os.path.join(args.output, args.format, 'train'), exist_ok=True)
os.makedirs(os.path.join(args.output, args.format, 'train_labels'), exist_ok=True)
os.makedirs(os.path.join(args.output, args.format, 'val'), exist_ok=True)
os.makedirs(os.path.join(args.output, args.format, 'val_labels'), exist_ok=True)
os.makedirs(os.path.join(args.output, args.format, 'test'), exist_ok=True)
os.makedirs(os.path.join(args.output, args.format, 'test_labels'), exist_ok=True)
SPLIT_KEY = 'split'
IMAGE_PATH_KEY = '{}_image_path'.format(args.format)
LABEL_PATH_KEY = '{}_label_path'.format(args.format)
input_metadata_path = os.path.join(args.input, 'metadata.csv')
output_metadata_path = os.path.join(args.output, 'metadata.csv')
input_df = | pd.read_csv(input_metadata_path) | pandas.read_csv |
from trame import update_state
from trame.layouts import SinglePage
from trame.html import vuetify
import pandas as pd
import altair as alt
from xaitk_demo.core import TASKS
from xaitk_demo.ui_helper import (
model_execution,
data_selection,
xai_parameters,
xai_viz,
compact_styles,
combo_styles,
)
# -----------------------------------------------------------------------------
# Main page layout
# -----------------------------------------------------------------------------
layout = SinglePage("xaiTK")
layout.logo.children = [vuetify.VIcon("mdi-brain")]
layout.title.set_text("XaiTK")
layout.toolbar.children += [
vuetify.VSpacer(),
vuetify.VSelect(
label="Task",
v_model=("task_active", "classification"),
items=("task_available", TASKS),
**compact_styles,
**combo_styles,
),
vuetify.VSelect(
label="Model",
v_model=("model_active", ""),
items=("model_available", []),
**compact_styles,
**combo_styles,
),
vuetify.VSelect(
label="Saliency Algorithm",
v_show="saliency_available.length > 1",
v_model=("saliency_active", ""),
items=("saliency_available", []),
**compact_styles,
**combo_styles,
),
vuetify.VProgressLinear(
indeterminate=True,
absolute=True,
bottom=True,
active=("busy",),
),
]
model_content, model_chart = model_execution()
layout.content.children += [
vuetify.VContainer(
fluid=True,
children=[
vuetify.VRow(
classes="d-flex flex-shrink-1",
children=[data_selection(), model_content],
),
vuetify.VRow(
classes="d-flex flex-shrink-1",
children=[xai_parameters(), xai_viz()],
),
],
)
]
# -----------------------------------------------------------------------------
# UI update helper
# -----------------------------------------------------------------------------
def update_prediction(results={}):
# classes
classes = results.get("classes", [])
df = | pd.DataFrame(classes, columns=["Class", "Score"]) | pandas.DataFrame |
"""This module is dedicated to helpers for the Messari class"""
import logging
from typing import Union, List, Dict
import pandas as pd
from messari.utils import validate_input, validate_asset_fields_list_order, find_and_update_asset_field
def fields_payload(asset_fields: Union[str, List],
asset_metric: str = None, asset_profile_metric: str = None):
"""Returns payload with fields parameter.
:param asset_fields: str, list
List of asset fields.
:param asset_metric: str
Single metric string to filter metric data.
:param asset_profile_metric: str
Single profile metric string to filter profile data.
:return String of fields query parameter.
"""
asset_fields = validate_input(asset_fields)
if 'slug' not in asset_fields:
asset_fields = asset_fields + ['slug']
if asset_metric:
if 'metrics' not in asset_fields:
asset_fields = asset_fields + ['metrics']
# Ensure that metric is the last value in asset fields to successfully concatenate url
asset_fields = validate_asset_fields_list_order(asset_fields, 'metrics')
# Update metric in asset fields to include drill down asset metric
asset_fields = find_and_update_asset_field(asset_fields, 'metrics',
'/'.join(['metrics', asset_metric]))
if asset_profile_metric:
if 'profile' not in asset_fields:
asset_fields = asset_fields + ['profile']
# Ensure that metric is the last value in asset fields to successfully concatenate url
asset_fields = validate_asset_fields_list_order(asset_fields, 'profile')
# Update metric in asset fields to include drill down asset metric
asset_fields = find_and_update_asset_field(asset_fields, 'profile',
'/'.join(['profile', asset_profile_metric]))
return ','.join(asset_fields)
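# Illustrative example (assumes validate_input simply normalizes the input into a
# list of field names; the exact helper behavior lives in messari.utils):
#
# fields_payload(['id', 'name'], asset_metric='market_data')
# would be expected to return 'id,name,slug,metrics/market_data' -- 'slug' and
# 'metrics' are appended automatically and the metric is drilled down into
# 'metrics/market_data'.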
def timeseries_to_dataframe(response: Dict) -> pd.DataFrame:
"""Convert timeseries data to pandas dataframe
:param response: dict
Dictionary of asset time series data keyed by symbol
:return: pandas dataframe
"""
df_list, key_list = [], []
for key, value in response.items():
key_list.append(key)
if isinstance(value['values'], list):
            df_columns = [f'{name}' for name in value['parameters_columns']]
values_df = | pd.DataFrame.from_records(value['values'], columns=df_columns) | pandas.DataFrame.from_records |
"""Download Library Locations for a given OCLC ID Number."""
# =============================================================================
# Import libraries
# =============================================================================
import logging # For outputting information to the user.
import pandas
import requests # For making HTML requests
import networkx # For network mapping
import matplotlib.pyplot as plt # For drawing plots
# =============================================================================
# Load configuration settings from a file called config.py
# =============================================================================
import config
# We can now get access to our oclc_wskey varible from that file with
# config.oclc_wskey
# =============================================================================
# Other settings
# =============================================================================
oclc_institution_codes = {
"RBN": "Brown University Library",
"ZCU": "Columbia University Libraries",
"COO": "Cornell University Library",
"DRB": "Dartmouth College Library",
"NDD": "Duke University Libraries",
"HLS": "Harvard University Libraries",
"JHE": "Johns Hopkins University Libraries",
"MYG": "Massachusetts Institute of Technology Libraries",
"PUL": "Princeton University Library",
"STF": "Stanford University",
"YUS": "Yale University Library",
"CGU": "University of Chicago Library"
}
# =============================================================================
# Define API query URL
# =============================================================================
# The API we'll be using is the WorldCat Search Library Catalog URLs API
# https://www.oclc.org/developer/develop/web-services/worldcat-search-api/library-catalog-url.en.html
parameters_for_api = {
"frbrGrouping": "off",
"format": "json",
"maximumLibraries": "100",
"wskey": config.oclc_wskey}
comma_separated_oclc_institutions = ','.join(
oclc_institution_codes.keys())
class ErrorWithAPI(Exception):
pass
def create_api_request(
item_identifier,
comma_separated_institution_codes=comma_separated_oclc_institutions,
api_request_parameters=parameters_for_api):
"""Given an item ID and some OCLC institution symbols, query the OCLC
WorldCat Search Library Catalog URLs API"""
print(f'Running API query for item ID "{item_identifier}"...')
# This should NOT have a trailing slash ('/') at the end:
api_base_url = ('http://www.worldcat.org/webservices/catalog/content/'
                    'libraries/isbn')
# Update the static api parameters to include the (dynamic) DOI:
api_request_parameters = api_request_parameters.copy()
api_request_parameters['oclcsymbol'] = comma_separated_institution_codes
# Add the item identifier to the URL, so that it looks like this:
api_url = f'{api_base_url}/{item_identifier}'
api_response = requests.get(
api_url,
params=api_request_parameters)
# We'll check the status code below: Possible codes are:
# 200: Successful request
# 400: Bad/malformed request
# 500: Server error
if api_response.status_code != 200:
raise ErrorWithAPI(
            f'Problem contacting API: We received Status Code '
            f'{api_response.status_code}. The full response text is '
            f'below: {api_response.text}')
logging.info(f'Returning query results from URL "{api_response.url}"')
return api_response
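# Example usage (hypothetical ISBN; requires a valid WorldCat wskey in config.py):
#
# response = create_api_request('9780131103627')
# holdings = response.json()  # format=json is set in parameters_for_api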
# =============================================================================
# Run the queries for some item IDs
# =============================================================================
# Create some empty dataframes, which we'll fill in below.
library_holdings = | pandas.DataFrame() | pandas.DataFrame |
import asyncio
from predcrash_utils.commons import get_asset_root, get_file_content
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import conda
import os
from logzero import logger as LOGGER
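# Workaround: basemap often fails to locate the PROJ4 data directory inside a
# conda environment, so point PROJ_LIB at the environment's share/proj folder
# before importing Basemap.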
conda_file_dir = conda.__file__
conda_dir = conda_file_dir.split('lib')[0]
proj_lib = os.path.join(os.path.join(conda_dir, 'share'), 'proj')
os.environ["PROJ_LIB"] = proj_lib
from mpl_toolkits.basemap import Basemap
import matplotlib as mpl
mpl.rcParams['font.size'] = 10.
mpl.rcParams['font.family'] = 'Comic Sans MS'
mpl.rcParams['axes.labelsize'] = 8.
mpl.rcParams['xtick.labelsize'] = 6.
mpl.rcParams['ytick.labelsize'] = 6.
class Map_france(object):
def __init__(self, x1=-6., x2=10., y1=41., y2=51.5, figsize=(8, 8)):
self.x1 = x1
self.x2 = x2
self.y1 = y1
self.y2 = y2
self.figsize = figsize
@staticmethod
async def make_data(datasets:list, years:list):
cfg = await get_asset_root()
list_directory = []
for year in years:
for dataset in datasets:
list_directory.append(f'{dataset}_{year}')
list_df = []
for directory in list_directory:
            file_path = await get_file_content(cfg, directory)
            df = pd.read_csv(file_path, encoding='latin1', index_col='Num_Acc')
list_df.append(df)
df_total = | pd.concat(list_df) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Country data of B.1.1.7 occurrence.
Function: get_country_data().
@author: @hk_nien
"""
import re
from pathlib import Path
import pandas as pd
import datetime
import numpy as np
def _ywd2date(ywd):
"""Convert 'yyyy-Www-d' string to date (12:00 on that day)."""
twelvehours = pd.Timedelta('12 h')
dt = datetime.datetime.strptime(ywd, "%G-W%V-%w") + twelvehours
return dt
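# Example: _ywd2date('2021-W02-4') should give 12:00 on Thursday 2021-01-14
# (day 4 of ISO week 2 of 2021), returned as a datetime object.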
def _add_odds_column(df):
df['or_b117'] = df['f_b117'] / (1 - df['f_b117'])
def _convert_ywd_records(records, colnames=('f_b117',)):
"""From records to DataFrame with columns.
Records are tuples with ('yyyy-Www-d', value, ...).
"""
df = pd.DataFrame.from_records(records, columns=('Date',) + tuple(colnames))
df['Date'] = [_ywd2date(r) for r in df['Date']]
df = df.set_index('Date')
if 'f_b117' in colnames and 'or_b117' not in colnames:
_add_odds_column(df)
return df
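# Minimal illustration (hypothetical fractions):
#
#     df = _convert_ywd_records([('2021-W01-4', 0.05), ('2021-W02-4', 0.10)])
#     # -> DataFrame indexed by Date (Thursdays, 12:00) with columns f_b117 and or_b117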
def set_date_in_records_keys(rrecords):
"""Replace {date} in key by date in 1st item for each, in-place.
e.g. 'NL ({date})' -> 'NL (2021-01-01)'.
"""
keys = list(rrecords.keys())
for k in keys:
new_k = k.format(date=rrecords[k][0]['date'])
record = rrecords[k]
del rrecords[k]
rrecords[new_k] = record
def _add_meta_record(reclist, desc, mdict, refs):
"""Add record to reclist; record is a dict with keys:
desc=desc,
**mdict,
refs->tuple of URLs
mdict['date'] will be converted to DateTime object.
"""
refs = tuple(refs)
rec = {'desc': desc,
**mdict,
'refs': refs,
}
if 'date' in rec:
rec['date'] = pd.to_datetime(rec['date'])
reclist.append(rec)
def _get_data_uk_genomic():
# https://twitter.com/hk_nien/status/1344937884898488322
# data points read from plot (as ln prevalence)
seedata = {
'2020-12-21': [
dict(date='2020-12-21', is_recent=False, is_seq=True, en_part='South East England'),
['https://t.co/njAXPsVlvb?amp=1'],
['2020-09-25', -4.2*1.25],
['2020-10-02', -3.5*1.25],
['2020-10-15', -3.2*1.25],
['2020-10-20', -2.3*1.25],
['2020-10-29', -2.3*1.25],
['2020-11-05', -1.5*1.25],
['2020-11-12', -0.9*1.25],
['2020-11-19', -0.15*1.25],
['2020-11-27', 0.8*1.25]
],
'2020-12-31': [
dict(date='2020-12-31', is_recent=True, is_seq=True, en_part='South East England'),
['https://www.imperial.ac.uk/media/imperial-college/medicine/mrc-gida/2020-12-31-COVID19-Report-42-Preprint-VOC.pdf'
],
['2020-10-31', -2.1],
['2020-11-08', -1.35],
['2020-11-15', -0.75],
['2020-11-22', -0.05],
['2020-11-29', 0.05],
]
}
cdict = {}
meta_records = []
for report_date, records in seedata.items():
df = pd.DataFrame.from_records(records[2:], columns=['Date', 'ln_odds'])
df['Date'] = pd.to_datetime(df['Date'])
odds = np.exp(df['ln_odds'])
df['f_b117'] = odds / (1 + odds)
df = df[['Date', 'f_b117']].set_index('Date')
desc = f'South East England (seq, {report_date})'
cdict[desc] = df
_add_meta_record(meta_records, desc, records[0], records[1])
return cdict, meta_records
def _get_data_countries_weeknos():
"""Countries with f_117 data by week number.
Return dataframe with metadata and dict of dataframes.
"""
# All country records are ('{year}-W{weekno}-{weekday}', fraction_b117)
# Item 0 in each list: metadata
# Item 1 in each list: source URLs
country_records = {
'DK (seq; {date})': [
dict(ccode='DK', date='2021-01-01', is_seq=True, is_recent=False),
['https://covid19.ssi.dk/-/media/cdn/files/opdaterede-data-paa-ny-engelsk-virusvariant-sarscov2-cluster-b117--01012021.pdf?la=da'],
('2020-W49-4', 0.002),
('2020-W50-4', 0.005),
('2020-W51-4', 0.009),
('2020-W52-4', 0.023)
],
'DK (seq; {date})': [
dict(ccode='DK', date='2021-02-14', is_seq=True, is_recent=True),
['https://www.covid19genomics.dk/statistics'],
('2020-W48-4', 0.002),
('2020-W49-4', 0.002),
('2020-W50-4', 0.004),
('2020-W51-4', 0.008),
('2020-W52-4', 0.020),
('2020-W53-4', 0.024),
('2021-W01-4', 0.040), # last updated 2021-02-05
('2021-W02-4', 0.075),
('2021-W03-4', 0.128),
('2021-W04-4', 0.191), # last updated 2021-02-05
('2021-W05-4', 0.271), # last updated before 2021-02-14
],
'NL (seq; {date}; OMT)': [
dict(ccode='NL', date='2021-01-01', is_seq=True, is_recent=False),
['https://www.tweedekamer.nl/kamerstukken/brieven_regering/detail?id=2021Z00794&did=2021D02016',
'https://www.rivm.nl/coronavirus-covid-19/omt'],
('2020-W49-4', 0.011),
('2020-W50-4', 0.007),
('2020-W51-4', 0.011),
('2020-W52-4', 0.014),
('2020-W53-4', 0.052),
('2021-W01-4', 0.119), # preliminary
],
'NL (seq; {date})': [
dict(ccode='NL', date='2021-02-07', is_seq=True, is_recent=True),
['https://www.tweedekamer.nl/kamerstukken/brieven_regering/detail?id=2021Z00794&did=2021D02016',
'https://www.tweedekamer.nl/sites/default/files/atoms/files/20210120_technische_briefing_commissie_vws_presentati_jaapvandissel_rivm_0.pdf',
'https://www.tweedekamer.nl/downloads/document?id=00588209-3f6b-4bfd-a031-2d283129331c&title=98e%20OMT%20advies%20deel%201%20en%20kabinetsreactie',
'https://www.tweedekamer.nl/downloads/document?id=be0cb7fc-e3fd-4a73-8964-56f154fc387e&title=Advies%20n.a.v.%2099e%20OMT%20deel%202.pdf'
],
('2020-W49-5', 0.011), # OMT #96 >>
('2020-W50-5', 0.007),
('2020-W51-5', 0.011),
('2020-W52-5', 0.020),
('2020-W53-5', 0.050), # << OMT #96
('2021-W01-5', 0.090), # TK briefing (read from figure ±0.005)
('2021-W02-5', 0.198), # OMT #98 (31 Jan)
('2021-W03-5', 0.241), # OMT #99
],
'UK (seq; {date})': [
dict(ccode='UK', date='2021-01-21', is_seq=True, is_recent=True),
['https://www.ecdc.europa.eu/sites/default/files/documents/COVID-19-risk-related-to-spread-of-new-SARS-CoV-2-variants-EU-EEA-first-update.pdf',
],
# Fig. 2. (traced, +/- 0.001 accuracy)
('2020-W43-4', 0.003),
('2020-W44-4', 0.008),
('2020-W45-4', 0.026),
('2020-W46-4', 0.063),
('2020-W47-4', 0.108),
('2020-W48-4', 0.101),
('2020-W49-4', 0.140),
('2020-W50-4', 0.333),
('2020-W51-4', 0.483),
('2020-W52-4', 0.539),
('2020-W53-4', 0.693),
# ('2021-W01-4', ...),
],
'PT (seq; {date})': [
dict(ccode='PT', date='2021-02-11', is_seq=True, is_recent=True),
['https://virological.org/t/tracking-sars-cov-2-voc-202012-01-lineage-b-1-1-7-dissemination-in-portugal-insights-from-nationwide-rt-pcr-spike-gene-drop-out-data/600',
'https://virological.org/t/tracking-sars-cov-2-voc-202012-01-lineage-b-1-1-7-dissemination-in-portugal-insights-from-nationwide-rt-pcr-spike-gene-drop-out-data/600/4',
'https://virological.org/t/tracking-sars-cov-2-voc-202012-01-lineage-b-1-1-7-dissemination-in-portugal-insights-from-nationwide-rt-pcr-spike-gene-drop-out-data/600/7',
],
('2020-W49-4', 0.019),
('2020-W50-4', 0.009),
('2020-W51-4', 0.013),
('2020-W52-4', 0.019),
('2020-W53-4', 0.032),
('2021-W01-4', 0.074),
('2021-W02-4', 0.133),
('2021-W03-4', 0.247),
('2021-W04-4', 0.365),
('2021-W05-4', 0.427),
],
'CH (seq; {date})': [
dict(ccode='CH', date='2021-02-14', is_seq=True, is_recent=True),
['https://sciencetaskforce.ch/nextstrain-phylogentische-analysen/'],
('2020-W51-4', 0.0004),
('2020-W52-4', 0.0043),
('2020-W53-4', 0.0074),
('2021-W01-4', 0.0153),
('2021-W02-4', 0.0329),
('2021-W03-4', 0.0881),
('2021-W04-4', 0.158), # last updated ca. 2021-02-05
('2021-W05-4', 0.235), # last updated before 2021-02-14
],
# https://assets.gov.ie/121054/55e77ccd-7d71-4553-90c9-5cd6cdee7420.pdf (p. 53) up to wk 1
# https://assets.gov.ie/121662/184e8d00-9080-44aa-af74-dbb13b0dcd34.pdf (p. 2, bullet 8) wk 2/3
'IE (SGTF; {date})': [
dict(ccode='IE', date='2021-02-04', is_seq=False, is_sgtf=True, is_recent=True),
['https://assets.gov.ie/121054/55e77ccd-7d71-4553-90c9-5cd6cdee7420.pdf', # (p. 53) up to wk 1
'https://assets.gov.ie/121662/184e8d00-9080-44aa-af74-dbb13b0dcd34.pdf', # (p. 2, bullet 8) wk 2/3
'https://assets.gov.ie/122798/644f5185-5067-4bd4-89fa-8cb75670821d.pdf', # p. 2, bullet 5
],
('2020-W50-4', 0.014),
('2020-W51-4', 0.086),
('2020-W52-4', 0.163),
('2020-W53-4', 0.262),
('2021-W01-4', 0.463), # 21 Jan
('2021-W02-4', 0.58),
('2021-W03-4', 0.63), # 28 Jan
('2021-W04-4', 0.695), # 4 Feb
('2021-W05-4', 0.75), # 4 Feb
]
}
set_date_in_records_keys(country_records)
cdict = {}
meta_records = []
for desc, records in country_records.items():
cdict[desc] = _convert_ywd_records(records[2:], ['f_b117'])
_add_meta_record(meta_records, desc, records[0], records[1])
return cdict, meta_records
#%%
regions_pop = {
'South East England': 9180135,
'London': 8961989,
'North West England': 7341196,
'East England': 6236072,
'West Midlands': 5934037,
'South West England': 5624696,
'Yorkshire': 5502967,
'East Midlands': 4835928,
'North East England': 2669941,
}
regions_pop['England (multiple regions; 2021-01-15)'] = sum(regions_pop.values())
uk_countries_pop = {
'England': 56286961,
'Scotland': 5463300,
'Wales': 3152879,
'Northern Ireland': 1893667,
}
def _get_data_England_regions(subtract_bg=True):
"""Get datasets for England regions. Original data represents 'positive population'.
Dividing by 28 days and time-shifting 14 days to get estimated daily increments.
With subtract_bg: Subtracting lowest region value - assuming background
false-positive for S-gene target failure.
Data source: Walker et al., https://doi.org/10.1101/2021.01.13.21249721
Published 2021-01-15.
"""
index_combi = pd.date_range('2020-09-28', '2020-12-14', freq='7 d')
df_combi = pd.DataFrame(index=index_combi)
ncolumns = ['pct_sgtf', 'pct_othr']
for col in ncolumns:
df_combi[col] = 0
pub_date = '2021-01-15'
cdict = {f'England (SGTF; multiple regions; {pub_date})': df_combi}
for fpath in sorted(Path('data').glob('uk_*_b117_pop.csv')):
ma = re.search('uk_(.*)_b117', str(fpath))
region = ma.group(1).replace('_', ' ')
df = pd.read_csv(fpath, comment='#').rename(columns={'Unnamed: 0': 'Date'})
df['Date'] = pd.to_datetime(df['Date']) - pd.Timedelta(14, 'd')
df = df.set_index('Date')
# interpolate and add to the combined dataframe.
df2 = pd.DataFrame(index=index_combi) # resampling data here
df2 = df2.merge(df[ncolumns], how='outer', left_index=True, right_index=True)
df2 = df2.interpolate(method='quadratic').loc[index_combi]
for col in ncolumns:
df_combi[col] += df2[col]
cdict[f'{region} (SGTF; {pub_date})'] = df
# convert to estimated new cases per day.
for key, df in cdict.items():
region = re.match(r'(.*) \(.*', key).group(1)
if region == 'England':
region = f'England (multiple regions; {pub_date})'
# estimate false-positive for SGTF as representing B.1.1.7
if subtract_bg:
pct_bg = df['pct_sgtf'].min()
else:
pct_bg = 0.0
df['n_b117'] = ((df['pct_sgtf'] - pct_bg)*(0.01/28 * regions_pop[region])).astype(int)
df['n_oth'] = ((df['pct_othr'] + pct_bg)*(0.01/28 * regions_pop[region])).astype(int)
# this doesn't work
# if subtract_bg:
# pct_tot = df['pct_sgtf'] + df['pct_othr']
# # f: fraction of positive test. Correct for background.
# f_sgtf = df['pct_sgtf']/pct_tot
# f_sgtf_min = f_sgtf.min()
# f_sgtf -= f_sgtf_min
# # convert back to pct values
# df['pct_sgtf'] = pct_tot * f_sgtf
# df['pct_othr'] = pct_tot * (1-f_sgtf)
# df['n_b117'] = (df['pct_sgtf'] * (0.01/28 * regions_pop[region])).astype(int)
# df['n_oth'] = (df['pct_othr'] * (0.01/28 * regions_pop[region])).astype(int)
df.drop(index=df.index[df['n_b117'] <= 0], inplace=True)
df['n_pos'] = df['n_b117'] + df['n_oth']
df['or_b117'] = df['n_b117'] / df['n_oth']
df['f_b117'] = df['or_b117']/(1 + df['or_b117'])
for col in ncolumns + ['n_pos']:
df_combi[col] = np.around(df_combi[col], 0).astype(int)
meta_records = []
for desc in cdict.keys():
region = re.match(r'(.*) \(', desc).group(1)
record = dict(
desc=desc,
date=pd.to_datetime(pub_date),
en_part=region,
is_recent=True,
is_seq=False,
is_sgtf=True,
refs=('https://doi.org/10.1101/2021.01.13.21249721',)
)
meta_records.append(record)
return cdict, meta_records
def load_uk_ons_gov_country_by_var():
"""Get data based on data/ons_gov_uk_country_by_var.xlsx.
Return:
- dataframe
- date_pub
- tuple of source URLs
Dataframe layout:
- index: Date.
- columns: {country_name}:{suffix}
with suffix = 'pnew', 'pnew_lo', 'pnew_hi', 'poth', ..., 'pnid', ...
for percentages new UK variant, CI low, CI high,
other variant, not-identified.
"""
refs = [
'https://www.ons.gov.uk/peoplepopulationandcommunity/healthandsocialcare/conditionsanddiseases/bulletins/coronaviruscovid19infectionsurveypilot/29january2021#positive-tests-that-are-compatible-with-the-new-uk-variant',
'https://www.ons.gov.uk/visualisations/dvc1163/countrybyvar/datadownload.xlsx',
]
# Excel sheet: groups of 9 columns by country (England, Wales, NI, Scotland).
xls_fname = 'data/ons_gov_uk_country_by_var.xlsx'
# 1st round: sanity check and construct better column names.
df = pd.read_excel(xls_fname, skiprows=3)
assert np.all(df.columns[[1, 10]] == ['England', 'Wales'])
assert df.iloc[0][0] == 'Date'
assert df.iloc[0][1] == '% testing positive new variant compatible*'
# percentages new variant, other, unidentified, with 95% CIs.
col_suffixes = ['pnew', 'pnew_hi', 'pnew_lo', 'poth', 'poth_hi', 'poth_lo',
'pnid', 'pnid_hi', 'pnid_lo']
colmap = {df.columns[0]: 'Date'}
for i in range(1, 37, 9):
country_name = df.columns[i]
for j in range(9):
colmap[df.columns[i+j]] = f'{country_name}:{col_suffixes[j]}'
df.rename(columns=colmap, inplace=True)
df.drop(columns=df.columns[37:], inplace=True)
# find the end of the data
i_stop = 2 + np.argmax(df['Date'].iloc[2:].isna())
assert i_stop >= 44
df = df.iloc[2:i_stop]
df['Date'] = pd.to_datetime(df['Date'])
df.set_index('Date', inplace=True)
if df.index[-1] == pd.to_datetime('2021-01-23'):
import os
import io
import random
import string
import re
import json
import pandas as pd
import numpy as np
from collections import OrderedDict
import nltk
from nltk import FreqDist
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
import config
EMPH_TOKEN = config.EMPH_TOKEN
CONTRAST_TOKEN = config.CONTRAST_TOKEN
CONCESSION_TOKEN = config.CONCESSION_TOKEN
# TODO: redesign the data loading so as to be object-oriented
def load_training_data(data_trainset, data_devset, input_concat=False, generate_vocab=False, skip_if_exist=True):
"""Generate source and target files in the required input format for the model training.
"""
training_source_file = os.path.join(config.DATA_DIR, 'training_source.txt')
training_target_file = os.path.join(config.DATA_DIR, 'training_target.txt')
dev_source_file = os.path.join(config.DATA_DIR, 'dev_source.txt')
dev_target_file = os.path.join(config.DATA_DIR, 'dev_target.txt')
if skip_if_exist:
# If there is an existing source and target file, skip their generation
if os.path.isfile(training_source_file) and \
os.path.isfile(training_target_file) and \
os.path.isfile(dev_source_file) and \
os.path.isfile(dev_target_file):
print('Found existing input files. Skipping their generation.')
return
dataset = init_training_data(data_trainset, data_devset)
dataset_name = dataset['dataset_name']
x_train, y_train, x_dev, y_dev = dataset['data']
_, _, slot_sep, val_sep, val_sep_end = dataset['separators']
# Preprocess the MRs and the utterances
x_train = [preprocess_mr(x, dataset['separators']) for x in x_train]
x_dev = [preprocess_mr(x, dataset['separators']) for x in x_dev]
y_train = [preprocess_utterance(y) for y in y_train]
y_dev = [preprocess_utterance(y) for y in y_dev]
# Produce sequences of extracted words from the meaning representations (MRs) in the trainset
x_train_seq = []
for i, mr in enumerate(x_train):
slot_ctr = 0
emph_idxs = set()
# contrast_idxs = set()
# concession_idxs = set()
mr_dict = OrderedDict()
# Extract the slot-value pairs into a dictionary
for slot_value in mr.split(slot_sep):
slot, value, _, _ = parse_slot_and_value(slot_value, val_sep, val_sep_end)
if slot == EMPH_TOKEN:
emph_idxs.add(slot_ctr)
# elif slot == CONTRAST_TOKEN:
# contrast_idxs.add(slot_ctr)
# elif slot == CONCESSION_TOKEN:
# concession_idxs.add(slot_ctr)
else:
mr_dict[slot] = value
slot_ctr += 1
# Delexicalize the MR and the utterance
y_train[i] = delex_sample(mr_dict, y_train[i], dataset=dataset_name, input_concat=input_concat)
slot_ctr = 0
# Convert the dictionary to a list
x_train_seq.append([])
for key, val in mr_dict.items():
# Insert the emphasis token where appropriate
if slot_ctr in emph_idxs:
x_train_seq[i].append(EMPH_TOKEN)
# Insert the contrast token where appropriate
# if slot_ctr in contrast_idxs:
# x_train_seq[i].append(CONTRAST_TOKEN)
# # Insert the concession token where appropriate
# if slot_ctr in concession_idxs:
# x_train_seq[i].append(CONCESSION_TOKEN)
if len(val) > 0:
x_train_seq[i].extend([key] + val.split())
else:
x_train_seq[i].append(key)
slot_ctr += 1
if input_concat:
# Append a sequence-end token to be paired up with seq2seq's sequence-end token when concatenating
x_train_seq[i].append('<STOP>')
# Produce sequences of extracted words from the meaning representations (MRs) in the devset
x_dev_seq = []
for i, mr in enumerate(x_dev):
slot_ctr = 0
emph_idxs = set()
# contrast_idxs = set()
# concession_idxs = set()
mr_dict = OrderedDict()
# Extract the slot-value pairs into a dictionary
for slot_value in mr.split(slot_sep):
slot, value, _, _ = parse_slot_and_value(slot_value, val_sep, val_sep_end)
if slot == EMPH_TOKEN:
emph_idxs.add(slot_ctr)
# elif slot == CONTRAST_TOKEN:
# contrast_idxs.add(slot_ctr)
# elif slot == CONCESSION_TOKEN:
# concession_idxs.add(slot_ctr)
else:
mr_dict[slot] = value
slot_ctr += 1
# Delexicalize the MR and the utterance
y_dev[i] = delex_sample(mr_dict, y_dev[i], dataset=dataset_name, input_concat=input_concat)
slot_ctr = 0
# Convert the dictionary to a list
x_dev_seq.append([])
for key, val in mr_dict.items():
# Insert the emphasis token where appropriate
if slot_ctr in emph_idxs:
x_dev_seq[i].append(EMPH_TOKEN)
# Insert the contrast token where appropriate
# if slot_ctr in contrast_idxs:
# x_dev_seq[i].append(CONTRAST_TOKEN)
# # Insert the concession token where appropriate
# if slot_ctr in concession_idxs:
# x_dev_seq[i].append(CONCESSION_TOKEN)
if len(val) > 0:
x_dev_seq[i].extend([key] + val.split())
else:
x_dev_seq[i].append(key)
slot_ctr += 1
if input_concat:
# Append a sequence-end token to be paired up with seq2seq's sequence-end token when concatenating
x_dev_seq[i].append('<STOP>')
y_train_seq = [word_tokenize(y) for y in y_train]
y_dev_seq = [word_tokenize(y) for y in y_dev]
# Generate a vocabulary file if necessary
if generate_vocab:
generate_vocab_file(np.concatenate(x_train_seq + x_dev_seq + y_train_seq + y_dev_seq),
vocab_filename='vocab.lang_gen.tokens')
# generate_vocab_file(np.concatenate(x_train_seq + x_dev_seq),
# vocab_filename='vocab.lang_gen_multi_vocab.source')
# generate_vocab_file(np.concatenate(y_train_seq + y_dev_seq),
# vocab_filename='vocab.lang_gen_multi_vocab.target')
with io.open(training_source_file, 'w', encoding='utf8') as f_x_train:
for line in x_train_seq:
f_x_train.write('{}\n'.format(' '.join(line)))
with io.open(training_target_file, 'w', encoding='utf8') as f_y_train:
for line in y_train:
f_y_train.write(line + '\n')
with io.open(dev_source_file, 'w', encoding='utf8') as f_x_dev:
for line in x_dev_seq:
f_x_dev.write('{}\n'.format(' '.join(line)))
with io.open(dev_target_file, 'w', encoding='utf8') as f_y_dev:
for line in y_dev:
f_y_dev.write(line + '\n')
return np.concatenate(x_train_seq + x_dev_seq + y_train_seq + y_dev_seq).flatten()
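# A hypothetical invocation (paths are illustrative; they must contain the dataset
# name, e.g. 'rest_e2e', so that init_training_data can recognise the format):
#
#     tokens = load_training_data('data/rest_e2e/trainset.csv',
#                                 'data/rest_e2e/devset.csv',
#                                 generate_vocab=True)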
def load_test_data(data_testset, input_concat=False):
"""Generate source and target files in the required input format for the model testing.
"""
test_source_file = os.path.join(config.DATA_DIR, 'test_source.txt')
test_source_dict_file = os.path.join(config.DATA_DIR, 'test_source_dict.json')
test_target_file = os.path.join(config.DATA_DIR, 'test_target.txt')
test_reference_file = os.path.join(config.METRICS_DIR, 'test_references.txt')
dataset = init_test_data(data_testset)
dataset_name = dataset['dataset_name']
x_test, y_test = dataset['data']
_, _, slot_sep, val_sep, val_sep_end = dataset['separators']
# Preprocess the MRs
x_test = [preprocess_mr(x, dataset['separators']) for x in x_test]
# Produce sequences of extracted words from the meaning representations (MRs) in the testset
x_test_seq = []
x_test_dict = []
for i, mr in enumerate(x_test):
slot_ctr = 0
emph_idxs = set()
# contrast_idxs = set()
# concession_idxs = set()
mr_dict = OrderedDict()
mr_dict_cased = OrderedDict()
# Extract the slot-value pairs into a dictionary
for slot_value in mr.split(slot_sep):
slot, value, _, value_orig = parse_slot_and_value(slot_value, val_sep, val_sep_end)
if slot == EMPH_TOKEN:
emph_idxs.add(slot_ctr)
# elif slot == CONTRAST_TOKEN:
# contrast_idxs.add(slot_ctr)
# elif slot == CONCESSION_TOKEN:
# concession_idxs.add(slot_ctr)
else:
mr_dict[slot] = value
mr_dict_cased[slot] = value_orig
slot_ctr += 1
# Build an MR dictionary with original values
x_test_dict.append(mr_dict_cased)
# Delexicalize the MR
delex_sample(mr_dict, dataset=dataset_name, mr_only=True, input_concat=input_concat)
slot_ctr = 0
# Convert the dictionary to a list
x_test_seq.append([])
for key, val in mr_dict.items():
# Insert the emphasis token where appropriate
if slot_ctr in emph_idxs:
x_test_seq[i].append(EMPH_TOKEN)
# Insert the contrast token where appropriate
# if slot_ctr in contrast_idxs:
# x_test_seq[i].append(CONTRAST_TOKEN)
# # Insert the concession token where appropriate
# if slot_ctr in concession_idxs:
# x_test_seq[i].append(CONCESSION_TOKEN)
if len(val) > 0:
x_test_seq[i].extend([key] + val.split())
else:
x_test_seq[i].append(key)
slot_ctr += 1
if input_concat:
# Append a sequence-end token to be paired up with seq2seq's sequence-end token when concatenating
x_test_seq[i].append('<STOP>')
with io.open(test_source_file, 'w', encoding='utf8') as f_x_test:
for line in x_test_seq:
f_x_test.write('{}\n'.format(' '.join(line)))
with io.open(test_source_dict_file, 'w', encoding='utf8') as f_x_test_dict:
json.dump(x_test_dict, f_x_test_dict)
if len(y_test) > 0:
with io.open(test_target_file, 'w', encoding='utf8') as f_y_test:
for line in y_test:
f_y_test.write(line + '\n')
# Reference file for calculating metrics for test predictions
with io.open(test_reference_file, 'w', encoding='utf8') as f_y_test:
for i, line in enumerate(y_test):
if i > 0 and x_test[i] != x_test[i - 1]:
f_y_test.write('\n')
f_y_test.write(line + '\n')
def generate_vocab_file(token_sequences, vocab_filename, vocab_size=10000):
vocab_file = os.path.join(config.DATA_DIR, vocab_filename)
distr = FreqDist(token_sequences)
vocab = distr.most_common(min(len(distr), vocab_size - 3)) # cap the vocabulary size
vocab_with_reserved_tokens = ['<pad>', '<EOS>'] + list(map(lambda tup: tup[0], vocab)) + ['UNK']
with io.open(vocab_file, 'w', encoding='utf8') as f_vocab:
for token in vocab_with_reserved_tokens:
f_vocab.write('{}\n'.format(token))
def get_vocabulary(token_sequences, vocab_size=10000):
distr = FreqDist(token_sequences)
vocab = distr.most_common(min(len(distr), vocab_size)) # cap the vocabulary size
vocab_set = set(map(lambda tup: tup[0], vocab))
return vocab_set
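# e.g. get_vocabulary(['the', 'the', 'a'], vocab_size=10) -> {'the', 'a'}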
# TODO: generalize and utilize in the loading functions
def tokenize_mr(mr):
"""Produces a (delexicalized) sequence of tokens from the input MR.
Method used in the client to preprocess a single MR that is sent to the service for utterance generation.
"""
slot_sep = ','
val_sep = '['
val_sep_end = ']'
mr_seq = []
slot_ctr = 0
emph_idxs = set()
mr_dict = OrderedDict()
mr_dict_cased = OrderedDict()
# Extract the slot-value pairs into a dictionary
for slot_value in mr.split(slot_sep):
slot, value, _, value_orig = parse_slot_and_value(slot_value, val_sep, val_sep_end)
if slot == EMPH_TOKEN:
emph_idxs.add(slot_ctr)
else:
mr_dict[slot] = value
mr_dict_cased[slot] = value_orig
slot_ctr += 1
# Delexicalize the MR
delex_sample(mr_dict, mr_only=True)
slot_ctr = 0
# Convert the dictionary to a list
for key, val in mr_dict.items():
# Insert the emphasis token where appropriate
if slot_ctr in emph_idxs:
mr_seq.append(EMPH_TOKEN)
if len(val) > 0:
mr_seq.extend([key] + val.split())
else:
mr_seq.append(key)
slot_ctr += 1
return mr_seq, mr_dict_cased
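# Illustrative call (exact tokens depend on delex_sample, defined elsewhere):
#
#     mr_seq, mr_dict_cased = tokenize_mr('name[The Eagle], food[French]')
#     # mr_seq is a flat token list such as ['name', ..., 'food', 'French']
#     # mr_dict_cased maps each slot to its original-cased value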
def load_training_data_for_eval(data_trainset, data_model_outputs_train, vocab_size, max_input_seq_len, max_output_seq_len, delex=False):
dataset_name = ''
slot_sep = ''
val_sep = ''
val_sep_end = None
if '/rest_e2e/' in data_trainset or '\\rest_e2e\\' in data_trainset:
x_train, y_train_1 = read_rest_e2e_dataset_train(data_trainset)
y_train_2 = read_predictions(data_model_outputs_train)
dataset_name = 'rest_e2e'
slot_sep = ','
val_sep = '['
val_sep_end = ']'
elif '/tv/' in data_trainset or '\\tv\\' in data_trainset:
x_train, y_train_1, y_train_2 = read_tv_dataset_train(data_trainset)
if data_model_outputs_train is not None:
y_train_2 = read_predictions(data_model_outputs_train)
dataset_name = 'tv'
slot_sep = ';'
val_sep = '='
elif '/laptop/' in data_trainset or '\\laptop\\' in data_trainset:
x_train, y_train_1, y_train_2 = read_laptop_dataset_train(data_trainset)
if data_model_outputs_train is not None:
y_train_2 = read_predictions(data_model_outputs_train)
dataset_name = 'laptop'
slot_sep = ';'
val_sep = '='
else:
raise FileNotFoundError
# parse the utterances into lists of words
y_train_1 = [preprocess_utterance(y) for y in y_train_1]
y_train_2 = [preprocess_utterance(y) for y in y_train_2]
# produce sequences of extracted words from the meaning representations (MRs) in the trainset
x_train_seq = []
for i, mr in enumerate(x_train):
mr_dict = OrderedDict()
for slot_value in mr.split(slot_sep):
slot, value, _, _ = parse_slot_and_value(slot_value, val_sep, val_sep_end)
mr_dict[slot] = value
if delex == True:
# delexicalize the MR and the utterance
y_train_1[i] = delex_sample(mr_dict, y_train_1[i], dataset=dataset_name, utterance_only=True)
y_train_2[i] = delex_sample(mr_dict, y_train_2[i], dataset=dataset_name)
# convert the dictionary to a list
x_train_seq.append([])
for key, val in mr_dict.items():
if len(val) > 0:
x_train_seq[i].extend([key, val])
else:
x_train_seq[i].append(key)
# create source vocabulary
if os.path.isfile('data/eval_vocab_source.json'):
with io.open('data/eval_vocab_source.json', 'r', encoding='utf8') as f_x_vocab:
x_vocab = json.load(f_x_vocab)
else:
x_distr = FreqDist([x_token for x in x_train_seq for x_token in x])
x_vocab = x_distr.most_common(min(len(x_distr), vocab_size - 2)) # cap the vocabulary size
with io.open('data/eval_vocab_source.json', 'w', encoding='utf8') as f_x_vocab:
json.dump(x_vocab, f_x_vocab, ensure_ascii=False)
x_idx2word = [word[0] for word in x_vocab]
x_idx2word.insert(0, '<PADDING>')
x_idx2word.append('<NA>')
x_word2idx = {word: idx for idx, word in enumerate(x_idx2word)}
# create target vocabulary
if os.path.isfile('data/eval_vocab_target.json'):
with io.open('data/eval_vocab_target.json', 'r', encoding='utf8') as f_y_vocab:
y_vocab = json.load(f_y_vocab)
else:
y_distr = FreqDist([y_token for y in y_train_1 for y_token in y] + [y_token for y in y_train_2 for y_token in y])
y_vocab = y_distr.most_common(min(len(y_distr), vocab_size - 2)) # cap the vocabulary size
with io.open('data/eval_vocab_target.json', 'w', encoding='utf8') as f_y_vocab:
json.dump(y_vocab, f_y_vocab, ensure_ascii=False)
y_idx2word = [word[0] for word in y_vocab]
y_idx2word.insert(0, '<PADDING>')
y_idx2word.append('<NA>')
y_word2idx = {token: idx for idx, token in enumerate(y_idx2word)}
# produce sequences of indexes from the MRs in the training set
x_train_enc = token_seq_to_idx_seq(x_train_seq, x_word2idx, max_input_seq_len)
# produce sequences of indexes from the utterances in the training set
y_train_1_enc = token_seq_to_idx_seq(y_train_1, y_word2idx, max_output_seq_len)
# produce sequences of indexes from the utterances in the training set
y_train_2_enc = token_seq_to_idx_seq(y_train_2, y_word2idx, max_output_seq_len)
# produce the list of the target labels in the training set
labels_train = np.concatenate((np.ones(len(y_train_1_enc)), np.zeros(len(y_train_2_enc))))
return (np.concatenate((np.array(x_train_enc), np.array(x_train_enc))),
np.concatenate((np.array(y_train_1_enc), np.array(y_train_2_enc))),
labels_train)
def load_dev_data_for_eval(data_devset, data_model_outputs_dev, vocab_size, max_input_seq_len, max_output_seq_len, delex=True):
dataset_name = ''
slot_sep = ''
val_sep = ''
val_sep_end = None
if '/rest_e2e/' in data_devset or '\\rest_e2e\\' in data_devset:
x_dev, y_dev_1 = read_rest_e2e_dataset_dev(data_devset)
y_dev_2 = read_predictions(data_model_outputs_dev)
dataset_name = 'rest_e2e'
slot_sep = ','
val_sep = '['
val_sep_end = ']'
elif '/tv/' in data_devset or '\\tv\\' in data_devset:
x_dev, y_dev_1, y_dev_2 = read_tv_dataset_dev(data_devset)
if data_model_outputs_dev is not None:
y_dev_2 = read_predictions(data_model_outputs_dev)
dataset_name = 'tv'
slot_sep = ';'
val_sep = '='
elif '/laptop/' in data_devset or '\\laptop\\' in data_devset:
x_dev, y_dev_1, y_dev_2 = read_laptop_dataset_dev(data_devset)
if data_model_outputs_dev is not None:
y_dev_2 = read_predictions(data_model_outputs_dev)
dataset_name = 'laptop'
slot_sep = ';'
val_sep = '='
else:
raise FileNotFoundError
# parse the utterances into lists of words
y_dev_1 = [preprocess_utterance(y) for y in y_dev_1]
y_dev_2 = [preprocess_utterance(y) for y in y_dev_2]
# produce sequences of extracted words from the meaning representations (MRs) in the devset
x_dev_seq = []
for i, mr in enumerate(x_dev):
mr_dict = OrderedDict()
for slot_value in mr.split(slot_sep):
slot, value, _, _ = parse_slot_and_value(slot_value, val_sep, val_sep_end)
mr_dict[slot] = value
if delex == True:
# delexicalize the MR and the utterance
y_dev_1[i] = delex_sample(mr_dict, y_dev_1[i], dataset=dataset_name, utterance_only=True)
y_dev_2[i] = delex_sample(mr_dict, y_dev_2[i], dataset=dataset_name)
# convert the dictionary to a list
x_dev_seq.append([])
for key, val in mr_dict.items():
if len(val) > 0:
x_dev_seq[i].extend([key, val])
else:
x_dev_seq[i].append(key)
# load the source vocabulary
with io.open('data/eval_vocab_source.json', 'r', encoding='utf8') as f_x_vocab:
x_vocab = json.load(f_x_vocab)
x_idx2word = [word[0] for word in x_vocab]
x_idx2word.insert(0, '<PADDING>')
x_idx2word.append('<NA>')
x_word2idx = {word: idx for idx, word in enumerate(x_idx2word)}
# load the target vocabulary
with io.open('data/eval_vocab_target.json', 'r', encoding='utf8') as f_y_vocab:
y_vocab = json.load(f_y_vocab)
y_idx2word = [word[0] for word in y_vocab]
y_idx2word.insert(0, '<PADDING>')
y_idx2word.append('<NA>')
y_word2idx = {token: idx for idx, token in enumerate(y_idx2word)}
# produce sequences of indexes from the MRs in the devset
x_dev_enc = token_seq_to_idx_seq(x_dev_seq, x_word2idx, max_input_seq_len)
# produce sequences of indexes from the utterances in the devset
y_dev_1_enc = token_seq_to_idx_seq(y_dev_1, y_word2idx, max_output_seq_len)
# produce sequences of indexes from the utterances in the devset
y_dev_2_enc = token_seq_to_idx_seq(y_dev_2, y_word2idx, max_output_seq_len)
# produce the list of the target labels in the devset
labels_dev = np.concatenate((np.ones(len(y_dev_1_enc)), np.zeros(len(y_dev_2_enc))))
return (np.concatenate((np.array(x_dev_enc), np.array(x_dev_enc))),
np.concatenate((np.array(y_dev_1_enc), np.array(y_dev_2_enc))),
labels_dev)
def load_test_data_for_eval(data_testset, data_model_outputs_test, vocab_size, max_input_seq_len, max_output_seq_len, delex=False):
dataset_name = ''
slot_sep = ''
val_sep = ''
val_sep_end = None
if '/rest_e2e/' in data_testset or '\\rest_e2e\\' in data_testset:
x_test, _ = read_rest_e2e_dataset_test(data_testset)
y_test = read_predictions(data_model_outputs_test)
dataset_name = 'rest_e2e'
slot_sep = ','
val_sep = '['
val_sep_end = ']'
elif '/tv/' in data_testset or '\\tv\\' in data_testset:
x_test, _, y_test = read_tv_dataset_test(data_testset)
if data_model_outputs_test is not None:
y_test = read_predictions(data_model_outputs_test)
dataset_name = 'tv'
slot_sep = ';'
val_sep = '='
elif '/laptop/' in data_testset or '\\laptop\\' in data_testset:
x_test, _, y_test = read_laptop_dataset_test(data_testset)
if data_model_outputs_test is not None:
y_test = read_predictions(data_model_outputs_test)
dataset_name = 'laptop'
slot_sep = ';'
val_sep = '='
else:
raise FileNotFoundError
# parse the utterances into lists of words
y_test = [preprocess_utterance(y) for y in y_test]
#y_test_1 = [preprocess_utterance(y) for y in y_test_1]
#y_test_2 = [preprocess_utterance(y) for y in y_test_2]
# produce sequences of extracted words from the meaning representations (MRs) in the testset
x_test_seq = []
for i, mr in enumerate(x_test):
mr_dict = OrderedDict()
for slot_value in mr.split(slot_sep):
slot, value, _, _ = parse_slot_and_value(slot_value, val_sep, val_sep_end)
mr_dict[slot] = value
if delex == True:
# delexicalize the MR and the utterance
y_test[i] = delex_sample(mr_dict, y_test[i], dataset=dataset_name)
#y_test_1[i] = delex_sample(mr_dict, y_test_1[i], dataset=dataset_name, utterance_only=True)
#y_test_2[i] = delex_sample(mr_dict, y_test_2[i], dataset=dataset_name)
# convert the dictionary to a list
x_test_seq.append([])
for key, val in mr_dict.items():
if len(val) > 0:
x_test_seq[i].extend([key, val])
else:
x_test_seq[i].append(key)
# load the source vocabulary
with io.open('data/eval_vocab_source.json', 'r', encoding='utf8') as f_x_vocab:
x_vocab = json.load(f_x_vocab)
x_idx2word = [word[0] for word in x_vocab]
x_idx2word.insert(0, '<PADDING>')
x_idx2word.append('<NA>')
x_word2idx = {word: idx for idx, word in enumerate(x_idx2word)}
# load the target vocabulary
with io.open('data/eval_vocab_target.json', 'r', encoding='utf8') as f_y_vocab:
y_vocab = json.load(f_y_vocab)
y_idx2word = [word[0] for word in y_vocab]
y_idx2word.insert(0, '<PADDING>')
y_idx2word.append('<NA>')
y_word2idx = {token: idx for idx, token in enumerate(y_idx2word)}
# produce sequences of indexes from the MRs in the test set
x_test_enc = token_seq_to_idx_seq(x_test_seq, x_word2idx, max_input_seq_len)
# produce sequences of indexes from the utterances in the test set
y_test_enc = token_seq_to_idx_seq(y_test, y_word2idx, max_output_seq_len)
#y_test_1_enc = token_seq_to_idx_seq(y_test_1, y_word2idx, max_output_seq_len)
#y_test_2_enc = token_seq_to_idx_seq(y_test_2, y_word2idx, max_output_seq_len)
# produce the list of the target labels in the test set
labels_test = np.ones(len(y_test_enc))
#labels_test = np.concatenate((np.ones(len(y_test_1_enc)), np.zeros(len(y_test_2_enc))))
return (np.array(x_test_enc),
np.array(y_test_enc),
labels_test,
x_idx2word,
y_idx2word)
#return (np.concatenate((np.array(x_test_enc), np.array(x_test_enc))),
# np.concatenate((np.array(y_test_1_enc), np.array(y_test_2_enc))),
# labels_test,
# x_idx2word,
# y_idx2word)
# ---- AUXILIARY FUNCTIONS ----
def init_training_data(data_trainset, data_devset):
if 'rest_e2e' in data_trainset and 'rest_e2e' in data_devset:
x_train, y_train = read_rest_e2e_dataset_train(data_trainset)
x_dev, y_dev = read_rest_e2e_dataset_dev(data_devset)
dataset_name = 'rest_e2e'
da_sep = '('
da_sep_end = ')'
slot_sep = ', '
val_sep = '['
val_sep_end = ']'
elif 'video_game' in data_trainset and 'video_game' in data_devset:
x_train, y_train = read_video_game_dataset_train(data_trainset)
x_dev, y_dev = read_video_game_dataset_dev(data_devset)
dataset_name = 'video_game'
da_sep = '('
da_sep_end = ')'
slot_sep = ', '
val_sep = '['
val_sep_end = ']'
elif 'tv' in data_trainset and 'tv' in data_devset:
x_train, y_train, _ = read_tv_dataset_train(data_trainset)
x_dev, y_dev, _ = read_tv_dataset_dev(data_devset)
dataset_name = 'tv'
da_sep = '('
da_sep_end = ')'
slot_sep = ';'
val_sep = '='
val_sep_end = None
elif 'laptop' in data_trainset and 'laptop' in data_devset:
x_train, y_train, _ = read_laptop_dataset_train(data_trainset)
x_dev, y_dev, _ = read_laptop_dataset_dev(data_devset)
dataset_name = 'laptop'
da_sep = '('
da_sep_end = ')'
slot_sep = ';'
val_sep = '='
val_sep_end = None
elif 'hotel' in data_trainset and 'hotel' in data_devset:
x_train, y_train, _ = read_hotel_dataset_train(data_trainset)
x_dev, y_dev, _ = read_hotel_dataset_dev(data_devset)
dataset_name = 'hotel'
da_sep = '('
da_sep_end = ')'
slot_sep = ';'
val_sep = '='
val_sep_end = None
else:
raise ValueError('Unexpected file name or path: {0}, {1}'.format(data_trainset, data_devset))
return {
'dataset_name': dataset_name,
'data': (x_train, y_train, x_dev, y_dev),
'separators': (da_sep, da_sep_end, slot_sep, val_sep, val_sep_end)
}
def init_test_data(data_testset):
if 'rest_e2e' in data_testset:
x_test, y_test = read_rest_e2e_dataset_test(data_testset)
dataset_name = 'rest_e2e'
da_sep = '('
da_sep_end = ')'
slot_sep = ', '
val_sep = '['
val_sep_end = ']'
elif 'video_game' in data_testset:
x_test, y_test = read_video_game_dataset_test(data_testset)
dataset_name = 'video_game'
da_sep = '('
da_sep_end = ')'
slot_sep = ', '
val_sep = '['
val_sep_end = ']'
elif 'tv' in data_testset:
x_test, y_test, _ = read_tv_dataset_test(data_testset)
dataset_name = 'tv'
da_sep = '('
da_sep_end = ')'
slot_sep = ';'
val_sep = '='
val_sep_end = None
elif 'laptop' in data_testset:
x_test, y_test, _ = read_laptop_dataset_test(data_testset)
dataset_name = 'laptop'
da_sep = '('
da_sep_end = ')'
slot_sep = ';'
val_sep = '='
val_sep_end = None
elif 'hotel' in data_testset:
x_test, y_test, _ = read_hotel_dataset_test(data_testset)
dataset_name = 'hotel'
da_sep = '('
da_sep_end = ')'
slot_sep = ';'
val_sep = '='
val_sep_end = None
else:
raise ValueError('Unexpected file name or path: {0}'.format(data_testset))
return {
'dataset_name': dataset_name,
'data': (x_test, y_test),
'separators': (da_sep, da_sep_end, slot_sep, val_sep, val_sep_end)
}
def read_rest_e2e_dataset_train(data_trainset):
# read the training data from file
df_train = pd.read_csv(data_trainset, header=0, encoding='utf8')
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 15 17:30:50 2019
@author: smith
"""
import ClearMap.IO.IO as io
import numpy as np
import pandas as pd
import os
samples = ['IA1_RT', 'IA1_RB', 'IA1_LT', 'IA1_LB',
'IA2_NP', 'IA2_RT', 'IA2_RB', 'IA2_LT', 'IA2_LB']
dataList = []
for mouse in samples:
sampleName = mouse
baseDirectory = '/d2/studies/ClearMap/IA_iDISCO/' + sampleName
#sampleName = 'IA2_LT'
#baseDirectory = '/d2/studies/ClearMap/IA_iDISCO/' + sampleName
#sampleName = mouse
##IMPORT PREVIOUSLY PRE-PROCESSED POINTS DATA
hemisphere = '_left'
data = io.readData(os.path.join(baseDirectory, sampleName + '_Caudoputamen' + '_isolated_points' + hemisphere + '.tif'))
points = np.nonzero(data)[:3]
dfPoints = pd.DataFrame(points, index=['x', 'y', 'z']).T
dfPoints.rename(columns={0: "x", 1: "y", 2: "z"})
###View Range:
x_range=dfPoints.x.max() - dfPoints.x.min()
y_range=dfPoints.y.max() - dfPoints.y.min()
z_range=dfPoints.z.max() - dfPoints.z.min()
print(x_range, y_range, z_range)
#Bin Y axis
dfPoints['y_bins']=pd.cut(dfPoints['y'], bins=2)
dfPoints['y_bins'].value_counts()
dfPoints_counts_y = dfPoints['y_bins'].value_counts()
dfPoints_sorted_y = dfPoints_counts_y.sort_index()
dfPoints_sorted_y.to_excel(os.path.join(baseDirectory, 'y_binned_striatum_Jan11' + hemisphere + '.xlsx'))
#figY = dfPoints_sorted_y.plot.bar(figsize=(20,10))
print(dfPoints_sorted_y)
#2nd Iteration - Splits anterior half of striatum into 3 subregions
firstHalf = dfPoints_sorted_y[0]
dfPoints_ant = dfPoints.sort_values(by=['y'])[:firstHalf]
dfPoints_ant['y_iter2'] = pd.cut(dfPoints_ant['y'], bins=3)
dfPoints_ant['y_iter2'].value_counts()
dfPoints_ant.sort_values('y_iter2')
dfPoints_ant_count = dfPoints_ant['y_iter2'].value_counts()
dfPoints_ant_count.sort_index()
ant0 = dfPoints_ant_count.sort_index()[0]
ant1 = dfPoints_ant_count.sort_index()[1]
ant2 = dfPoints_ant_count.sort_index()[2:]
ant2 = ant2[0]
#Check that bins were split correctly:
if firstHalf != ant0+ant1+ant2:
raise ValueError('Variables Not Equal!')
else: print('Values Are Equal')
#Split each of the 3 subregions into medial/lateral and count cells:
dfPoints_ant0 = dfPoints_ant.sort_values(by=['y_iter2'])[:ant0]
dfPoints_ant0['x_bins0'] = pd.cut(dfPoints_ant0['x'], bins=2)
dfPoints_ant0_count = dfPoints_ant0['x_bins0'].value_counts()
dfPoints_ant0_count.sort_index()
dfPoints_ant1 = dfPoints_ant.sort_values(by=['y_iter2'])[ant0:ant0+ant1]
dfPoints_ant1['x_bins1'] = pd.cut(dfPoints_ant1['x'], bins=2)
dfPoints_ant1_count = dfPoints_ant1['x_bins1'].value_counts()
dfPoints_ant1_count.sort_index()
dfPoints_ant2 = dfPoints_ant.sort_values(by=['y_iter2'])[ant0+ant1:]
dfPoints_ant2['x_bins2'] = pd.cut(dfPoints_ant2['x'], bins=2)
from sklearn import svm, metrics
from sklearn.model_selection import train_test_split as split
import pandas
table = pandas.read_csv("bmi.csv")
weight = table["weight"] / 100
height = table["height"] / 200
label = table["label"]
multi = pandas.concat([weight, height], axis=1)
#! /home/jan/anaconda3/bin/python
""" -------------------------------
Copyright (C) 2018 RISE
This code was produced by RISE
The 2013-03-26 version
bonsai/src_v02/bonsai_time.py
support for managing of time data
and time analysis
------------------------------------"""
from datetime import datetime as timeclass
from datetime import timedelta as delta
from dateutil import parser
import pandas as pd
import numpy as np
import common
import global_settings as gs
import bonsai_io as bio
def head(xs, lex8, lex9, n = 1):
ys = []
for x in xs:
if isinstance(x, str):
cx = common.lookup_icd10(x, lex8, lex9)
for c in cx:
ys += [c[:n]]
return ys
def split_and_head(ys, recidiv_icd10_class, lex8, lex9, n = 1):
cs = []
for y in ys.values:
if not common.isnull(y):
cs = np.append(cs, y.split())
cs = list(cs)
ys = np.unique(head(cs, lex8, lex9, n = n))
ys = list(set(ys) - set([recidiv_icd10_class]))
return ys
# here we can instead return the difference
def after(x, y, d = 0):
if common.notnull(x) and common.notnull(y):
return parser.parse(x) > parser.parse(y) + delta(days = d)
return False
# here we can instead return the time and difference
def times_after(sz, y, d = 0):
L = []
for z in sz:
if after(z, y, d):
L += [z]
return L
def first_time(zs):
L = list(map(common.str2time, zs))
if L == []:
return '0'
# t = min(list(map(common.str2time, zs)))
return common.time2str(min(L))
def first_after(zs, y, d = 0):
return first_time(times_after(zs, y, d))
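# Example (ISO date strings; a gap of 1826 days is roughly five years):
#
#     times_after(['2006-03-01', '2012-07-15'], '2005-01-01', d=1826) -> ['2012-07-15']
#
# first_after() then returns the earliest qualifying date, formatted by
# common.time2str, or '0' if none qualify.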
def first_time_compression(df, xcol, ycol, zcol, fcol, lex8, lex9, gap = 1826, split = True, n = 1):
# xcol = 'LopNr'
# ycol = 'DIAGNOS'
# fcol = 'first_incare'
data = []
df = df.dropna()
if df.empty:
return df
xs = df[xcol].drop_duplicates()
print()
print('first_time_compression')
print()
i = 0
print('nr of LopNr:s', len(xs))
for x in xs:
i += 1
if (i % 100) == 0:
print(i)
dx = df[ df[xcol] == x ]
diagnos_dat = dx['DiagnosDat'].values[0]
recidiv_icd10 = dx['ICD10'].values[0]
recidiv_icd10_class = recidiv_icd10[:n]
sz = dx[zcol].drop_duplicates()
yz_list = []
for z in sz:
dz = dx[ dx[zcol] == z ]
ys = dz[ycol].drop_duplicates()
if split:
ys = split_and_head(ys, recidiv_icd10_class, lex8, lex9, n = n)
for y in ys:
yz_list += [[y, z]]
if not (yz_list == []):
dyz = pd.DataFrame(yz_list)
dyz.columns = [ycol, zcol]
sy = dyz[ycol].drop_duplicates()
yminz_list = []
for y in sy:
dy = dyz[ dyz[ycol] == y ]
times = dy[zcol].values
z = first_after(times, diagnos_dat, gap)
if z != '0':
yminz_list += [[y, z]]
data = data + [ [x] + [yminz_list] ]
dz = pd.DataFrame(data)
if not dz.empty:
dz.columns = [xcol, fcol]
return dz
def first_time_aho(df, base, xcol, ycol, zcol, fcol, lex8, lex9, gap = 1826, split = True, n = 1):
# xcol = 'LopNr'
# ycol = 'DIAGNOS'
# fcol = 'first_incare'
data = []
xs = base[xcol]
print()
print('first_time_compression aho')
print()
i = 0
df.set_index(xcol,drop=False,inplace=True)
print('nr of LopNr:s', len(xs))
for x in xs:
i += 1
if (i % 100) == 0:
print(i)
if x not in df.index:
continue
dx = df.loc[x]
if type(dx) != pd.DataFrame:
dx = pd.DataFrame([dx])
diagnos_dat = base['DiagnosDat'][x]
recidiv_icd10 = base['ICD10'][x]
recidiv_icd10_class = recidiv_icd10[:n] if isinstance(recidiv_icd10, str) else ""
yz_list = []
for j in range(len(dx)):
yz_list += split_and_head_1(dx.iloc[j], ycol, zcol, recidiv_icd10_class, lex8, lex9, n = n)
if not (yz_list == []):
dyz = pd.DataFrame(yz_list)
dyz.columns = [ycol, zcol]
sy = dyz[ycol].drop_duplicates()
yminz_list = []
for y in sy:
dy = dyz[ dyz[ycol] == y ]
times = dy[zcol].values
z = first_after(times, diagnos_dat, gap)
if z != '0':
yminz_list += [[y, z]]
data = data + [ [x] + [yminz_list] ]
dz = pd.DataFrame(data)
if not dz.empty:
dz.columns = [xcol, fcol]
return dz
def all_times(df, xcol, ycol, zcol, acol, lex8, lex9, split = True, n = 1):
# xcol = 'LopNr'
# ycol = 'DIAGNOS'
# acol = 'all_incare'
data = []
df = df.dropna()
if df.empty:
return df
xs = df[xcol].drop_duplicates()
print()
print('all_time_compression')
print()
i = 0
for x in xs:
i += 1
if (i % 100) == 0:
print(i)
dx = df[ df[xcol] == x ]
diagnos_dat = dx['DiagnosDat'].values[0]
recidiv_icd10 = dx['ICD10'].values[0]
recidiv_icd10_class = recidiv_icd10[:n]
sz = dx[zcol].drop_duplicates()
yz_list = []
for z in sz:
dz = dx[ dx[zcol] == z ]
ys = dz[ycol].drop_duplicates()
if split:
ys = split_and_head(ys, recidiv_icd10_class, lex8, lex9, n = n)
for y in ys:
yz_list += [[y, z]]
if not (yz_list == []):
dyz = pd.DataFrame(yz_list)
dyz.columns = [ycol, zcol]
sy = dyz[ycol].drop_duplicates()
yallz_list = []
for y in sy:
dy = dyz[ dyz[ycol] == y ]
times = list(dy[zcol].values)
yallz_list += [[y, times]]
data = data + [ [x] + [yallz_list] ]
dz = pd.DataFrame(data)
if not dz.empty:
dz.columns = [xcol, acol]
return dz
def split_and_head_1(row, ycol, zcol, recidiv_icd10_class, lex8, lex9, n = 1):
if common.isnull(row[ycol]) or common.isnull(row[zcol]):
return []
cs = row[ycol].split()
ys = np.unique(head(cs, lex8, lex9, n = n))
ys = list(set(ys) - set([recidiv_icd10_class]))
return [[y, row[zcol]] for y in ys]
def all_times_aho(df, base, xcol, ycol, zcol, acol, lex8, lex9, split = True, n = 1):
# xcol = 'LopNr'
# ycol = 'DIAGNOS'
# acol = 'all_incare'
data = []
xs = base[xcol]
print()
print('all_time_compression aho')
print()
i = 0
df.set_index(xcol,drop=False,inplace=True)
for x in xs:
i += 1
if (i % 100) == 0:
print(i)
if x not in df.index:
continue
dx = df.loc[x]
if type(dx) != pd.DataFrame:
dx = pd.DataFrame([dx])
import unittest
import os
import tempfile
from collections import namedtuple
from blotter import blotter
from pandas.util.testing import assert_frame_equal, assert_series_equal, \
assert_dict_equal
import pandas as pd
import numpy as np
class TestBlotter(unittest.TestCase):
def setUp(self):
cdir = os.path.dirname(__file__)
self.prices = os.path.join(cdir, 'data/prices')
self.rates = os.path.join(cdir, 'data/rates/daily_interest_rates.csv')
self.log = os.path.join(cdir, 'data/events.log')
self.meta_log = os.path.join(cdir, 'data/meta_data.log')
def tearDown(self):
pass
def assertEventsEqual(self, evs1, evs2):
if len(evs1) != len(evs2):
raise(ValueError("Event lists length mismatch"))
for ev1, ev2 in zip(evs1, evs2):
self.assertEqual(ev1.type, ev2.type)
assert_dict_equal(ev1.data, ev2.data)
def assertEventTypes(self, evs1, evs2):
msg = "Event lists length mismatch\n\nLeft:\n%s \nRight:\n%s"
left_msg = ""
for ev in evs1:
left_msg += str(ev) + "\n"
right_msg = ""
for ev in evs2:
right_msg += ev.type + "\n"
msg = msg % (left_msg, right_msg)
if len(evs1) != len(evs2):
raise(ValueError(msg))
for ev1, ev2 in zip(evs1, evs2):
if ev1.type is not ev2.type:
raise(ValueError(msg))
def assertDictDataFrameEqual(self, dict1, dict2):
self.assertEqual(dict1.keys(), dict2.keys())
for key in dict1.keys():
try:
assert_frame_equal(dict1[key], dict2[key])
except AssertionError as e:
e.args = (("\nfor key %s\n" % key) + e.args[0],)
raise e
def make_blotter(self):
blt = blotter.Blotter(self.prices, self.rates)
return blt
def test_get_actions(self):
actions = [(pd.Timedelta("16h"), "PNL"),
(pd.Timedelta("16h"), "INTEREST")]
old_ts = pd.Timestamp("2017-01-04T10:30")
new_ts = pd.Timestamp("2017-01-06T10:30")
ac_ts = blotter.Blotter._get_actions(old_ts, new_ts, actions)
idx = pd.DatetimeIndex([pd.Timestamp("2017-01-04T16:00"),
pd.Timestamp("2017-01-04T16:00"),
pd.Timestamp("2017-01-05T16:00"),
pd.Timestamp("2017-01-05T16:00")])
ac_ts_ex = pd.Series(["PNL", "INTEREST", "PNL", "INTEREST"], index=idx)
assert_series_equal(ac_ts, ac_ts_ex)
def test_get_actions_weekend_filter(self):
actions = [(pd.Timedelta("16h"), "PNL"),
(pd.Timedelta("16h"), "INTEREST")]
old_ts = pd.Timestamp("2017-01-06T10:30")
new_ts = pd.Timestamp("2017-01-09T16:30")
ac_ts = blotter.Blotter._get_actions(old_ts, new_ts, actions)
idx = pd.DatetimeIndex([pd.Timestamp("2017-01-06T16:00"),
pd.Timestamp("2017-01-06T16:00"),
pd.Timestamp("2017-01-09T16:00"),
pd.Timestamp("2017-01-09T16:00")])
ac_ts_ex = pd.Series(["PNL", "INTEREST", "PNL", "INTEREST"], index=idx)
assert_series_equal(ac_ts, ac_ts_ex)
def test_trade_undefined_instrument(self):
blt = self.make_blotter()
ts = pd.Timestamp('2016-12-10T08:30:00')
instr = 'CLZ6'
qty = 1
price = 48.56
def make_trade():
blt._trade(ts, instr, qty, price)
self.assertRaises(KeyError, make_trade)
def test_get_meta_data(self):
blt = blotter.Blotter(self.prices, self.rates, base_ccy="USD")
# currency of instrument defaults to base ccy of blotter when not given
blt.define_generic("CL", margin=0.1, multiplier=100, commission=2.5,
isFX=False)
meta = namedtuple('metadata', ['ccy', 'margin', 'multiplier',
'commission', 'isFX'])
metadata_exp = meta("USD", 0.1, 100, 2.5, False)
metadata = blt._gnrc_meta["CL"]
self.assertEqual(metadata, metadata_exp)
def test_get_holdings_empty(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
hlds = blt.get_holdings_value(ts)
assert_series_equal(hlds, pd.Series())
def test_get_holdings_value_no_fx_conversion(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
qty = 1
price = 0
blt.define_generic("SXM", "ZAR", 0.1, 1, 2.5)
blt.map_instrument("SXM", "SXMZ15")
blt._trade(ts, 'SXMZ15', qty, price)
def no_fx():
return blt.get_holdings_value(ts)
self.assertRaises(KeyError, no_fx)
def test_get_holdings_timestamp_before(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-05T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
blt.define_generic("ES", "USD", 0.1, 100, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price)
ts = pd.Timestamp('2015-08-04T00:00:00')
def get_holdings():
blt.get_holdings_value(ts)
self.assertRaises(ValueError, get_holdings)
def test_get_holdings_base_ccy(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
blt.define_generic("ES", "USD", 0.1, 100, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
hlds = blt.get_holdings_value(ts)
hlds_exp = pd.Series([2082.73 * 100], index=['ESZ15'])
assert_series_equal(hlds, hlds_exp)
def test_get_holds_AUD_instr_AUDUSD_fxrate(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'APZ15'
qty = 1
price = 5200
blt.define_generic("AP", "AUD", 0.1, 1, 2.5)
blt.map_instrument("AP", "APZ15")
blt._trade(ts, instr, qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
hlds = blt.get_holdings_value(ts)
hlds_exp = pd.Series([5283 * 0.73457], index=['APZ15'])
assert_series_equal(hlds, hlds_exp)
def test_get_holds_CAD_instr_USDCAD_fxrate(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'SXMZ15'
qty = 1
price = 802.52
blt.define_generic("SXM", "CAD", 0.1, 1, 2.5)
blt.map_instrument("SXM", "SXMZ15")
blt._trade(ts, instr, qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
hlds = blt.get_holdings_value(ts)
hlds_exp = pd.Series([795.95 / 1.3183], index=['SXMZ15'])
assert_series_equal(hlds, hlds_exp)
def test_get_instruments_empty(self):
blt = self.make_blotter()
blt.connect_market_data()
instrs = blt.get_instruments()
assert_series_equal(instrs, pd.Series())
def test_get_instruments_multiplier(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
blt.define_generic("ES", "USD", 0.1, 100, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price)
instrs = blt.get_instruments()
instrs_exp = pd.Series([qty], index=['ESZ15'])
import re
import pandas as pd
import torch
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
def collate_fn(batch):
inputs, attention_masks, labels = list(zip(*batch))
inputs = pad_sequence(inputs, batch_first=True)
attention_masks = pad_sequence(attention_masks, padding_value=False, batch_first=True)
labels = pad_sequence([l.transpose(0, 1).reshape(-1) for l in labels], batch_first=True, \
padding_value=-1).reshape(len(labels), -1, labels[0].shape[0]).transpose(1, 2).long()
return inputs, attention_masks, labels
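# A minimal usage sketch (assumes a dataset instance such as CharbertDataset below):
#
#     from torch.utils.data import DataLoader
#     loader = DataLoader(dataset, batch_size=8, shuffle=True, collate_fn=collate_fn)
#     for inputs, attention_masks, labels in loader:
#         ...  # inputs: (B, L), attention_masks: (B, L), labels: (B, n_attributes, L)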
def make_title(plain):
for line in plain:
pos = line.find(' - Wikipedia Dump')
if pos != -1:
title = line[:pos]
break
return title
class SearchBottomLine:
def __init__(self):
self.bottom_flag = False
def search(self, line):
if self.bottom_flag == False:
if re.search(r'」から取得', line) is not None:
self.bottom_flag = True
if line in ['脚注\n', '参考文献\n', '外部リンク\n', '関連項目\n']:
self.bottom_flag = True
if self.bottom_flag == True:
return True
return False
def remove_char(char_dict, line_flag):
if re.match(r'\S', char_dict['char']) is None:
return True
if char_dict['line_id'] <= 26:
return True
if line_flag == True:
return True
return False
def remove_line(line):
if line[0] == '^':
line = ' '*len(line)
pattern = r'「?(https?|ftp|file)://[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|][-A-Za-z0-9+&@#/%=~_|]+[-A-Za-z0-9+&@#/%=~_|]*?'
if re.search(pattern, line):
line = re.sub(pattern, ' '*len(re.search(pattern,line).group()), line)
pattern = r'((http|ftp|https)://)*(([a-zA-Z0-9\._-]+\.[a-zA-Z]{2,6})|([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}))(:[0-9]{1,4})*(/[a-zA-Z0-9\&%_\./-~-]*)?'
if re.search(pattern, line):
line = re.sub(pattern, ' '*len(re.search(pattern,line).group()), line)
pattern = r'ファイル:(.+?\..{3})'
if re.search(pattern, line):
line = re.sub(pattern, ' '*len(re.search(pattern, line).group()), line)
pattern = r'移動先: 案内、 検索'
if re.search(pattern, line):
line = re.sub(pattern, ' '*len(re.search(pattern, line).group()), line)
pattern = r'出典: フリー百科事典『ウィキペディア(Wikipedia)』'
if re.search(pattern, line):
line = re.sub(pattern, ' '*len(re.search(pattern, line).group()), line)
return line
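# Example: remove_line('see https://example.com for details\n') blanks the URL with
# an equal number of spaces, so character offsets within the line are preserved.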
class CharbertDataset(Dataset):
def __init__(self, page2plain, tokenizer, attr2idx, bio2idx, max_length, block, df_anno=None):
self.page2plain = page2plain
self.tokenizer = tokenizer
self.attr2idx = attr2idx
self.bio2idx = bio2idx
self.block = block
self.df_anno = df_anno
self.max_length = max_length
self._make_dataset()
def __len__(self):
return self.data_num
def __getitem__(self, idx):
return self.out_inputs[idx], self.out_attention_masks[idx], self.out_labels[idx]
def _make_loa2bio(self, page_id):
loa2bio = {}
if self.df_anno is not None:
for df_dict in self.df_anno[self.df_anno['page_id'] == page_id].to_dict('records'):
sl = df_dict['text_offset']['start']['line_id']
so = df_dict['text_offset']['start']['offset']
el = df_dict['text_offset']['end']['line_id']
eo = df_dict['text_offset']['end']['offset']
attr = df_dict['attribute']
for il, l in enumerate(range(sl, el+1)):
if l not in loa2bio:
loa2bio[l] = {}
for io, o in enumerate(range(so, eo)):
if o not in loa2bio[l]:
loa2bio[l][o] = {}
if il == 0 and io == 0:
loa2bio[l][o][attr] = 'B'
else:
loa2bio[l][o][attr] = 'I'
return loa2bio
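# Resulting structure (illustrative): loa2bio[line_id][offset][attribute] -> 'B' or 'I',
# e.g. {5: {10: {'名前': 'B'}, 11: {'名前': 'I'}}} for a two-character mention on line 5.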
def _make_char_list_line(self, plain):
sbl = SearchBottomLine()
char_idx = 0
new_char_list, new_char_idx = [], 0
block_id, block_idx = 0, 0
for line_id, line in enumerate(plain):
line_flag = False
line = remove_line(line)
line_flag = sbl.search(line)
for offset, char in enumerate(line):
char_dict = {'line_id':line_id, 'offset':offset, \
'char_idx':char_idx, 'char':char}
char_idx += 1
if remove_char(char_dict, line_flag) == True:
continue
if block_idx >= self.max_length-2:
continue
new_char_dict = char_dict.copy()
new_char_dict['new_char_idx'] = new_char_idx
new_char_dict['block_id'] = block_id
new_char_dict['block_idx'] = block_idx
new_char_list.append(new_char_dict)
new_char_idx += 1
block_idx += 1
block_id += 1
block_idx = 0
return new_char_list
def _make_char_list_char(self, plain):
sbl = SearchBottomLine()
char_idx = 0
new_char_list, new_char_idx = [], 0
for line_id, line in enumerate(plain):
line_flag = False
line = remove_line(line)
line_flag = sbl.search(line)
for offset, char in enumerate(line):
char_dict = {'line_id':line_id, 'offset':offset, \
'char_idx':char_idx, 'char':char}
char_idx += 1
if remove_char(char_dict, line_flag) == True:
continue
new_char_dict = char_dict.copy()
new_char_dict['new_char_idx'] = new_char_idx
new_char_dict['block_id'] = new_char_idx // (self.max_length-2)
new_char_dict['block_idx'] = new_char_idx % (self.max_length-2)
new_char_list.append(new_char_dict)
new_char_idx += 1
return new_char_list
def _make_io(self, df_block, loa2bio):
chars = ['[CLS]']
labels = -torch.ones(len(self.attr2idx), len(df_block)+2).long()
for char_dict in df_block.to_dict('records'):
chars.append(char_dict['char'])
labels[:, char_dict['block_idx']+1] = self.bio2idx['O']
if char_dict['line_id'] in loa2bio:
if char_dict['offset'] in loa2bio[char_dict['line_id']]:
attr2bio = loa2bio[char_dict['line_id']][char_dict['offset']]
for attr, bio in attr2bio.items():
labels[self.attr2idx[attr], char_dict['block_idx']+1] = self.bio2idx[bio]
chars += ['[SEP]']
input_ids = torch.LongTensor(self.tokenizer.convert_tokens_to_ids(chars))
attention_masks = torch.ones(input_ids.shape).bool()
return input_ids, attention_masks, labels
def _make_dataset(self, ):
self.out_inputs, self.out_attention_masks = [], []
self.out_labels = []
self.data_num = 0
self.df_new = {}
self.page2title = {}
for page_id in sorted(self.page2plain.keys()):
loa2bio = self._make_loa2bio(page_id)
plain = self.page2plain[page_id]
self.page2title[page_id] = make_title(plain)
if self.block == 'line':
new_char_list = self._make_char_list_line(plain)
else:
new_char_list = self._make_char_list_char(plain)
df_new = pd.DataFrame(new_char_list)
self.df_new[page_id] = df_new
for block_id in sorted(set(df_new['block_id'])):
df_block = df_new[df_new['block_id'] == block_id]
inputs, attention_masks, labels = self._make_io(df_block, loa2bio)
self.out_inputs.append(inputs)
self.out_attention_masks.append(attention_masks)
self.out_labels.append(labels)
self.data_num += 1
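# Minimal wiring sketch for the dataset above (illustrative only; `page2plain`,
# `attr2idx`, `bio2idx`, `df_anno` and a character-level Japanese BERT tokenizer
# are assumed to be prepared elsewhere in this project).
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    # ds = CharbertDataset(page2plain, tokenizer, attr2idx, bio2idx,
    #                      max_length=512, block='line', df_anno=df_anno)
    # loader = DataLoader(ds, batch_size=8, shuffle=True)
    # input_ids, attention_masks, labels = next(iter(loader))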
class ShibaDataset(Dataset):
def __init__(self, page2plain, tokenizer, attr2idx, bio2idx, max_length, block, df_anno=None):
self.page2plain = page2plain
self.attr2idx = attr2idx
self.bio2idx = bio2idx
self.df_anno = df_anno
self.max_length = max_length
self.block = block
self.tokenizer = tokenizer
self.cls = self.tokenizer.CLS
self.pad = self.tokenizer.PAD
self._make_dataset()
def __len__(self):
return self.data_num
def __getitem__(self, idx):
return self.out_inputs[idx], self.out_attention_masks[idx], self.out_labels[idx]
def _make_loa2bio(self, page_id):
loa2bio = {}
if self.df_anno is not None:
for df_dict in self.df_anno[self.df_anno['page_id'] == page_id].to_dict('records'):
sl = df_dict['text_offset']['start']['line_id']
so = df_dict['text_offset']['start']['offset']
el = df_dict['text_offset']['end']['line_id']
eo = df_dict['text_offset']['end']['offset']
attr = df_dict['attribute']
for il, l in enumerate(range(sl, el+1)):
if l not in loa2bio:
loa2bio[l] = {}
for io, o in enumerate(range(so, eo)):
if o not in loa2bio[l]:
loa2bio[l][o] = {}
if il == 0 and io == 0:
loa2bio[l][o][attr] = 'B'
else:
loa2bio[l][o][attr] = 'I'
return loa2bio
def _make_char_list_line(self, plain):
sbl = SearchBottomLine()
char_idx = 0
new_char_list, new_char_idx = [], 0
block_id, block_idx = 0, 0
for line_id, line in enumerate(plain):
line_flag = False
line = remove_line(line)
line_flag = sbl.search(line)
for offset, char in enumerate(line):
char_dict = {'line_id':line_id, 'offset':offset, \
'char_idx':char_idx, 'char':char}
char_idx += 1
if remove_char(char_dict, line_flag) == True:
continue
if block_idx >= self.max_length-2:
continue
new_char_dict = char_dict.copy()
new_char_dict['new_char_idx'] = new_char_idx
new_char_dict['block_id'] = block_id
new_char_dict['block_idx'] = block_idx
new_char_list.append(new_char_dict)
new_char_idx += 1
block_idx += 1
block_id += 1
block_idx = 0
return new_char_list
def _make_char_list_char(self, plain):
sbl = SearchBottomLine()
char_idx = 0
new_char_list, new_char_idx = [], 0
for line_id, line in enumerate(plain):
line_flag = False
line = remove_line(line)
line_flag = sbl.search(line)
for offset, char in enumerate(line):
char_dict = {'line_id':line_id, 'offset':offset, \
'char_idx':char_idx, 'char':char}
char_idx += 1
if remove_char(char_dict, line_flag) == True:
continue
new_char_dict = char_dict.copy()
new_char_dict['new_char_idx'] = new_char_idx
new_char_dict['block_id'] = new_char_idx // (self.max_length-1)
new_char_dict['block_idx'] = new_char_idx % (self.max_length-1)
new_char_list.append(new_char_dict)
new_char_idx += 1
return new_char_list
def _make_io(self, df_block, loa2bio):
chars = [self.cls]
if len(df_block) <= 3:
labels = -torch.ones(len(self.attr2idx), len(df_block)+4).long()
else:
labels = -torch.ones(len(self.attr2idx), len(df_block)+1).long()
for char_dict in df_block.to_dict('records'):
chars.append(ord(char_dict['char']))
labels[:, char_dict['block_idx']+1] = self.bio2idx['O']
if char_dict['line_id'] in loa2bio:
if char_dict['offset'] in loa2bio[char_dict['line_id']]:
attr2bio = loa2bio[char_dict['line_id']][char_dict['offset']]
for attr, bio in attr2bio.items():
labels[self.attr2idx[attr], char_dict['block_idx']+1] = self.bio2idx[bio]
if len(df_block) <= 3:
chars += [self.pad, self.pad, self.pad]
input_ids = torch.LongTensor(chars)
attention_masks = (input_ids == -1)
return input_ids, attention_masks, labels
def _make_dataset(self, ):
self.out_inputs, self.out_attention_masks = [], []
self.out_labels = []
self.data_num = 0
self.df_new = {}
self.page2title = {}
for page_id in sorted(self.page2plain.keys()):
loa2bio = self._make_loa2bio(page_id)
plain = self.page2plain[page_id]
self.page2title[page_id] = make_title(plain)
if self.block == 'line':
new_char_list = self._make_char_list_line(plain)
else:
new_char_list = self._make_char_list_char(plain)
            df_new = pd.DataFrame(new_char_list)
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
import html
import itertools
import os
import re
import feedparser
import pandas as pd
import requests
import zenhan
BRANCHS = ['jp', 'en', 'ru', 'ko', 'es', 'cn', 'cs',
'fr', 'pl', 'th', 'de', 'it', 'ua', 'pt', 'uo']
currentpath = os.path.dirname(os.path.abspath(__file__))
def get_country_from_code(brt):
if brt.isalpha():
brt = brt.upper()
try:
dictionary = pd.read_csv(
currentpath + "/data/ISO3166-1.CSV"
)
except FileNotFoundError as e:
print(e)
country = dictionary.query('二字 == @brt')
if country.empty:
return "該当する国コードは存在しません"
else:
country = country.values.tolist()
country = itertools.chain(*country)
country = list(country)
return country[0] + "支部はまだ存在しませんよ?"
else:
return "国コードが正しくありません."
def scp_number(msg):
msg = zenhan.z2h(msg.casefold()).replace("-", "").replace("scp", "")
number = re.sub("\\D", "", msg)
    if number == "":
return None
brt = msg.replace(number, "")
if brt == "":
brt = "en"
    if brt not in BRANCHS:  # needs improvement
reply = get_country_from_code(brt)
return reply
try:
dictionary = pd.read_csv(currentpath + "/data/scps.csv", index_col=0)
except FileNotFoundError as e:
print(e)
result = dictionary.query('branches in @brt')
result = result.query('url.str.contains(@number)', engine='python')
result = result[0:1].values.tolist()
result = itertools.chain(*result)
result = list(result)
    if len(result) == 0 or number != re.sub("\\D", "", result[0]):
if len(number) > 4:
return None
if "en" in brt:
return("scp-" + str(number) + "はまだ存在しません")
else:
return("scp-" + str(number) + "-" + str(brt) + "はまだ存在しません")
return(result)
def src_tale(msg):
result = pd.DataFrame(columns=['url', 'title', 'author', 'branches'])
try:
dictionary = pd.read_csv(
currentpath +
f"/data/tale.csv",
index_col=0)
except FileNotFoundError as e:
print(e)
'''if brt is not "*":
dictionary = dictionary.query('branches in @brt')'''
dictionary_url = dictionary.query(
'url.str.contains(@msg)', engine='python')
dictionary_title = dictionary.query(
'title.str.contains(@msg)', engine='python')
dictionary_author = dictionary.query(
'author.str.contains(@msg)', engine='python')
result = pd.concat([dictionary_url, dictionary_title, dictionary_author])
result = result.drop_duplicates()
return result
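# Stand-alone illustration (not used by the bot) of the lookup pattern shared by
# src_tale / src_proposal / src_joke: `.str.contains` inside DataFrame.query
# needs engine='python', the `@` prefix binds a local variable, and
# concat + drop_duplicates merges the per-column hits.
if __name__ == '__main__':
    _df = pd.DataFrame({'url': ['scp-173', 'a-tale-of-two'],
                        'title': ['The Sculpture', 'A tale of two'],
                        'author': ['alice', 'bob']})
    _msg = 'tale'
    _hits = pd.concat([
        _df.query('url.str.contains(@_msg)', engine='python'),
        _df.query('title.str.contains(@_msg)', engine='python'),
    ]).drop_duplicates()
    print(_hits)  # the matching row appears once despite two hits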
def src_proposal(msg):
result = pd.DataFrame(columns=['url', 'title', 'author', 'branches'])
try:
dictionary = pd.read_csv(
currentpath +
f"/data/proposal.csv",
index_col=0)
except FileNotFoundError as e:
print(e)
dictionary_url = dictionary.query(
'url.str.contains(@msg)', engine='python')
dictionary_title = dictionary.query(
'title.str.contains(@msg)', engine='python')
result = pd.concat([dictionary_url, dictionary_title])
result = result.drop_duplicates()
return result
def src_joke(msg):
result = pd.DataFrame(columns=['url', 'title', 'author', 'branches'])
try:
dictionary = pd.read_csv(
currentpath +
f"/data/joke.csv",
index_col=0)
except FileNotFoundError as e:
print(e)
dictionary_url = dictionary.query(
'url.str.contains(@msg)', engine='python')
dictionary_title = dictionary.query(
'title.str.contains(@msg)', engine='python')
    result = pd.concat([dictionary_url, dictionary_title])
from sqlalchemy import true
import FinsterTab.W2020.DataForecast
import datetime as dt
from FinsterTab.W2020.dbEngine import DBEngine
import pandas as pd
import sqlalchemy as sal
import numpy
from datetime import datetime, timedelta, date
import pandas_datareader.data as dr
def get_past_data(self):
"""
Get raw data from Yahoo! Finance for SPY during Great Recession
Store data in MySQL database
:param sources: provides ticker symbols of instruments being tracked
"""
# Assume that date is 2010
now = dt.date(2009, 1, 1) # Date Variables
start = now - timedelta(days=1500) # get date value from 5 years ago
end = now
# data will be a 2D Pandas Dataframe
data = dr.DataReader('SPY', 'yahoo', start, end)
symbol = [3] * len(data) # add column to identify instrument id number
data['instrumentid'] = symbol
data = data.reset_index() # no designated index - easier to work with mysql database
# Yahoo! Finance columns to match column names in MySQL database.
# Column names are kept same to avoid any ambiguity.
# Column names are not case-sensitive.
data.rename(columns={'Date': 'date', 'High': 'high', 'Low': 'low', 'Open': 'open', 'Close': 'close',
'Adj Close': 'adj close', 'Volume': 'volume'}, inplace=True)
data.sort_values(by=['date']) # make sure data is ordered by trade date
# send data to database
# replace data each time program is run
data.to_sql('dbo_paststatistics', self.engine, if_exists=('replace'),
index=False,
dtype={'date': sal.Date, 'open': sal.FLOAT, 'high': sal.FLOAT, 'low': sal.FLOAT,
'close': sal.FLOAT, 'adj close': sal.FLOAT, 'volume': sal.FLOAT})
# Tests the accuracy of the old functions
def accuracy(self):
query = 'SELECT * FROM dbo_algorithmmaster'
algorithm_df = pd.read_sql_query(query, self.engine)
query = 'SELECT * FROM dbo_instrumentmaster'
instrument_master_df = pd.read_sql_query(query, self.engine)
# Changes algorithm code
for code in range(len(algorithm_df)):
# Dynamic range for changing instrument ID starting at 1
for ID in range(1, len(instrument_master_df) + 1):
query = 'SELECT * FROM dbo_algorithmforecast AS a, dbo_instrumentstatistics AS b WHERE a.forecastdate = b.date AND' \
' a.instrumentid = %d AND b.instrumentid = %d AND a.algorithmcode = "%s"' % (
ID, ID, algorithm_df['algorithmcode'][code])
df = pd.read_sql_query(query, self.engine)
count = 0
# Calculates accuracy
for x in range((len(df) - 1)):
# Check if upward or downward trend
if (df['close'][x + 1] > df['close'][x] and df['forecastcloseprice'][x + 1] > df['forecastcloseprice'][
x]) or \
(df['close'][x + 1] < df['close'][x] and df['forecastcloseprice'][x + 1] <
df['forecastcloseprice'][
x]):
count += 1
# Populates absolute_percent_error with the calculated percent error for a specific data point
absolute_percent_error = []
for i in range(len(df)):
absolute_percent_error.append(
abs((df['close'].loc[i] - df['forecastcloseprice'].loc[i]) / df['close'].loc[i]))
# Calculate sum of percent error and find average
average_percent_error = 0
for i in absolute_percent_error:
average_percent_error = average_percent_error + i
average_percent_error = average_percent_error / len(df)
# return the average percent error calculated above
print("Average percent error for instrument: %d and algorithm: %s " % (ID, algorithm_df['algorithmcode'][code]), average_percent_error)
#print('Algorithm:', algorithm_df['algorithmcode'][code])
#print('instrumentid: %d' % ID, instrument_master_df['instrumentname'][ID - 1])
#print('length of data is:', len(df))
#print('number correct: ', count)
d = len(df)
b = (count / d) * 100
#print('The accuracy is: %.2f%%\n' % b)
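# Toy illustration (separate from the database-driven methods above and below)
# of the two error measures used throughout this module: directional ("trend")
# accuracy and average absolute percent error.
if __name__ == '__main__':
    _close = [100.0, 102.0, 101.0, 104.0]
    _forecast = [100.5, 103.0, 100.0, 103.0]
    _hits = sum(
        1 for t in range(len(_close) - 1)
        if (_close[t + 1] - _close[t]) * (_forecast[t + 1] - _forecast[t]) > 0
    )
    print('trend accuracy: %.2f%%' % (_hits / (len(_close) - 1) * 100))
    _ape = [abs(c - f) / c for c, f in zip(_close, _forecast)]
    print('average percent error: %.4f' % (sum(_ape) / len(_ape)))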
# Isolated tests for ARIMA as we where trying to determine why it was so accurate
def arima_accuracy(self):
query = 'SELECT * FROM dbo_algorithmforecast AS a, dbo_instrumentstatistics AS b WHERE a.forecastdate = b.date AND' \
' a.instrumentid = 1 AND b.instrumentid = 1 AND a.algorithmcode = "ARIMA"'
df = pd.read_sql_query(query, self.engine)
df = df.tail(10)
    df = df.reset_index(drop=True)
#print(df)
arima_count = 0
for x in range((len(df) - 1)):
# Check if upward or downward trend
if df['close'][x + 1] > df['close'][x] and df['forecastcloseprice'][x + 1] > df['forecastcloseprice'][x] \
or (df['close'][x + 1] < df['close'][x] and df['forecastcloseprice'][x + 1] < df['forecastcloseprice'][x]):
arima_count += 1
#print(df['close'], df['forecastcloseprice'])
#print(arima_count)
#print(arima_count/len(df))
# Accuracy test for the new function MSF1
def MSF1_accuracy(self):
# Queires the database to grab all of the Macro Economic Variable codes
query = "SELECT macroeconcode FROM dbo_macroeconmaster WHERE activecode = 'A'"
id = pd.read_sql_query(query, self.engine)
id = id.reset_index(drop=True)
# Queries the database to grab all of the instrument IDs
query = 'SELECT instrumentid FROM dbo_instrumentmaster'
id2 = pd.read_sql_query(query, self.engine)
id2 = id2.reset_index(drop=True)
# These are the date ranges we are working with
# start_date represents the starting date for the forecasts and the end of the training dates
start_date = "'2018-01-01'"
# end_date represents the date for which the forecasting ends
end_date = "'2020-01-01'"
# train_date represents the date we start collecting the instrument statistics used to forecast prices
train_date = "'2016-01-01'"
# Bool to determine whether we append to dbo_tempvisualize or replace the values
to_append = False
# Create a for loop to iterate through all of the instrument ids
for v in id2['instrumentid']:
# Initializes a list for which we will eventually be storing all data to add to the macroeconalgorithm database table
data = []
# Data1 will be used to store the forecastdate, instrumentid, forecastprice, and algorithm code
# It will be used to graph our backtested forecast against the actual instrument prices
data1 = []
# Getting Dates for Future Forecast as well as actual close prices for instrumentID#
# We chose 2018 - 2020, to alter this date range simply change the dates in the 3rd line of the query for the dates you want to test on
# Make sure they are valid dates as some instruments only have statistics that go back so far, check the instrument statistic table to figure out how far back each instrument goes
query = "SELECT date, close FROM ( SELECT date, close, instrumentID, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_instrumentstatistics WHERE instrumentid = {} AND date BETWEEN {} AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(v, start_date, end_date)
# instrument_stats will hold the closing prices and the dates for the dates we are forecasting for
instrument_stats = pd.read_sql_query(query, self.engine)
# We isolate the dates and closing prices into individual arrays to make them easier to work with
date = []
close = []
for i in instrument_stats['date']:
date.append(i)
for i in instrument_stats['close']:
close.append(i)
# n will always correspond to the amount of dates, as the amount of dates is the number of data points being compared
n = len(date)
# Median_forecast will be a dictionary where the key is the date and the value is a list of forecasted prices
median_forecast = {}
# This disctionary will be used to easily combine all of the forecasts for different dates to determine the median forecast value
for i in date:
temp = {i: []}
median_forecast.update(temp)
# This query will grab quarterly instrument prices from between 2014 and the current date to be used in the forecasting
query = "SELECT date, close, instrumentid FROM ( SELECT date, close, instrumentid, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_instrumentstatistics WHERE instrumentid = {} AND date BETWEEN {} AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(v, train_date, start_date)
# Executes the query and stores the result in a dataframe variable
df2 = pd.read_sql_query(query, self.engine)
# This for loop iterates through the different macro economic codes to calculate the percent change for each macroeconomic variable
for x in id['macroeconcode']:
# Retrieves the most recent macro economic statistics prior to the date for which we are testing our algorithm
query = "SELECT * FROM dbo_macroeconstatistics WHERE macroeconcode = {} and date <= {} ".format('"' + str(x) + '"', start_date)
df = pd.read_sql_query(query, self.engine)
macro = df.tail(n)
SP = df2.tail(n)
temp = df.tail(n + 1)
temp = temp.reset_index()
# Converts macro variables to precent change
macroPercentChange = macro
macro = macro.reset_index(drop=True)
SP = SP.reset_index(drop=True)
macroPercentChange = macroPercentChange.reset_index(drop=True)
for i in range(0, n):
if (i == 0):
macrov = (macro['statistics'][i] - temp['statistics'][i]) / temp['statistics'][i]
macroPercentChange['statistics'].iloc[i] = macrov * 100
else:
macrov = (macro['statistics'][i] - macro['statistics'][i - 1]) / macro['statistics'][i - 1]
macroPercentChange['statistics'].iloc[i] = macrov * 100
# Algorithm for forecast price
S = calc(self, macroPercentChange, SP,n) # Calculates the average GDP and S&P values for the given data points over n days and performs operations on GDP average
# isFirst will determine whether or not this is the first calculation being done
# If it is true then we use the most recent instrument statistic to forecast the first pricepoint
# IF it is false then we use the previous forecast price to predict the next forecast price
isFirst = True
# temp_price will be used to hold the previous forecast price for the next prediction
temp_price = 0
# Setup a for loop to calculate the final forecast price and add data to the list variable data
for i in range(n):
if isFirst:
if x in [2, 3, 4]:
temp_price = ((S * (SP['close'].iloc[n-1])) + (SP['close'].iloc[n-1]))
isFirst = False
else:
temp_price = ((S * SP['close'].iloc[n-1]) + SP['close'].iloc[n-1])
isFirst = False
else:
if x in [2, 3, 4]:
temp_price = ((S * temp_price) + temp_price)
else:
temp_price = ((S * temp_price) + temp_price)
# Once the forecast price is calculated append it to median_forecast list
median_forecast[date[i]].append(temp_price)
# Calculates the median value for each date using a list of prices forecasted by each individual macro economic variable
forecast_prices = []
for i in date:
# Sort the forecasted prices based on date
sorted_prices = sorted(median_forecast[i])
# calculate the median forecasted price for each date
            if len(sorted_prices) % 2 == 0:
                center = int(len(sorted_prices) / 2)
                forecast_prices.append((sorted_prices[center] + sorted_prices[center - 1]) / 2)
            else:
                center = int(len(sorted_prices) / 2)
                forecast_prices.append(sorted_prices[center])
# Set up a for loop to construct a list using variables associated with macroeconalgorithm database table
for i in range(len(forecast_prices)):
data.append([date[i], v, 'ALL', forecast_prices[i], close[i], 'MSF1', 0])
data1.append([date[i], v, forecast_prices[i], 'MSF1'])
# Convert data list to dataframe variable
df = pd.DataFrame(data, columns=['forecastdate', 'instrumentid', 'macroeconcode',
'forecastcloseprice', 'close', 'algorithmcode', 'prederror'])
df1 = pd.DataFrame(data1, columns=['forecastdate', 'instrumentid', 'forecastcloseprice', 'algorithmcode'])
df1.to_sql('dbo_tempvisualize', self.engine, if_exists=('replace' if not to_append else 'append'), index=False)
to_append = True
# Populates absolute_percent_error with the calculated percent error for a specific data point
absolute_percent_error = []
for i in range(n):
absolute_percent_error.append(abs((df['close'].loc[i] - df['forecastcloseprice'].loc[i]) / df['close'].loc[i]))
# Calculate sum of percent error and find average
average_percent_error = 0
for i in absolute_percent_error:
average_percent_error = average_percent_error + i
average_percent_error = average_percent_error / n
count = 0
# Calculates trend accuracy
for x in range((len(df) - 1)):
# Check if upward or downward trend
if (df['close'][x + 1] > df['close'][x] and df['forecastcloseprice'][x + 1] > df['forecastcloseprice'][
x]) or \
(df['close'][x + 1] < df['close'][x] and df['forecastcloseprice'][x + 1] <
df['forecastcloseprice'][
x]):
count += 1
length = len(df)
trend_error = (count / length) * 100
print("Trend accuracy for %s for instrument %d is %.2f%%" % ('MSF1', v, trend_error))
print("The average percent error for %s for instrument %d is %.2f%%" % ('MSF1', v, average_percent_error * 100))
# return the average percent error calculated above
# This function is not currently used, it can be used to check the accuracy of MSF2 but will need set weightings
# The functions below this one will test the accuracy using a variety of weightings and choose the weightings with the best results
def MSF2_accuracy(self):
n = 8
#Gets the macro economic variables codes and names to loop through the inidividual macro variables
query = "SELECT macroeconcode, macroeconname FROM dbo_macroeconmaster WHERE activecode = 'A'"
data = pd.read_sql_query(query, self.engine)
macrocodes = []
indicators = {}
for i in range(len(data['macroeconcode'])):
macrocodes.append(data['macroeconcode'].loc[i])
d = {data['macroeconcode'].loc[i]: []}
indicators.update(d)
#Gets the instrument ids to loop through the individual instruments
query = 'SELECT instrumentid, instrumentname FROM dbo_instrumentmaster'
data = pd.read_sql_query(query, self.engine)
instrumentids = []
for i in data['instrumentid']:
instrumentids.append(i)
# These are the date ranges we are working with
# start_date represents the starting date for the forecasts and the end of the training dates
start_date = "'2018-01-01'"
# end_date represents the date for which the forecasting ends
end_date = "'2020-01-01'"
# train_date represents the date we start collecting the instrument statistics used to forecast prices
train_date = "'2016-01-01'"
#Loops through each instrument id to preform error calculations 1 instrument at a time
for i in instrumentids:
#Gets the instrument statistics to run through the function
query = "SELECT date, close, instrumentid FROM ( SELECT date, close, instrumentID, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_instrumentstatistics WHERE instrumentid = {} AND date BETWEEN {} AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(i, train_date, start_date)
train_data = pd.read_sql_query(query, self.engine)
#Gets the instrument statistics to check against the forecast prices
query = "SELECT date, close, instrumentid FROM ( SELECT date, close, instrumentID, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_instrumentstatistics WHERE instrumentid = {} AND date BETWEEN {} AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(i, start_date, end_date)
        check_data = pd.read_sql_query(query, self.engine)
#!/usr/bin/env python
from itertools import combinations
import random
import scanpy.api as sc
import matplotlib.pyplot as plt
import numpy as np
from granatum_sdk import Granatum
import pandas as pd
import seaborn as sns
def main():
gn = Granatum()
tb1 = gn.pandas_from_assay(gn.get_import('assay1'))
tb2 = gn.pandas_from_assay(gn.get_import('assay2'))
label1 = gn.get_arg('label1')
label2 = gn.get_arg('label2')
direction = gn.get_arg('direction')
normalization = gn.get_arg('normalization')
if direction == 'samples':
tb1 = tb1.T
tb2 = tb2.T
overlapped_index = set(tb1.index) & set(tb2.index)
tb1.index = [f"{label1}_{x}" if x in overlapped_index else x for x in tb1.index]
tb2.index = [f"{label2}_{x}" if x in overlapped_index else x for x in tb2.index]
if normalization == 'none':
        tb = pd.concat([tb1, tb2], axis=0)
import pandas as pd
import numpy as np
import sys
import os
clear = lambda: os.system('cls')
clear()
print("\n3. FILTRO BASADO EN CONTENIDO: PALABRAS CLAVES\n")
path="ml-latest-small"
movies = pd.read_csv(path+'/moviesES.csv', sep=',', encoding='latin-1', usecols=['movieId', 'title', 'genres'])
ratings = pd.read_csv(path+'/ratings.csv', sep=',', encoding='latin-1', usecols=['movieId', 'rating'])
tags = pd.read_csv(path+'/tags.csv', sep=',', encoding='latin-1', usecols=['movieId', 'tag'])
tags['movieId'] = tags['movieId'].fillna(0)
ratings['movieId'] = ratings['movieId'].fillna(0)
dataset = pd.merge(movies, tags, on='movieId')
# First, we append the previous level to the sys.path var:
import sys, os
# We append the repository path to the sys.path so that we can import packages easily.
sys.path.append(os.path.expandvars('${HOME}/Desktop/quant-research-env/'))
# Import the different classes:
from darwinexapis.API.DarwinDataAnalyticsAPI.DWX_Data_Analytics_API import DWX_Darwin_Data_Analytics_API
from darwinexapis.API.InfoAPI.DWX_Info_API import DWX_Info_API
from darwinexapis.API.TradingAPI.DWX_Trading_API import DWX_Trading_API
from darwinexapis.API.InvestorAccountInfoAPI.DWX_AccInfo_API import DWX_AccInfo_API
# Import the logger:
import logging, time, websockets, json
import pandas as pd, numpy as np
from datetime import datetime
logger = logging.getLogger()
class DTestingMethods(object):
def __init__(self):
### Let's create the auth credentials:
self.AUTH_CREDS = {'access_token': '<PASSWORD>',
'consumer_key': 'Z4_p3FDLhI5x9pMlYWHvyiWW04Qa',
'consumer_secret': 'NR6hDOCbjJEfYzB2Hg1B9nfHhpAa',
'refresh_token': '<PASSWORD>'}
# Create the objects:
self._defineAPIObjects()
# Call investorAccs at the beginning:
self._listInvestorAccounts()
######################################## Auxiliary methods ########################################
def _defineAPIObjects(self, isDemo=True):
# Get the other APIs:
self.INFO_API = DWX_Info_API(self.AUTH_CREDS, _version=2.0, _demo=isDemo)
self.ACCOUNT_API = DWX_AccInfo_API(self.AUTH_CREDS, _version=2.0, _demo=isDemo)
self.TRADING_API = DWX_Trading_API(self.AUTH_CREDS, _version=1.1, _demo=isDemo)
def _checkInvalidCredentials(self, response, apiObject, methodCall):
# Check for the text:
if 'Invalid Credentials' in response:
# Generate new credentials:
logger.warning('[INVALID_CREDS] - Invalid credentials > ¡Generate TOKENS!')
apiObject.AUTHENTICATION._get_access_refresh_tokens_wrapper()
# Make the API call again:
methodCall()
else:
# Creds are okey:
logger.warning('[INVALID_CREDS] - Credentials are OKEY')
def _assertRequestResponse(self, response):
# Print response:
logger.warning(response)
# Print status_code and request boolean:
#logger.warning(response.status_code)
#logger.warning(response.ok)
def _convertToDataFrame(self, response, filterOrNot):
try:
if isinstance(response, list):
if filterOrNot:
response = pd.DataFrame(response)[filterOrNot]
logger.warning(response)
else:
response = pd.DataFrame(response)
logger.warning(response)
return response
elif isinstance(response, dict):
if filterOrNot:
response = pd.DataFrame.from_dict([response])[filterOrNot]
logger.warning(response)
else:
response = pd.DataFrame.from_dict([response])
logger.warning(response)
return response
except Exception as ex:
logger.warning(f'EXCEPTION > Could not convert to DataFrame: {ex}')
return None
def _getClosesDataFrame(self, response):
# Filter the dictionary and just get the close:
self.newDict = {key : value['close'] for key, value in response.items()}
# Convert to dataframe:
DF_CLOSE = pd.DataFrame.from_dict(self.newDict)
#DF_CLOSE.to_csv(os.path.expandvars('${HOME}/Desktop/quant-research-env/DARWINStrategyContentSeries/Data/') + 'ClosePricePortfolio.csv')
#DF_CLOSE.to_csv(os.path.expandvars('${HOME}/Desktop/') + 'ClosePricePortfolio.csv')
print('DATAFRAME SAVED')
# Drop NaNs:
#logger.warning(f'Quantity of NaNs: {DF_CLOSE.isnull().sum().sum()}')
DF_CLOSE.dropna(axis=0, inplace=True)
DF_CLOSE.dropna(axis=1, inplace=True)
#logger.warning(f'Quantity of NaNs: {DF_CLOSE.isnull().sum().sum()}')
# Return it:
return DF_CLOSE
######################################## Auxiliary methods ########################################
######################################## DARWIN APIs Requests ########################################
def _listInvestorAccounts(self):
# Get response:
RETURNED_RESPONSE = self.ACCOUNT_API._Get_Accounts_()
self._assertRequestResponse(RETURNED_RESPONSE)
#self._checkInvalidCredentials(RETURNED_RESPONSE, self.ACCOUNT_API, self.ACCOUNT_API._Get_Accounts_)
# Convert response:
RETURNED_RESPONSE = self._convertToDataFrame(RETURNED_RESPONSE, [])
# Get equity:
self.investedFraction = RETURNED_RESPONSE.loc[0, 'invested'] / RETURNED_RESPONSE.loc[0, 'equity']
logger.warning(self.investedFraction)
def _currentPositions(self, accountID):
# Get response:
RETURNED_RESPONSE = self.ACCOUNT_API._Get_Current_Open_Positions_(_id=accountID)
self._assertRequestResponse(RETURNED_RESPONSE)
print('#################################################')
# Convert response:
if RETURNED_RESPONSE:
RETURNED_RESPONSE = self._convertToDataFrame(RETURNED_RESPONSE, ['productName','invested','allocation','leverage','openPnl', 'currentQuote'])
return RETURNED_RESPONSE
else:
# If positions are none, it will return an empty list.
return
def _getAllocationsAndTrades(self, finalAllocationsDF, accountID, totalAuMPercentage=0.95):
# Get accounts and equity values:
ACCOUNT_VALUES = self.ACCOUNT_API._Get_Accounts_()
ACCOUNT_VALUES = self._convertToDataFrame(ACCOUNT_VALUES, [])
# Get invested fraction with the equite and invested capital:
equityValue = ACCOUNT_VALUES.loc[ACCOUNT_VALUES['id']==accountID, 'equity'][0] * totalAuMPercentage
investedValue = ACCOUNT_VALUES.loc[ACCOUNT_VALUES['id']==accountID, 'invested'][0]
investedFraction = round(investedValue / equityValue, 2)
# Get the actual positions allocations:
# The allocations are based on the actual positions equity, not on the total equity.
ACTUAL_POSITIONS = self._currentPositions(accountID=accountID)
ACTUAL_POSITIONS['allocation'] = ACTUAL_POSITIONS['allocation'] / 100
# Change the names in the productName col:
ACTUAL_POSITIONS['productName'] = ACTUAL_POSITIONS['productName'].apply(lambda x: x.split('.')[0])
# Get the allocations based on all the equity:
ACTUAL_POSITIONS['allocation_total'] = round(ACTUAL_POSITIONS['allocation'] * investedFraction, 2)
# Get actual allocations apart:
actualAllocationsSeries = ACTUAL_POSITIONS['allocation_total']
# The DataFrame should have columns like final_allocations_total and productName
# Concat the new DataFrame with the allocations for ALL the DARWINS (with and without positions):
# It will create a new column called final_allocations_total.
ACTUAL_POSITIONS_CONCAT = pd.concat([ACTUAL_POSITIONS, finalAllocationsDF], ignore_index=True).fillna(0)
# Aggregate with last to get duplicates out and get the last ones:
ACTUAL_POSITIONS_CONCAT_AGG = ACTUAL_POSITIONS_CONCAT.groupby('productName').agg('last').reset_index()
# Do the final_allocations_total_capital and final_rebalances at the end:
# Put the final_allocations_total_capital on a column:
ACTUAL_POSITIONS['final_allocations_total_capital'] = round(ACTUAL_POSITIONS['final_allocations_total'] * equityValue, 2)
logger.warning(ACTUAL_POSITIONS['final_allocations_total_capital'].sum())
# Get the final rebalance values:
ACTUAL_POSITIONS['final_rebalances'] = ACTUAL_POSITIONS['final_allocations_total_capital'] - ACTUAL_POSITIONS['invested']
# Get view of the dataframe:
TRADES = ACTUAL_POSITIONS[['productName', 'final_rebalances']].set_index('productName').to_dict()['final_rebalances']
TRADES = {eachKey : round(eachValue, 2) for eachKey, eachValue in TRADES.items()}
#TRADES_1 = ACTUAL_POSITIONS[['productName', 'final_allocations_total_capital']].set_index('productName').to_dict()['final_allocations_total_capital']
#TRADES_1 = {eachKey : round(eachValue, 2) for eachKey, eachValue in TRADES_1.items()}
### TRIAL: APPEND ROWS
df = pd.read_csv('/home/eriz/Desktop/tries.csv', index_col=0)
logger.warning(df)
#ACTUAL_POSITIONS = ACTUAL_POSITIONS.concat(df)
ACTUAL_POSITIONS.to_csv('/home/eriz/Desktop/tries2.csv')
logger.warning(ACTUAL_POSITIONS)
#logger.warning(TRADES)
#logger.warning(TRADES_1)
def _getAllocationsAndTradesNEW(self, finalAllocationsDict, accountID, totalAuMPercentage=0.95):
# Get accounts and equity values:
ACCOUNT_VALUES = self.ACCOUNT_API._Get_Accounts_()
ACCOUNT_VALUES = self._convertToDataFrame(ACCOUNT_VALUES, [])
# Get invested fraction with the equite and invested capital:
equityValue = ACCOUNT_VALUES.loc[ACCOUNT_VALUES['id']==accountID, 'equity'][0] * totalAuMPercentage
investedValue = ACCOUNT_VALUES.loc[ACCOUNT_VALUES['id']==accountID, 'invested'][0]
investedFraction = round(investedValue / equityValue, 2)
# Get the actual positions allocations:
# The allocations are based on the actual positions equity, not on the total equity.
ACTUAL_POSITIONS = self._currentPositions(accountID=accountID)
ACTUAL_POSITIONS['allocation'] = ACTUAL_POSITIONS['allocation'] / 100
# Change the names in the productName col:
ACTUAL_POSITIONS['productName'] = ACTUAL_POSITIONS['productName'].apply(lambda x: x.split('.')[0])
# Get the allocations based on all the equity:
ACTUAL_POSITIONS['allocation_total'] = round(ACTUAL_POSITIONS['allocation'] * investedFraction, 2)
logger.warning(ACTUAL_POSITIONS)
# Get the dictionary of allocation_total + productName:
ACTUAL_POS_DICT = ACTUAL_POSITIONS.set_index('productName').to_dict()['allocation_total']
logger.warning(ACTUAL_POS_DICT)
        ACTUAL_POS_DICT = {'HFD': 0.56, 'SYO': 0.36, 'HLS': 0.08}  # hard-coded test override of the computed allocations
logger.warning(ACTUAL_POS_DICT)
# Pass to the trades calculation method:
FINAL_ALLOCATIONS = self._finalTradesCalculation(ACTUAL_POS_DICT, finalAllocationsDict, equityValue)
logger.warning(FINAL_ALLOCATIONS)
# Return them:
#return FINAL_ALLOCATIONS
def _finalTradesCalculation(self, actualAlloDict, finalAlloDict, actualEquity):
# Set the new dictionary:
endAllocations = {}
# We loop with the final allocations to the actual ones:
for finalAsset, finalAllocation in finalAlloDict.items():
for actualAsset, actualAllocation in actualAlloDict.items():
# If we actually have a position in that asset, calculate:
if actualAsset == finalAsset:
# Get the change in capital we need to make:
capitalFinal = (finalAllocation * actualEquity) - (actualAllocation * actualEquity)
# Add it and add a boolean flag if we had actually the position (True) or not (False)
endAllocations[finalAsset] = [round(capitalFinal,2), True]
# If we don't have a position, add it or not:
elif actualAsset != finalAsset:
# If it is actually present, just pass.
if finalAsset in endAllocations:
pass
# If it is not, add it with the final allocation capital.
else:
# Get the capital we need to put there:
capitalFinal = finalAllocation * actualEquity
# Add it and add a boolean flag if we had actually the position (True) or not (False)
endAllocations[finalAsset] = [round(capitalFinal,2), False]
# Return the final dictionary:
return endAllocations
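    # Worked example for the method above (illustrative numbers): with current
    # allocations {'HFD': 0.56, 'SYO': 0.36, 'HLS': 0.08}, target allocations
    # {'HFD': 0.40, 'KLG': 0.20} and equity 10000, it returns
    # {'HFD': [-1600.0, True], 'KLG': [2000.0, False]}, i.e. trim the existing
    # HFD position by 1600 and open a new 2000 position in KLG; SYO and HLS are
    # left untouched because they do not appear in the target allocation.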
def _closeAllPositions(self, accountID):
# Close all positions:
RETURNED_RESPONSE = self.TRADING_API._Close_All_Account_Trades_(_id=accountID)
self._assertRequestResponse(RETURNED_RESPONSE)
# Convert response:
RETURNED_RESPONSE = self._convertToDataFrame(RETURNED_RESPONSE, [])
def _closeDARWINPosition(self, accountID, darwinToClose):
# Close specific darwin position:
# darwinToClose can be the string of just the DARWIN name or with the suffix: KLG OR KLG.5.2
RETURNED_RESPONSE = self.TRADING_API._Close_All_DARWIN_Trades_(_id=accountID, _darwin=darwinToClose)
self._assertRequestResponse(RETURNED_RESPONSE)
# Convert response:
RETURNED_RESPONSE = self._convertToDataFrame(RETURNED_RESPONSE, [])
######################################## DARWIN APIs Requests ########################################
######################################## Analysis API ########################################
def _createCandlePortfolio(self, symbols):
# Get DARWINs:
RETURNED_RESPONSE = self.INFO_API._Get_DARWIN_OHLC_Candles_(_symbols=symbols,
_resolution='1d', # 1m, 5m, 15m, 30m, 1h, 4h, 1d, 1w, 1mn
_from_dt='2019-08-31 12:00:00', # UTC > All need to have data up to the this dt
_to_dt=str(pd.Timestamp('now')),
_timeframe='/ALL') # 1D, 1W, 1M, 3M, 6M, 1Y, 2Y, ALL
self._assertRequestResponse(RETURNED_RESPONSE)
# Create a dataframe with just the close of each DARWIN:
RETURNED_RESPONSE = self._getClosesDataFrame(RETURNED_RESPONSE)
logger.warning(RETURNED_RESPONSE)
# Get actual quotes:
ACTUAL_POSITIONS = self._currentPositions(accountID=2000062056)
ACTUAL_POSITIONS_QUOTES = ACTUAL_POSITIONS['currentQuote'].values
logger.warning(ACTUAL_POSITIONS_QUOTES)
# Add last quote for every darwin:
# Put the same timestamp as others.
newData = pd.Series(ACTUAL_POSITIONS_QUOTES,
index=RETURNED_RESPONSE.columns,
name=datetime.now().replace(hour=21, minute=0, second=0, microsecond=0)) # name=datetime.now().strftime("%Y-%m-%d"))
RETURNED_RESPONSE_2 = RETURNED_RESPONSE.append(newData, verify_integrity=True)
logger.warning(RETURNED_RESPONSE_2)
def _createFilteredPortfolio(self):
# Get filtered DARWINs:
while True:
# If the hour is X, do something:
#if datetime.now().hour == 9:
RETURNED_RESPONSE = self.INFO_API._Get_Filtered_DARWINS_(_filters=[['d-score', 80, 100, 'actual'],
#['drawdown', -10, 0, '6m'],
['return', 3, 100, '1m']],
_order=['return','12m','DESC'],
_page=0, # Sets the page we want to start from
_perPage=50, # Sets the items per page we want to get
_delay=1.0)
self._assertRequestResponse(RETURNED_RESPONSE)
FILTERED_DARWIN_SYMBOLS = RETURNED_RESPONSE['productName'].to_list()
FILTERED_DARWIN_SYMBOLS = [eachSymbol.split('.')[0] for eachSymbol in FILTERED_DARWIN_SYMBOLS]
logger.warning(FILTERED_DARWIN_SYMBOLS)
# NOTE: Maintain the loop and recall the API to check if working.
            # Sleep before polling the API again (60 s here while testing the loop; increase to limit daily calls).
time.sleep(60)
#else:
# pass
def _createLastQuotes(self, symbols):
# Get DARWINs:
RETURNED_RESPONSE = self.INFO_API._Get_DARWIN_OHLC_Candles_(_symbols=symbols,
_resolution='1m', # 1m, 5m, 15m, 30m, 1h, 4h, 1d, 1w, 1mn
_from_dt=pd.to_datetime('today') - pd.to_timedelta(5, 'min'),
            _to_dt=str(pd.Timestamp('now')),
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 10 17:50:04 2021
@author: <NAME>
"""
import pandas as pd
import numpy as np
df = pd.read_csv(r'CoinDatasets\ripple_price.csv')
print(df)
#DataFrame indexing:
#1)Column-wise indexing: you can select a column by giving its column name
#2)If a DataFrame is indexed with iloc/loc you get entire rows
#2.1)df.iloc gives a Series for a single row and a DataFrame for a group of rows.[Use iloc for rows; it works with row positions]
#df.iloc[5:7] :returns a DataFrame
#df.iloc[5] :returns a Series
#df.iloc[[1,2]]=returns rows 1 and 2
#--------------------------
import pandas as pd
import numpy as np
df = pd.read_csv(r'CoinDatasets\ripple_price.csv')
result = df.iloc[10:20]
print(result)
#2.2)df.loc gives a Series for a single row and a DataFrame for a group of rows.[Access rows by their index labels]
#df.loc['A'] = give the label name and access that row
#3)Indexing with the DataFrame object itself: this selects columns, working with the column names
#selecting a single column gives a Series, selecting several columns gives a DataFrame
#--------------------------
import pandas as pd
import numpy as np
df = pd.read_csv(r'CoinDatasets\ripple_price.csv')
result = df[['Date', 'Open', 'Close']]
print(result)
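#added example for df.loc (selects rows by index label; unlike iloc, the end label is included)
import pandas as pd
import numpy as np
df = pd.read_csv(r'CoinDatasets\ripple_price.csv')
result = df.loc[0:5]
print(result)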
#applying a criterion to the rows:
import pandas as pd
import numpy as np
df = pd.read_csv(r'CoinDatasets\ripple_price.csv')
result = df.iloc[(df['Open'] > 2).values] #rows where the Open column is greater than 2
print(result)
#specifying a condition via DataFrame indexing, without using iloc
import pandas as pd
import numpy as np
df = pd.read_csv(r'CoinDatasets\ripple_price.csv')
from flask import Flask, request, jsonify
import traceback
import pandas as pd
import joblib
import sys
# Your API definition
app = Flask(__name__)
@app.route("/predict", methods=['GET','POST']) #use decorator pattern for the route
def predict():
if lr:
try:
json_ = request.json
print(json_)
            query = pd.DataFrame(json_)
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
path
#Code starts here
data = pd.read_csv(path)
data.rename(index=str,columns={"Total":"Total_Medals"},inplace=True)
print(data.head(10))
# --------------
#Code starts here
data['Better_Event'] = np.where(data['Total_Summer'] > data['Total_Winter'], 'Summer',np.where(data['Total_Summer'] < data['Total_Winter'],'Winter',np.where(data['Total_Summer'] == data['Total_Winter'],'Both',np.nan)))
#print(data.head(10))
better_event = data['Better_Event'].value_counts().sort_values(ascending=False).idxmax()
print(better_event)
# --------------
#Code starts here
top_countries = pd.DataFrame(data, columns=['Country_Name', 'Total_Summer', 'Total_Winter', 'Total_Medals'])
""" Veracity Data Fabric API
"""
from typing import Any, AnyStr, Mapping, Sequence
from urllib.error import HTTPError
import pandas as pd
from azure.storage.blob.aio import ContainerClient
from .base import ApiBase
class DataFabricError(RuntimeError):
pass
class DataFabricAPI(ApiBase):
""" Access to the data fabric endpoints (/datafabric) in the Veracity API.
All web calls are async using aiohttp. Returns web responses exactly as
received, usually JSON.
Arguments:
credential (veracity.Credential): Provides oauth access tokens for the
API (the user has to log in to retrieve these unless your client
application has permissions to use the service.)
subscription_key (str): Your application's API subscription key. Gets
sent in th Ocp-Apim-Subscription-Key header.
version (str): Not currently used.
"""
API_ROOT = "https://api.veracity.com/veracity/datafabric"
def __init__(self, credential, subscription_key, version=None, **kwargs):
super().__init__(credential, subscription_key, scope=kwargs.pop('scope', 'veracity_datafabric'), **kwargs)
self._url = f"{DataFabricAPI.API_ROOT}/data/api/1"
self.sas_cache = {}
self.access_cache = {}
@property
def url(self):
return self._url
# APPLICATIONS.
async def get_current_application(self):
url = f'{self._url}/application'
resp = await self.session.get(url)
data = await resp.json()
if resp.status != 200:
raise HTTPError(url, resp.status, data, resp.headers, None)
return data
async def get_application(self, applicationId):
url = f'{self._url}/application/{applicationId}'
resp = await self.session.get(url)
data = await resp.json()
if resp.status != 200:
raise HTTPError(url, resp.status, data, resp.headers, None)
return data
async def add_application(self, *args, **kwargs):
raise NotImplementedError()
async def update_application_role(self, applicationId, role):
url = f'{self._url}/application/{applicationId}?role={role}'
resp = await self.session.get(url)
data = await resp.json()
if resp.status != 200:
raise HTTPError(url, resp.status, data, resp.headers, None)
return data
# GROUPS.
async def get_groups(self):
raise NotImplementedError()
async def add_group(self, *args, **kwargs):
raise NotImplementedError()
async def get_group(self, groupId):
raise NotImplementedError()
async def update_group(self, groupId, *args, **kwargs):
raise NotImplementedError()
async def delete_group(self, groupId):
raise NotImplementedError()
# KEY TEMPLATES.
async def get_keytemplates(self):
url = f'{self._url}/keytemplates'
resp = await self.session.get(url)
data = await resp.json()
if resp.status != 200:
raise HTTPError(url, resp.status, data, resp.headers, None)
return data
# LEDGER.
async def get_ledger(self, containerId: AnyStr) -> pd.DataFrame:
url = f'{self._url}/resource/{containerId}/ledger'
resp = await self.session.get(url)
data = await resp.json()
if resp.status == 200:
df = pd.DataFrame(data)
df['dateOfEvent'] = pd.to_datetime(df['dateOfEvent'], format="%Y-%m-%dT%H:%M:%SZ")
return df
elif resp.status == 403:
raise DataFabricError(f'HTTP/403 Must be data owner or steward to view container {containerId} ledger. Details:\n{data}')
elif resp.status == 404:
raise DataFabricError(f'HTTP/404 Data Fabric container {containerId} does not exist. Details:\n{data}')
else:
raise HTTPError(url, resp.status, data, resp.headers, None)
# RESOURCES.
# ACCESS.
async def get_best_access(self, containerId: AnyStr) -> pd.Series:
""" Gets the best available access share ID for a Veracity container.
Returns the access share ID with the highest available privileges.
"""
app = await self.get_current_application()
all_accesses = await self.get_accesses_df(containerId)
my_accesses = all_accesses[all_accesses['userId'] == app['id']]
best_index = my_accesses['level'].astype(float).idxmax()
return my_accesses.loc[best_index]
async def get_accesses(self, resourceId: AnyStr, pageNo: int = 1, pageSize: int = 50) -> Mapping[AnyStr, Any]:
url = f'{self._url}/resources/{resourceId}/accesses?pageNo={pageNo}&pageSize={pageSize}'
resp = await self.session.get(url)
if resp.status != 200:
raise HTTPError(url, resp.status, await resp.text(), resp.headers, None)
data = await resp.json()
return data
async def get_accesses_df(self, resourceId: AnyStr, pageNo: int = 1, pageSize: int = 50) -> pd.DataFrame:
""" Gets the access levels as a dataframe, including the "level" value.
"""
import pandas as pd
data = await self.get_accesses(resourceId, pageNo, pageSize)
df = pd.DataFrame(data['results'])
# Add the level values for future use.
df['level'] = self._access_levels(df)
self.access_cache[resourceId] = df
return df
async def share_access(self, resourceId: AnyStr, autoRefresh: bool, *args, **kwargs):
raise NotImplementedError()
async def revoke_access(self, resourceId: AnyStr, accessId: AnyStr):
raise NotImplementedError()
    async def get_sas(self, resourceId: AnyStr, accessId: AnyStr = None, **kwargs) -> Mapping[AnyStr, Any]:
key = self.get_sas_cached(resourceId) or await self.get_sas_new(resourceId, accessId, **kwargs)
return key
    async def get_sas_new(self, resourceId: AnyStr, accessId: AnyStr = None, **kwargs) -> Mapping[AnyStr, Any]:
""" Gets a new SAS key to access a container.
You can request a key with a specific access level (if you have the
accessId). By default this method will attempt to get the most
permissive access level available for the active credential.
Args:
resourceId (str): The container ID.
accessId (str): Access level ID, optional.
"""
if accessId is not None:
access_id = accessId
else:
access = await self.get_best_access(resourceId)
access_id = access.get('accessSharingId')
assert access_id is not None, 'Could not find access rights for current user.'
url = f'{self._url}/resources/{resourceId}/accesses/{access_id}/key'
resp = await self.session.put(url)
data = await resp.json()
if resp.status != 200:
raise HTTPError(url, resp.status, data, resp.headers, None)
# The API response does not include the access ID; we add for future use.
data['accessId'] = access_id
self.sas_cache[resourceId] = data
return data
    def get_sas_cached(self, resourceId: AnyStr) -> Mapping[AnyStr, Any]:
from datetime import datetime, timezone
import dateutil
sas = self.sas_cache.get(resourceId)
if not sas:
return None
expiry = dateutil.parser.isoparse(sas['sasKeyExpiryTimeUTC'])
if (not sas['isKeyExpired']) and (datetime.now(timezone.utc) < expiry):
return sas
else:
# Remove the expired key from the cache.
self.sas_cache.pop(resourceId)
return None
def _access_levels(self, accesses: pd.DataFrame) -> pd.Series:
""" Calculates an access "level" for each access in a dataframe.
In general higher access level means more privileges.
Notes:
Attributes related to permissions in this way:
| Attribute | Permission | Score |
| --------- | ---------- | ----- |
| attribute1 | Read | 4 |
| attribute2 | Write | 1 |
| attribute3 | Delete | 8 |
| attribute4 | List | 2 |
Scores are additive, so "read, write & list" = 7. If you want to
check an access has delete privileges, use level >= 8.
Write is considered the lowest privilege as it does not allow data to
be seen.
Args:
accesses (pandas.DataFrame): Accesses as returned by :meth:`get_accesses`.
Returns:
Pandas Series with same index as input.
"""
import numpy as np
scores = np.array([4, 1, 8, 2])
attrs = accesses[['attribute1', 'attribute2', 'attribute3', 'attribute4']].to_numpy()
levels = (attrs * scores).sum(axis=1)
        return pd.Series(levels, index=accesses.index, dtype='Int64')
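# Usage sketch (illustrative; a `veracity.Credential` object, a subscription key
# and a real container id are assumed to be available). SAS keys are cached per
# container and only refreshed once the cached key has expired.
if __name__ == '__main__':
    import asyncio

    async def _demo(credential, subscription_key, container_id):
        api = DataFabricAPI(credential, subscription_key)
        sas = await api.get_sas(container_id)       # cached key reused if valid
        ledger = await api.get_ledger(container_id)
        print(sas['accessId'], len(ledger))

    # asyncio.run(_demo(credential, subscription_key, '<container-id>'))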
import pandas as pd
import scipy.stats as st

def three_way_ANOVA(df_list):
f3_len = len(df_list)
f1_len, f2_len = len(df_list[0].columns), len(df_list[0].index)
    # compute the effect of each factor
f1_mean = sum([df.mean(axis=1) for df in df_list]) / f3_len
f2_mean = sum([df.mean() for df in df_list]) / f3_len
f3_mean = pd.Series([df.mean().mean() for df in df_list])
f_mean = sum([df.mean().mean() for df in df_list]) / f3_len
f1_effect, f2_effect, f3_effect = f1_mean - f_mean, f2_mean - f_mean, f3_mean - f_mean
    # compute the factor sums of squares S1, S2, S3
S1 = ((f1_effect**2) * (f1_len*f3_len)).sum()
S2 = ((f2_effect**2) * (f2_len*f3_len)).sum()
S3 = ((f3_effect**2) * (f1_len*f2_len)).sum()
    # compute the interaction sum of squares S12 (factors 1 and 2)
df_12 = (sum(df_list) / f3_len) - f_mean
S1_2 = (df_12**2).sum().sum() * f3_len
S12 = S1_2 - S1 - S2
    # compute the interaction sum of squares S13 (factors 1 and 3)
df_13 = pd.DataFrame([df.mean(axis=1) for df in df_list]) - f_mean
S1_3 = (df_13**2).sum().sum() * f1_len
S13 = S1_3 - S1 - S3
    # compute the interaction sum of squares S23 (factors 2 and 3)
df_23 = pd.DataFrame([df.mean() for df in df_list]) - f_mean
S2_3 = (df_23**2).sum().sum() * f2_len
S23 = S2_3 - S2 - S3
    # compute the error sum of squares Se
St = sum([((df-f_mean)**2).sum().sum() for df in df_list])
Se = St - S1 - S2 - S3 - S12 - S13 - S23
    # compute the degrees of freedom df
df1 = f2_len - 1
df2 = f1_len - 1
df3 = f3_len - 1
df12 = df1 * df2
df13 = df1 * df3
df23 = df2 * df3
dfe = df1 * df2 * df3
dft = df1 + df2 + df3 + df12 + df13 + df23 + dfe
    # compute the unbiased variances (mean squares) V
V1 = S1 / df1
V2 = S2 / df2
V3 = S3 / df3
V12 = S12 / df12
V13 = S13 / df13
V23 = S23 / df23
Ve = Se / dfe
    # compute the F values
F1 = V1 / Ve
F2 = V2 / Ve
F3 = V3 / Ve
F12 = V12 / Ve
F13 = V13 / Ve
F23 = V23 / Ve
    # compute the p values
p1 = 1 - st.f.cdf(F1, dfn=df1, dfd=dfe)
p2 = 1 - st.f.cdf(F2, dfn=df2, dfd=dfe)
p3 = 1 - st.f.cdf(F3, dfn=df3, dfd=dfe)
p12 = 1 - st.f.cdf(F12, dfn=df12, dfd=dfe)
p13 = 1 - st.f.cdf(F13, dfn=df13, dfd=dfe)
p23 = 1 - st.f.cdf(F23, dfn=df23, dfd=dfe)
    # build the ANOVA table
    df_S = pd.Series([S1, S2, S3, S12, S13, S23, Se, St])
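# Expected input layout for three_way_ANOVA, as read by the code above: each
# DataFrame in df_list is one level of factor 3, its rows are the levels of
# factor 1 and its columns the levels of factor 2, one observation per cell.
# A minimal sketch (illustrative values):
if __name__ == '__main__':
    level_a = pd.DataFrame([[1.0, 2.0], [2.0, 3.0]], columns=['f2_lo', 'f2_hi'])
    level_b = pd.DataFrame([[2.0, 2.5], [3.0, 4.0]], columns=['f2_lo', 'f2_hi'])
    # three_way_ANOVA([level_a, level_b])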
import pandas as pd
import numpy as np
df_cache = {}
def _preprocess(df):
df['time_unix'] = pd.to_datetime(df.EventTime).astype(np.int64) // 10 ** 6
# First Text Change basically only happens in one item, and is redundant with the action before
# print(df[df.Observable == 'First Text Change'].AccessionNumber.value_counts())
df = df[df.Observable != 'First Text Change'] # Hence, remove it
df = df[df.Observable != 'Exit Item'] # Basically 100% redundant with "Enter Item"
# "Calculator Buffer" is a better indicator of closing the calculator, because sometimes it gets
# automatically closed if the student opens a new scratch area or switches problems
df = df[df.Observable != 'Close Calculator']
df = df[df.EventTime.notnull()] # Two null rows causing some outliers in delta time
df = df.loc[(df.shift(1) != df).any(axis=1)] # Remove consecutive duplicate rows (keep first)
df['delta_time_ms'] = 0
for pid, pid_df in df.groupby('STUDENTID'):
df.loc[pid_df.index, 'delta_time_ms'] = \
(pid_df.time_unix.shift(-1) - pid_df.time_unix).fillna(0)
df['EventTime'] = pd.to_datetime(df.EventTime)
return df
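# Stand-alone illustration (not used by the pipeline) of the per-student
# delta-time idea in _preprocess, written with a vectorised groupby/shift
# instead of the explicit loop.
if __name__ == '__main__':
    _demo = pd.DataFrame({'STUDENTID': ['a', 'a', 'b', 'b'],
                          'time_unix': [0, 500, 100, 900]})
    _demo['delta_time_ms'] = (_demo.groupby('STUDENTID').time_unix.shift(-1)
                              - _demo.time_unix).fillna(0)
    print(_demo)  # deltas: 500, 0, 800, 0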
def train_full():
if 'train_full' not in df_cache:
df = pd.read_csv('public_data/data_a_train.csv')
label_df = pd.read_csv('public_data/data_train_label.csv')
assert len(df.STUDENTID.unique()) == len(label_df.STUDENTID.unique())
gt = {p: int(l) for p, l in label_df.values}
df['label'] = [gt[p] for p in df.STUDENTID.values]
df_cache['train_full'] = _preprocess(df)
return df_cache['train_full'].copy()
def train_10m():
# Return only first 10 minutes of data per participant
if 'train_10m' not in df_cache:
df = train_full()
start_unix_map = {p: v.time_unix.min() for p, v in df.groupby('STUDENTID')}
df['start_unix'] = [start_unix_map[p] for p in df.STUDENTID]
df_cache['train_10m'] = df[df.time_unix < df.start_unix + 10 * 60 * 1000] \
.drop(columns='start_unix')
return df_cache['train_10m'].copy()
def train_20m():
# Return only first 20 minutes of data per participant
if 'train_20m' not in df_cache:
df = train_full()
start_unix_map = {p: v.time_unix.min() for p, v in df.groupby('STUDENTID')}
df['start_unix'] = [start_unix_map[p] for p in df.STUDENTID]
df_cache['train_20m'] = df[df.time_unix < df.start_unix + 20 * 60 * 1000] \
.drop(columns='start_unix')
return df_cache['train_20m'].copy()
def holdout_10m():
if 'holdout_10m' not in df_cache:
        df_cache['holdout_10m'] = _preprocess(pd.read_csv('public_data/data_a_hidden_10.csv'))
"""
Created on Thu Nov 7, 2019
@author: <NAME>
"""
import serial # `pyserial` package; NOT `serial` package
import warnings
import pandas as pd
import numpy as np
import time
import os
import sys
from datetime import datetime
try:
from serial.tools import list_ports
IMPORTED_LIST_PORTS = True
except ImportError:
IMPORTED_LIST_PORTS = False
from .options import SETTINGS_DICT
# link to usb-serial driver for macOS
_L1 = "http://www.prolific.com.tw/UserFiles/files/PL2303HXD_G_Driver_v2_0_0_20191204.zip"
# blog post explaining how to bypass blocked extensions
# need this because no Big Sur version of driver as of Jan 7 2020.
_L2 = "https://eclecticlight.co/2019/06/01/how-to-bypass-mojave-10-14-5s-new-kext-security/"
class LockInError(Exception):
"""named exception for LockIn serial port connection issues"""
pass
class LockIn(object):
"""
represents a usable connection with the lock-in amplifier
"""
SWEEP_HEADER = "{:>3} \t {:>15} \t {:>15} \t {:>15}"
SWEEP_BLANK = "{:>3d} \t {:>15,.2f} \t {:>15,.4e} \t {:>15,.4e}"
@staticmethod
def get_serial(comm_port):
return serial.Serial(comm_port,
baudrate=19200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=3)
DEFAULT_PORTS = {
'darwin': ['/dev/cu.usbserial-1410'],
'win32': ['COM5'],
'linux': ['/dev/ttyUSB0']
}
def __init__(self, comm_port: str = None):
# (detect os and) set communication port
self._comm = None
if comm_port is not None:
try:
self._comm = LockIn.get_serial(comm_port)
except serial.SerialException:
print("lockintools: could not connect to port: %s" % comm_port)
else:
print("lockintools: trying default ports for platform: %s" % sys.platform)
for cp in LockIn.DEFAULT_PORTS[sys.platform]:
try:
self._comm = LockIn.get_serial(cp)
break
except serial.SerialException:
print("lockintools: could not connect to port: %s" % cp)
if self._comm is None and IMPORTED_LIST_PORTS:
print("lockintools: tying to detect port and auto-connect...")
for cp_match in list_ports.grep("(usb|USB)"):
cp_str = str(cp_match).split('-')[0].strip()
try:
self._comm = LockIn.get_serial(cp_str)
break
except serial.SerialException:
print("lockintools: could not connect to port: %s" % cp_str)
if self._comm is None:
raise LockInError("lockintools: CONNECTION FAILED! Do you have a driver installed?")
print("lockintools: SUCCESS! Connection established.")
self.print_to_stdout = True
@property
def comm(self):
# `serial.Serial` object for handling communications
return self._comm
def close(self):
"""closes communication port"""
if self.comm.is_open:
self.comm.close()
def open(self):
"""(re)-opens communication port"""
if not self.comm.is_open:
self.comm.open()
def cmd(self, command):
"""execute arbitrary lockin command"""
self.comm.write(str.encode(command + '\n'))
self.comm.flush()
if '?' in command:
state = bytes.decode(self.comm.readline())
return state
else:
return
def set_input_mode(self, mode):
"""set lockin input configuration"""
if mode == "A":
self.cmd("ISRC0")
elif mode == "A-B":
self.cmd("ISRC1")
elif mode == "I":
self.cmd("ISRC2")
elif mode == "I100":
self.cmd("ISRC3")
else:
raise ValueError("invalid mode {}, valid values are 'A', 'A-B', 'I', or 'I100'"
.format(mode))
def set_coupling_mode(self, mode):
if mode == "AC":
self.cmd("ICPL0")
elif mode == "DC":
self.cmd("ICPL1")
else:
raise ValueError("invalid mode {}, valid values are 'AC' or 'DC'"
.format(mode))
def set_freq(self, freq):
"""set lock-in amp. frequency"""
command = 'FREQ' + str(freq)
return self.cmd(command)
def set_ampl(self, ampl):
"""set lock-in amp. voltage amplitude"""
if ampl > 5.:
raise ValueError("can not exceed amplitude of 5V")
command = 'SLVL' + str(ampl)
return self.cmd(command)
def set_sens(self, sens):
"""set lock-in amp. sensitivity"""
if 0 <= sens <= 26:
self.cmd('SENS' + str(sens))
else:
raise ValueError("sensitivity setting must be between 0 (1 nV) and "
"26 (1 V)")
def set_harm(self, harm):
"""set lock-in amp. detection harmonic"""
harm = int(harm)
if 1 <= harm <= 19999:
self.cmd('HARM' + str(harm))
else:
raise ValueError("harmonic must be between 1 and 19999")
def get_reading(self, ch, meas_time=0.1, stdev=False):
"""
read average value from channel `ch` over `meas_time` seconds
optionally, also return standard deviation (`stdev=True`)
"""
if not (ch == 1 or ch == 2):
raise ValueError("channel `ch` should be 1 or 2")
self.cmd("REST")
self.cmd("STRT")
time.sleep(meas_time)
self.cmd("PAUS")
N = self.cmd("SPTS?")
r_str = self.cmd("TRCA?" + str(ch) + ",0," + N)
r = [float(ri) for ri in r_str.split(',')[:-1]]
if stdev:
return np.mean(r), np.std(r)
return np.mean(r)
def get_x(self, meas_time=0.1, stdev=False):
return self.get_reading(ch=1, meas_time=meas_time, stdev=stdev)
def get_y(self, meas_time=0.1, stdev=False):
return self.get_reading(ch=2, meas_time=meas_time, stdev=stdev)
def sweep(self, label: str, freqs, ampls, sens: int, harm: int,
stb_time: float = 9.,
meas_time: float = 1.,
ampl_time: float = 5.,
L_MAX: int = 50):
"""
Conduct a frequency sweep measurement across one or more voltage
amplitudes.
:param label: (string) label for the sweep data
:param freqs: (scalar or array-like) freqs. to sweep over
:param ampls: (scalar or array-like) amplitudes to sweep over
:param sens: (int) integer indicating lock-in amp. sensitivity setting
:param harm: (int) detection harmonic
:param stb_time: (float) time (s) for stabilization at each freq.
:param meas_time: (float) time (s) for data collection at each freq.
:param ampl_time: (float) time (s) for stabilization at each voltage
:param L_MAX: (int) maximum data array size
:return: (lockin.SweepData) container of pandas `DataFrame`s for
in- and out-of-phase detected voltages, and variances thereof
"""
self.set_harm(harm)
self.set_sens(sens)
ampls = np.asarray(ampls)
freqs = np.asarray(freqs)
if ampls.ndim == 0:
ampls = ampls[None]
if freqs.ndim == 0:
freqs = freqs[None]
# buffer arrays for in- and out-of-phase data
X = np.full((len(ampls), len(freqs), L_MAX), fill_value=np.nan)
Y = np.full((len(ampls), len(freqs), L_MAX), fill_value=np.nan)
for i, V in enumerate(ampls):
self._print('V = {:.2f} volts'.format(V))
self._print('waiting for stabilization after amplitude change...')
self.set_ampl(V)
self.set_freq(freqs[0])
time.sleep(ampl_time)
self._print('')
self._print(LockIn.SWEEP_HEADER.format('', 'freq [Hz]', 'X [V]', 'Y [V]'))
for j, freq in enumerate(freqs):
# self._print("waiting for stabilization at f = {:.4f} Hz "
# "({:d}/{:d})".format(freq, j + 1, len(freqs)))
self.set_freq(freq)
self.cmd('REST')
time.sleep(stb_time)
# self._print('taking measurement')
# beep(repeat=1)
self.cmd('STRT')
time.sleep(meas_time)
self.cmd('PAUS')
# self._print('extracting values')
N = self.cmd('SPTS?')
x_str = self.cmd('TRCA?1,0,' + N)
y_str = self.cmd('TRCA?2,0,' + N)
# list of values measured at a single point
# last character is a newline character
x = np.array([float(_) for _ in x_str.split(',')[:-1]])
y = np.array([float(_) for _ in y_str.split(',')[:-1]])
try:
X[i, j][:len(x)] = x
Y[i, j][:len(x)] = y
except ValueError:
warnings.warn("buffer array overflow encountered at point "
"f = {:.1f} Hz, V = {:.1f} volts"
.format(freq, V))
X[i, j] = x[:L_MAX]
Y[i, j] = y[:L_MAX]
x_ = np.mean(x[~np.isnan(x)])
y_ = np.mean(y[~np.isnan(y)])
self._print(LockIn.SWEEP_BLANK.format(j + 1, freq, x_, y_))
self._print('')
return SweepData(X, Y, freqs, ampls, label, sens, harm)
def get_config(self):
raw_config = {}
for key in SETTINGS_DICT.keys():
if key != 'names':
raw_config[key] = self.cmd(key + '?')
return raw_config
def _print(self, s):
if self.print_to_stdout:
print(s)
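# Example usage of `LockIn` (a minimal sketch; the port path, sweep frequencies,
# amplitude, sensitivity and harmonic below are assumptions, not values taken from
# this module):
#
#   lock = LockIn(comm_port='/dev/ttyUSB0')   # or LockIn() to try the default ports
#   lock.set_input_mode("A")
#   lock.set_coupling_mode("AC")
#   x_mean, x_std = lock.get_x(meas_time=0.5, stdev=True)
#   sweep_data = lock.sweep('heater', freqs=[10, 30, 100], ampls=0.5, sens=22, harm=3)
#   lock.close()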
class SweepData(object):
"""
Contains the data relevant to a single sweep.
i.e. the amplitude of the oscillations described by the `harm`th harmonic of
the voltage measured across the heater line or shunt, for a driving
voltage `V` in `Vs` at a frequency `freq` in `freqs`.
The digested values (e.g. `V_x[i]` and `dV_x[i]`) at each point are the
average of many measurements at that point and the standard deviation of
those measurements.
"""
def __init__(self, X, Y, freqs, Vs, label, sens, harm):
dt1 = datetime.now()
dt = dt1.strftime("%d-%m-%Y_%H-%M")
self.ID = '_'.join([label, 'HARM' + str(harm), 'SENS' + str(sens), dt])
# frequency and voltage ranges
self.freqs = freqs
self.Vs = Vs
# full raw buffer output from lock-in (padded with NaNs)
self.X = X
self.Y = Y
n = len(freqs)
m = len(Vs)
# initializing arrays for digests
V_x = np.zeros((m, n)) # in-phase amplitudes (left lockin display)
V_y = np.zeros((m, n)) # out-of-phase amplitudes (right lockin display)
dV_x = np.zeros((m, n)) # standard deviations of buffer outputs over time
dV_y = np.zeros((m, n)) # standard deviations of buffer outputs over time
for i in range(m):
for j in range(n):
_X_ = X[i, j]
_Y_ = Y[i, j]
_X_ = _X_[~np.isnan(_X_)]
_Y_ = _Y_[~np.isnan(_Y_)]
V_x[i, j] = np.mean(_X_)
V_y[i, j] = np.mean(_Y_)
dV_x[i, j] = np.std(_X_)
dV_y[i, j] = np.std(_Y_)
# converting to DataFrames for readability
self.V_x = pd.DataFrame(V_x.T, index=freqs, columns=Vs)
self.V_y = pd.DataFrame(V_y.T, index=freqs, columns=Vs)
self.dV_x = pd.DataFrame(dV_x.T, index=freqs, columns=Vs)
self.dV_y = pd.DataFrame(dV_y.T, index=freqs, columns=Vs)
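# Reading the digested results (a sketch; the sweep values are the same assumptions
# as in the usage example above):
#
#   sd = lock.sweep('heater', freqs=[10, 30, 100], ampls=0.5, sens=22, harm=3)
#   sd.V_x.loc[30, 0.5]    # mean in-phase voltage at 30 Hz and 0.5 V drive
#   sd.dV_x.loc[30, 0.5]   # spread (np.std) of the buffered readings at that point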
class LockInData(object):
"""
contains and manages the data of various sweeps
"""
# TODO: consider standardizing data objects with `tc3omega` package.
def __init__(self, working_dir=None, create_dir=None, **kwargs):
if working_dir is None:
self.working_dir = os.getcwd()
else:
self.working_dir = os.path.expanduser(working_dir)
self.create_dir = create_dir
self.directory_created = False
self.DIR = None # absolute path to directory where files are saved
self.Vs_3w = None
self.Vs_1w = None
self.Vsh_1w = None
self.add_sweeps(**kwargs)
# paths to output files
self._V_file = None
self._dV_file = None
@property
def V_file(self):
if self._V_file is None:
raise ValueError("have not saved voltage data to any file")
return self._V_file
@property
def dV_file(self):
if self._dV_file is None:
raise ValueError("have not save error data to any file")
return self._dV_file
def add_sweeps(self, **kwargs):
for key, sweep_data in kwargs.items():
if hasattr(self, key):
if isinstance(sweep_data, SweepData):
self.__setattr__(key, sweep_data)
else:
raise ValueError("keyword argument '{}' is an not instance "
"of `lck_tools.SweepData` class"
.format(sweep_data))
else:
raise ValueError("keyword argument '{}' is not one of "
"'Vs_3w', 'Vs_1w', or 'Vsh_1w'.".format(key))
def init_save(self):
if self.directory_created:
return
if self.create_dir is None:
self.create_dir = 'recorded_' + str(datetime.date(datetime.now()))
# check if created directory name conflicts with any that already exist
name_conflict = True
d = 0
os.chdir(self.working_dir)
while name_conflict:
try:
os.mkdir(self.create_dir)
name_conflict = False
except FileExistsError:
if d == 0:
self.create_dir += '(1)'
d += 1
else:
self.create_dir = self.create_dir.replace('({})'.format(d),
'')
d += 1
self.create_dir += '({})'.format(d)
self.directory_created = True
self.DIR = '/'.join([self.working_dir, self.create_dir, ''])
def save_all(self):
self.init_save()
for name, sweep_data in zip(['Vs_3w', 'Vs_1w', 'Vsh_1w'],
[self.Vs_3w, self.Vs_1w, self.Vsh_1w]):
# skip empty data sets
if sweep_data is None:
continue
# recall each `Data` is an instance of `SweepData`
V_x_file_path = (self.DIR
+ '_'.join(['{}'.format(name), sweep_data.ID])
+ '.xlsx')
V_y_file_path = (self.DIR
+ '_'.join(['{}_o'.format(name), sweep_data.ID])
+ '.xlsx')
with pd.ExcelWriter(V_x_file_path) as writer:
sweep_data.V_x.to_excel(writer, sheet_name='val')
sweep_data.dV_x.to_excel(writer, sheet_name='var')
with pd.ExcelWriter(V_y_file_path) as writer:
sweep_data.V_y.to_excel(writer, sheet_name='val')
sweep_data.dV_y.to_excel(writer, sheet_name='var')
print("saved sweep data in '{}'".format(self.DIR))
def save_tc3omega(self, ampl):
self.init_save()
for name, sweep_data in zip(['Vs_3w', 'Vs_1w', 'Vsh_1w'],
[self.Vs_3w, self.Vs_1w, self.Vsh_1w]):
if sweep_data is None:
raise ValueError("no recorded data for attribute '{}'"
.format(name))
if not (np.all(self.Vs_1w.freqs == self.Vs_3w.freqs)
and np.all(self.Vs_1w.freqs == self.Vsh_1w.freqs)):
raise IndexError("frequencies don't match across scans")
if not (ampl in self.Vs_1w.Vs and ampl in self.Vs_3w.Vs
and ampl in self.Vsh_1w.Vs):
raise ValueError("specified voltage not found in every scan")
# unpack DataFrames into arrays
# values
_Vs_3w = self.Vs_3w.V_x[ampl].values
_Vs_1w = self.Vs_1w.V_x[ampl].values
_Vsh_1w = self.Vsh_1w.V_x[ampl].values
_Vs_3w_o = self.Vs_3w.V_y[ampl].values
_Vs_1w_o = self.Vs_1w.V_y[ampl].values
_Vsh_1w_o = self.Vsh_1w.V_y[ampl].values
# standard deviation of values
_dVs_3w = self.Vs_3w.dV_x[ampl].values
_dVs_1w = self.Vs_1w.dV_x[ampl].values
_dVsh_1w = self.Vsh_1w.dV_x[ampl].values
_dVs_3w_o = self.Vs_3w.dV_y[ampl].values
_dVs_1w_o = self.Vs_1w.dV_y[ampl].values
_dVsh_1w_o = self.Vsh_1w.dV_y[ampl].values
# write voltage values
V_output = np.array([_Vs_3w, _Vs_3w_o,
_Vs_1w, _Vs_1w_o,
_Vsh_1w, _Vsh_1w_o])
V_columns = ['Vs_3w', 'Vs_3w_o',
'Vs_1w', 'Vs_1w_o',
'Vsh_1w', 'Vsh_1w_o']
V_output_df = pd.DataFrame(V_output.T, columns=V_columns)
import argparse
import os
import time
import yaml
import pandas as pd
from kalasanty.data import DataWrapper
from kalasanty.net import UNet, dice_loss, dice, ovl
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
def input_path(path):
"""Check if input exists."""
path = os.path.abspath(path)
if not os.path.exists(path):
raise IOError('%s does not exist.' % path)
return path
def output_path(path):
path = os.path.abspath(path)
return path
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--input', '-i', required=True, type=input_path,
help='path to the .hdf file with prepared data (can be '
'created with prepare_dataset.py)')
parser.add_argument('--model', '-m', type=input_path,
help='path to the .hdf file with pretrained model. '
'If not specified, a new model will be trained from scratch.')
parser.add_argument('--batch_size', default=10, type=int)
parser.add_argument('--steps_per_epoch', default=150, type=int)
parser.add_argument('--epochs', default=10000, type=int)
parser.add_argument("-r", "--runconfig", dest='runconfig', type=str, required=True,
help=f"The run config yaml file")
parser.add_argument('--load', '-l', action='store_true',
help='whether to load all data into memory')
parser.add_argument('--output', '-o', type=output_path,
help='name for the output directory. If not specified, '
'"output_<YYYY>-<MM>-<DD>" will be used')
parser.add_argument('--verbose', '-v', default=2, type=int,
help='verbosity level for keras')
parser.add_argument('--device', '-d', type=str, default='gpu', const='gpu', choices=['gpu', 'cpu'],
nargs='?', help='device')
return parser.parse_args()
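# Example invocation (a sketch; the script and file names are assumptions):
#
#   python train.py --input scpdb_dataset.hdf --runconfig folds.yaml \
#       --output output_run1 --batch_size 10 --device gpu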
class LoadConfig:
def __init__(self, path):
runconfig = yaml.safe_load(open(path, 'r'))
self.train_ids = runconfig['train']
self.test_ids = runconfig['val']
def main():
args = parse_args()
runconfig = args.runconfig
config = LoadConfig(runconfig)
train_ids = config.train_ids
test_ids = config.test_ids
if args.device == 'cpu':
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
elif args.device == 'gpu':
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
if args.output is None:
args.output = 'output_' + time.strftime('%Y-%m-%d')
if not os.path.exists(args.output):
os.makedirs(args.output)
if not os.access(args.output, os.W_OK):
raise IOError('Cannot create files inside %s (check your permissions).' % args.output)
if train_ids:
if test_ids:
all_ids = sorted(set(train_ids) | set(test_ids))
else:
all_ids = train_ids
else:
all_ids = None
data = DataWrapper(args.input, test_set=test_ids, pdbids=all_ids,
load_data=args.load)
if args.model:
model = UNet.load_model(args.model, data_handle=data)
else:
model = UNet(data_handle=data)
model.compile(optimizer=Adam(lr=1e-6), loss=dice_loss,
metrics=[dice, ovl, 'binary_crossentropy'])
train_batch_generator = data.batch_generator(batch_size=args.batch_size)
callbacks = [ModelCheckpoint(os.path.join(args.output, 'checkpoint.hdf'),
save_best_only=False)]
if test_ids:
val_batch_generator = data.batch_generator(batch_size=args.batch_size, subset='test')
num_val_steps = max(args.steps_per_epoch // 5, 1)
callbacks.append(ModelCheckpoint(os.path.join(args.output, 'best_weights.hdf'),
save_best_only=True))
else:
val_batch_generator = None
num_val_steps = None
model.fit_generator(train_batch_generator, steps_per_epoch=args.steps_per_epoch,
epochs=args.epochs, verbose=args.verbose, callbacks=callbacks,
validation_data=val_batch_generator, validation_steps=num_val_steps)
history = pd.DataFrame(model.history.history)
from __future__ import absolute_import, division, print_function
import re
import traceback
import warnings
from datetime import datetime
from functools import partial
import numpy as np
import pandas as pd
from ..core import indexing
from ..core.formatting import first_n_items, format_timestamp, last_item
from ..core.pycompat import PY3
from ..core.variable import Variable
from .variables import (
SerializationWarning, VariableCoder, lazy_elemwise_func, pop_to,
safe_setitem, unpack_for_decoding, unpack_for_encoding)
try:
from pandas.errors import OutOfBoundsDatetime
except ImportError:
# pandas < 0.20
from pandas.tslib import OutOfBoundsDatetime
# standard calendars recognized by netcdftime
_STANDARD_CALENDARS = set(['standard', 'gregorian', 'proleptic_gregorian'])
_NS_PER_TIME_DELTA = {'us': int(1e3),
'ms': int(1e6),
's': int(1e9),
'm': int(1e9) * 60,
'h': int(1e9) * 60 * 60,
'D': int(1e9) * 60 * 60 * 24}
TIME_UNITS = frozenset(['days', 'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds'])
def _import_netcdftime():
'''
helper function handle the transition to netcdftime as a stand-alone
package
'''
try:
# Try importing netcdftime directly
import netcdftime as nctime
if not hasattr(nctime, 'num2date'):
# must have gotten an old version from netcdf4-python
raise ImportError
except ImportError:
# in netCDF4 the num2date/date2num function are top-level api
try:
import netCDF4 as nctime
except ImportError:
raise ImportError("Failed to import netcdftime")
return nctime
def _netcdf_to_numpy_timeunit(units):
units = units.lower()
if not units.endswith('s'):
units = '%ss' % units
return {'microseconds': 'us', 'milliseconds': 'ms', 'seconds': 's',
'minutes': 'm', 'hours': 'h', 'days': 'D'}[units]
def _unpack_netcdf_time_units(units):
# CF datetime units follow the format: "UNIT since DATE"
# this parses out the unit and date allowing for extraneous
# whitespace.
matches = re.match('(.+) since (.+)', units)
if not matches:
raise ValueError('invalid time units: %s' % units)
delta_units, ref_date = [s.strip() for s in matches.groups()]
return delta_units, ref_date
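# A sketch of the expected behaviour:
#   _unpack_netcdf_time_units('days since 1970-01-01 00:00:00')
#   returns ('days', '1970-01-01 00:00:00')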
def _decode_datetime_with_netcdftime(num_dates, units, calendar):
nctime = _import_netcdftime()
dates = np.asarray(nctime.num2date(num_dates, units, calendar))
if (dates[np.nanargmin(num_dates)].year < 1678 or
dates[np.nanargmax(num_dates)].year >= 2262):
warnings.warn('Unable to decode time axis into full '
'numpy.datetime64 objects, continuing using dummy '
'netcdftime.datetime objects instead, reason: dates out'
' of range', SerializationWarning, stacklevel=3)
else:
try:
dates = nctime_to_nptime(dates)
except ValueError as e:
warnings.warn('Unable to decode time axis into full '
'numpy.datetime64 objects, continuing using '
'dummy netcdftime.datetime objects instead, reason:'
'{0}'.format(e), SerializationWarning, stacklevel=3)
return dates
def _decode_cf_datetime_dtype(data, units, calendar):
# Verify that at least the first and last date can be decoded
# successfully. Otherwise, tracebacks end up swallowed by
# Dataset.__repr__ when users try to view their lazily decoded array.
values = indexing.ImplicitToExplicitIndexingAdapter(
indexing.as_indexable(data))
example_value = np.concatenate([first_n_items(values, 1) or [0],
last_item(values) or [0]])
try:
result = decode_cf_datetime(example_value, units, calendar)
except Exception:
calendar_msg = ('the default calendar' if calendar is None
else 'calendar %r' % calendar)
msg = ('unable to decode time units %r with %s. Try '
'opening your dataset with decode_times=False.'
% (units, calendar_msg))
if not PY3:
msg += ' Full traceback:\n' + traceback.format_exc()
raise ValueError(msg)
else:
dtype = getattr(result, 'dtype', np.dtype('object'))
return dtype
def decode_cf_datetime(num_dates, units, calendar=None):
"""Given an array of numeric dates in netCDF format, convert it into a
numpy array of date time objects.
For standard (Gregorian) calendars, this function uses vectorized
operations, which makes it much faster than netcdftime.num2date. In such a
case, the returned array will be of type np.datetime64.
Note that time unit in `units` must not be smaller than microseconds and
not larger than days.
See also
--------
netcdftime.num2date
"""
num_dates = np.asarray(num_dates)
flat_num_dates = num_dates.ravel()
if calendar is None:
calendar = 'standard'
delta, ref_date = _unpack_netcdf_time_units(units)
try:
if calendar not in _STANDARD_CALENDARS:
raise OutOfBoundsDatetime
delta = _netcdf_to_numpy_timeunit(delta)
try:
ref_date = pd.Timestamp(ref_date)
except ValueError:
# ValueError is raised by pd.Timestamp for non-ISO timestamp
# strings, in which case we fall back to using netcdftime
raise OutOfBoundsDatetime
# fixes: https://github.com/pydata/pandas/issues/14068
# these lines check if the lowest or the highest value in dates
# cause an OutOfBoundsDatetime (Overflow) error
pd.to_timedelta(flat_num_dates.min(), delta) + ref_date
pd.to_timedelta(flat_num_dates.max(), delta) + ref_date
# Cast input dates to integers of nanoseconds because `pd.to_datetime`
# works much faster when dealing with integers
# make _NS_PER_TIME_DELTA an array to ensure type upcasting
flat_num_dates_ns_int = (flat_num_dates.astype(np.float64) *
_NS_PER_TIME_DELTA[delta]).astype(np.int64)
dates = (pd.to_timedelta(flat_num_dates_ns_int, 'ns') +
ref_date).values
except (OutOfBoundsDatetime, OverflowError):
dates = _decode_datetime_with_netcdftime(
flat_num_dates.astype(np.float), units, calendar)
return dates.reshape(num_dates.shape)
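# A sketch of the expected behaviour for a standard calendar:
#   decode_cf_datetime(np.array([0, 1, 2]), 'days since 2000-01-01')
#   returns a datetime64[ns] array for 2000-01-01, 2000-01-02 and 2000-01-03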
def decode_cf_timedelta(num_timedeltas, units):
"""Given an array of numeric timedeltas in netCDF format, convert it into a
numpy timedelta64[ns] array.
"""
num_timedeltas = np.asarray(num_timedeltas)
units = _netcdf_to_numpy_timeunit(units)
shape = num_timedeltas.shape
num_timedeltas = num_timedeltas.ravel()
result = pd.to_timedelta(num_timedeltas, unit=units, box=False)
# NaT is returned unboxed with wrong units; this should be fixed in pandas
if result.dtype != 'timedelta64[ns]':
result = result.astype('timedelta64[ns]')
return result.reshape(shape)
def _infer_time_units_from_diff(unique_timedeltas):
for time_unit in ['days', 'hours', 'minutes', 'seconds']:
delta_ns = _NS_PER_TIME_DELTA[_netcdf_to_numpy_timeunit(time_unit)]
unit_delta = np.timedelta64(delta_ns, 'ns')
diffs = unique_timedeltas / unit_delta
if np.all(diffs == diffs.astype(int)):
return time_unit
return 'seconds'
def infer_datetime_units(dates):
"""Given an array of datetimes, returns a CF compatible time-unit string of
the form "{time_unit} since {date[0]}", where `time_unit` is 'days',
'hours', 'minutes' or 'seconds' (the first one that can evenly divide all
unique time deltas in `dates`)
"""
dates = pd.to_datetime(np.asarray(dates).ravel(), box=False)
dates = dates[pd.notnull(dates)]
unique_timedeltas = np.unique(np.diff(dates))
units = _infer_time_units_from_diff(unique_timedeltas)
reference_date = dates[0] if len(dates) > 0 else '1970-01-01'
return '%s since %s' % (units, pd.Timestamp(reference_date))
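# A sketch of the expected output:
#   infer_datetime_units(pd.to_datetime(['2000-01-01', '2000-01-02', '2000-01-03']))
#   returns 'days since 2000-01-01 00:00:00'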
#!/usr/bin/env python
# coding: utf-8
# ### Explore processed pan-cancer data
# In[1]:
import os
import sys
import numpy as np; np.random.seed(42)
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
import mpmp.config as cfg
import mpmp.utilities.data_utilities as du
# In[2]:
DATA_TYPE = 'mut_sigs'
# load gene/classification info and sample/cancer type info
print('Loading gene label data...', file=sys.stderr)
genes_df = du.load_vogelstein()
sample_info_df = du.load_sample_info(DATA_TYPE, verbose=True)
# load mutation info
# this returns a tuple of dataframes, unpack it below
pancancer_data = du.load_pancancer_data(verbose=True)
(sample_freeze_df,
mutation_df,
copy_loss_df,
copy_gain_df,
mut_burden_df) = pancancer_data
# In[3]:
# load relevant data
data_df = du.load_raw_data(DATA_TYPE, verbose=True)
# standardize columns of expression dataframe
if DATA_TYPE in cfg.standardize_data_types:
print('Standardizing columns of {} data...'.format(DATA_TYPE),
file=sys.stderr)
data_df[data_df.columns] = StandardScaler().fit_transform(data_df[data_df.columns])
print(data_df.shape)
data_df.iloc[:5, :5]
# First, let's look at the low-dimensional representation of the chosen data type.
#
# We'll choose a few cancer types that are similar to one another (LUSC/LUAD, LGG/GBM) and a few that should be dissimilar (BRCA, THCA).
# In[25]:
assert sample_info_df.index.equals(data_df.index)
# data_cancer_types = sorted(sample_info_df.cancer_type.unique())
data_cancer_types = ['LUAD', 'LUSC', 'THCA', 'LGG', 'GBM', 'BRCA']
data_types_df = (data_df
.merge(sample_info_df, left_index=True, right_index=True)
.query('cancer_type in @data_cancer_types')
.drop(columns=['sample_type', 'id_for_stratification'])
.reset_index()
)
print(data_types_df.cancer_type.unique())
data_types_df.iloc[:5, -5:]
# In[26]:
from sklearn.decomposition import PCA
from umap import UMAP
sns.set({'figure.figsize': (20, 8)})
fig, axarr = plt.subplots(1, 2)
pca = PCA(n_components=2)
X_proj_pca = pca.fit_transform(data_types_df.drop(columns=['sample_id', 'cancer_type']))
reducer = UMAP(n_components=2, random_state=42)
X_proj_umap = reducer.fit_transform(data_types_df.drop(columns=['sample_id', 'cancer_type']))
for i, cancer_type in enumerate(data_cancer_types):
ixs = data_types_df.index[data_types_df.cancer_type == cancer_type].tolist()
axarr[0].scatter(X_proj_pca[ixs, 0], X_proj_pca[ixs, 1], label=cancer_type, s=5)
axarr[1].scatter(X_proj_umap[ixs, 0], X_proj_umap[ixs, 1], label=cancer_type, s=5)
axarr[0].set_xlabel('PC1')
axarr[0].set_ylabel('PC2')
axarr[0].set_title('PCA projection of {} data, colored by cancer type'.format(DATA_TYPE))
axarr[0].legend()
axarr[1].set_xlabel('UMAP dimension 1')
axarr[1].set_ylabel('UMAP dimension 2')
axarr[1].set_title('UMAP projection of {} data, colored by cancer type'.format(DATA_TYPE))
axarr[1].legend()
# Now we want to dig a bit deeper into LGG and GBM, using expression and methylation data. It's fairly well-known that IDH1 mutation status defines distinct subtypes in both classes of brain tumors. We'll compare methylation and gene expression in IDH1-mutated vs. non-mutated samples, expecting to see a separation in our low dimensional representation.
#
# IDH1 plays a direct role in DNA methylation, so we anticipate that this separation between mutated and non-mutated samples will be slightly clearer in the methylation data.
# In[5]:
# load relevant data
rnaseq_df = du.load_raw_data('expression', verbose=True)
print('Standardizing columns of expression data...', file=sys.stderr)
rnaseq_df[rnaseq_df.columns] = StandardScaler().fit_transform(rnaseq_df[rnaseq_df.columns])
methylation_df = du.load_raw_data('me_27k', verbose=True)
print(methylation_df.shape)
methylation_df.iloc[:5, :5]
# In[6]:
from mpmp.utilities.tcga_utilities import process_y_matrix
def generate_labels(gene, classification):
# process the y matrix for the given gene or pathway
y_mutation_df = mutation_df.loc[:, gene]
# include copy number gains for oncogenes
# and copy number loss for tumor suppressor genes (TSG)
include_copy = True
if classification == "Oncogene":
y_copy_number_df = copy_gain_df.loc[:, gene]
elif classification == "TSG":
y_copy_number_df = copy_loss_df.loc[:, gene]
else:
y_copy_number_df = pd.DataFrame()
include_copy = False
# construct labels from mutation/CNV information, and filter for
# cancer types without an extreme label imbalance
y_df = process_y_matrix(
y_mutation=y_mutation_df,
y_copy=y_copy_number_df,
include_copy=include_copy,
gene=gene,
sample_freeze=sample_freeze_df,
mutation_burden=mut_burden_df,
filter_count=1,
filter_prop=0.01,
output_directory=None,
hyper_filter=5,
test=True # don't write filter info to file
)
return y_df
# In[7]:
gene = 'IDH1'
cancer_types = ['LGG', 'GBM']
classification = du.get_classification(gene, genes_df)
y_df = generate_labels(gene, classification)
y_df = y_df[y_df.DISEASE.isin(cancer_types)]
print(y_df.shape)
y_df.tail()
# In[8]:
# generate UMAP 2-dimensional representations of data
shuffle = False
def shuffle_cols(input_df):
# randomly permute genes of each sample in the rnaseq matrix
shuf_df = input_df.apply(lambda x:
np.random.permutation(x.tolist()),
axis=1)
# set up new dataframe
shuf_df = pd.DataFrame(shuf_df, columns=['col_list'])
"""
Prerequisites:
$ pip install pandas
$ pip install openpyxl
$ pip install xlrd
Reference:
https://pandas.pydata.org/pandas-docs/stable/reference/index.html
"""
import pandas as pd
import openpyxl
excel_in_path1 = './data/excel_in_header_2sheet.xlsx'
print("********何も指定せず読み込み********")
# 何も指定しない場合は最初のシートになる
df = pd.read_excel(excel_in_path1)
print(df)
print(df.index) # index(行の項目名)
print(df.columns) # header (列の項目名)
print(df.values) # 値
print(df.dtypes) # 判定指定した型
print(type(df.at[0,'Note'])) # "OK" 文字列の場合<str>
print(type(df.at[3,'Note'])) # Nan 空白(セルの内容がない場合)の場合<float>扱い
print(type(df.at[4,'Note'])) # 12 数値の場合<int64>
print("********シートを指定して読み込み********")
df = pd.read_excel(excel_in_path1, sheet_name="Member")
print(df)
df = pd.read_excel(excel_in_path1, sheet_name="History")
print(df)
print("********dtypeを指定して読み込み********")
df = | pd.read_excel(excel_in_path1, dtype={"Note": 'str'}) | pandas.read_excel |
import sys
import os
import pandas as pd
from pii_benchmarking import pii_engine
def is_labeled(entities, start_char, end_char):
for entity in entities:
if entity['start_char'] <= start_char and entity['end_char'] >= end_char:
return entity['entity_label']
return 'NONE'
def main(eval_dir):
eval_files = os.listdir(eval_dir)
for file in eval_files:
if file.endswith('.csv'):
service_names = ['aws', 'gcp', 'presidio']
for service_name in service_names:
print(f"Masking {file} with {service_name}...")
df = pd.read_csv(os.path.join(eval_dir, file), index_col=0)
masker = pii_engine.Masker(service_name)
masked_results = masker.batch_mask(df['Text'].tolist())
assert df.shape[0] == len(masked_results)
masked_col_name = f"Masked_{service_name}"
df[masked_col_name] = pd.Series([x['masked_input'] for x in masked_results])
entities_col_name = f"Entities_{service_name}"
df[entities_col_name] = pd.Series([x['results'] for x in masked_results])
from pathlib import Path
import pandas as pd
from fuzzywuzzy import fuzz
import time
import numpy as np
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# coding: utf-8
"""基于HDF文件的数据库"""
import pandas as pd
import numpy as np
import os
import warnings
from multiprocessing import Lock
from ..utils.datetime_func import Datetime2DateStr, DateStr2Datetime
from ..utils.tool_funcs import ensure_dir_exists
from ..utils.disk_persist_provider import DiskPersistProvider
from .helpers import handle_ids, FIFODict
from pathlib import Path
from FactorLib.utils.tool_funcs import is_non_string_iterable
pd.options.compute.use_numexpr = True
lock = Lock()
warnings.simplefilter('ignore', category=FutureWarning)
def append_along_index(df1, df2):
df1, df2 = df1.align(df2, axis='columns')
new = pd.DataFrame(np.vstack((df1.values, df2.values)),
columns=df1.columns,
index=df1.index.append(df2.index))
new.sort_index(inplace=True)
return new
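# A sketch of what append_along_index does (the column labels are assumptions):
#   df1 = pd.DataFrame({'a': [1.0]}, index=pd.to_datetime(['2020-01-01']))
#   df2 = pd.DataFrame({'b': [2.0]}, index=pd.to_datetime(['2020-01-02']))
#   append_along_index(df1, df2)  # 2x2 frame over both dates, NaN where a column is missing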
def auto_increase_keys(_dict, keys):
if _dict:
max_v = max(_dict.values())
else:
max_v = 0
for key in keys:
if key not in _dict:
max_v += 1
_dict[key] = max_v
return _dict
class H5DB(object):
def __init__(self, data_path, max_cached_files=30):
self.data_path = str(data_path)
self.feather_data_path = os.path.abspath(self.data_path+'/../feather')
self.csv_data_path = os.path.abspath(self.data_path+'/../csv')
self.data_dict = None
self.cached_data = FIFODict(max_cached_files)
self.max_cached_files = max_cached_files
# self._update_info()
def _update_info(self):
factor_list = []
for root, subdirs, files in os.walk(self.data_path):
relpath = "/%s/"%os.path.relpath(root, self.data_path).replace("\\", "/")
for file in files:
if file.endswith(".h5"):
factor_list.append([relpath, file[:-3]])
self.data_dict = pd.DataFrame(
factor_list, columns=['path', 'name'])
def _read_h5file(self, file_path, key):
if file_path in self.cached_data:
return self.cached_data[file_path]
lock.acquire()
try:
data = pd.read_hdf(file_path, key)
except KeyError:
data = pd.read_hdf(file_path, 'data')
finally:
lock.release()
# update at 2020.02.15: support wide dataframe
columns_mapping = self._read_columns_mapping(file_path)
if not columns_mapping.empty:
data.rename(
columns=pd.Series(columns_mapping.index, index=columns_mapping.to_numpy()),
inplace=True
)
if self.max_cached_files > 0:
self.cached_data[file_path] = data
return data
def _read_columns_mapping(self, file_path):
try:
data = pd.read_hdf(file_path, 'column_name_mapping')
except KeyError:
data = pd.Series()
return data
def _normalize_columns(self, input, column_mapping):
return column_mapping[column_mapping.index.isin(input)].tolist()
def _save_h5file(self, data, file_path, key,
complib='blosc', complevel=9,
mode='w', **kwargs):
try:
lock.acquire()
# update at 2020.02.15: support wide dataframe
if data.shape[1] > 1000:
columns_mapping = {x:y for x, y in zip(data.columns, range(data.shape[1]))}
data2 = data.rename(columns=columns_mapping)
else:
data2 = data
columns_mapping = {}
with pd.HDFStore(file_path, mode=mode, complevel=complevel,
complib=complib) as f:
f.put(key, data2, **kwargs)
f.put('column_name_mapping', pd.Series(columns_mapping))
if file_path in self.cached_data:
self.cached_data.update({file_path: data})
lock.release()
except Exception as e:
lock.release()
raise e
def _read_pklfile(self, file_path):
if file_path in self.cached_data:
return self.cached_data[file_path]
lock.acquire()
try:
d = pd.read_pickle(file_path)
if self.max_cached_files > 0:
self.cached_data[file_path] = d
lock.release()
except Exception as e:
lock.release()
raise e
return d
def _save_pklfile(self, data, file_dir, name, protocol=-1):
dumper = DiskPersistProvider(
os.path.join(self.data_path, file_dir.strip('/')))
file_path = os.path.join(
self.data_path, file_dir.strip('/'), name+'.pkl'
)
lock.acquire()
try:
dumper.dump(data, name, protocol)
if file_path in self.cached_data:
self.cached_data[file_path] = data
except Exception as e:
lock.release()
raise e
lock.release()
def _delete_cached_factor(self, file_path):
if file_path in self.cached_data:
del self.cached_data[file_path]
def set_data_path(self, path):
self.data_path = path
# self._update_info()
# --------------------------- factor management ---------------------------
# check whether a factor exists
def check_factor_exists(self, factor_name, factor_dir='/'):
file_path = self.abs_factor_path(factor_dir, factor_name)
return os.path.isfile(file_path)
# delete a factor
def delete_factor(self, factor_name, factor_dir='/'):
factor_path = self.abs_factor_path(factor_dir, factor_name)
try:
os.remove(factor_path)
self._delete_cached_factor(factor_path)
except Exception as e:
print(e)
pass
self._update_info()
# list factor names
def list_factors(self, factor_dir):
dir_path = self.data_path + factor_dir
factors = [x[:-3] for x in os.listdir(dir_path) if x.endswith('.h5')]
return factors
# rename a factor
def rename_factor(self, old_name, new_name, factor_dir):
factor_path = self.abs_factor_path(factor_dir, old_name)
temp_factor_path = self.abs_factor_path(factor_dir, new_name)
factor_data = self._read_h5file(factor_path, old_name).rename(columns={old_name: new_name})
self._save_h5file(factor_data, temp_factor_path, new_name)
self.delete_factor(old_name, factor_dir)
# create a new factor directory
def create_factor_dir(self, factor_dir):
if not os.path.isdir(self.data_path+factor_dir):
os.makedirs(self.data_path+factor_dir)
# date range covered by a factor
def get_date_range(self, factor_name, factor_path):
try:
max_date = self.read_h5file_attr(factor_name, factor_path, 'max_date')
min_date = self.read_h5file_attr(factor_name, factor_path, 'min_date')
except Exception:
try:
panel = self._read_h5file(
self.abs_factor_path(factor_path, factor_name), key='data')
except KeyError:
panel = self._read_h5file(
self.abs_factor_path(factor_path, factor_name), key=factor_name)
if isinstance(panel, pd.Panel):
min_date = Datetime2DateStr(panel.major_axis.min())
max_date = Datetime2DateStr(panel.major_axis.max())
else:
min_date = panel.index.get_level_values('date').min()
max_date = panel.index.get_level_values('date').max()
return min_date, max_date
# read the attributes of a multi-column factor
def read_h5file_attr(self, factor_name, factor_path):
attr_file_path = self.abs_factor_attr_path(factor_path, factor_name)
print(attr_file_path)
if os.path.isfile(attr_file_path):
return self._read_pklfile(attr_file_path)
else:
raise FileNotFoundError('Factor attribute file not found!')
def clear_cache(self):
self.cached_data = FIFODict(self.max_cached_files)
# -------------------------- data management --------------------------
@handle_ids
def load_factor(self, factor_name, factor_dir=None, dates=None, ids=None, idx=None,
date_level=0):
"""
Load a single factor.
Factor format
-------------
Factors are stored as DataFrame(index=[date, IDs], columns=factor).
Parameters:
-----------
factor_name: str
factor name
factor_dir: str
factor directory
dates: list
dates to load
ids: list
security codes to load
idx: DataFrame or Series
index to align the result with
date_level: int
position of the date level within the MultiIndex
"""
if idx is not None:
dates = idx.index.get_level_values('date').unique()
return (self
.load_factor(factor_name, factor_dir=factor_dir, dates=dates)
.reindex(idx.index, copy=False)
)
factor_path = self.abs_factor_path(factor_dir, factor_name)
data = self._read_h5file(factor_path, factor_name)
query_str = ""
if ids is not None:
if isinstance(ids, list):
query_str += "IDs in @ids"
else:
query_str += "IDs == @ids"
if len(query_str) > 0:
query_str += " and "
if dates is not None:
if is_non_string_iterable(dates):
query_str += "date in @dates"
else:
query_str += "date == @dates"
if query_str.endswith(" and "):
query_str = query_str.strip(" and ")
if query_str:
df = data.query(query_str)
return df
else:
return data
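# Example usage (a sketch; the database path, factor name and directory are assumptions):
#   db = H5DB('D:/data/h5')
#   pe = db.load_factor('pe_ttm', factor_dir='/stock_value/',
#                       dates=pd.to_datetime(['2020-01-02', '2020-01-03']))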
def load_factor2(self, factor_name, factor_dir=None, dates=None, ids=None, idx=None,
stack=False, check_A=False):
"""加载另外一种类型的因子
因子的格式是一个二维DataFrame,行索引是DatetimeIndex,列索引是股票代码。
check_A: 过滤掉非A股股票
"""
if idx is not None:
dates = idx.index.get_level_values('date').unique().tolist()
ids = idx.index.get_level_values('IDs').unique().tolist()
factor_path = self.abs_factor_path(factor_dir, factor_name)
columns_mapping = self._read_columns_mapping(factor_path)
if not columns_mapping.empty and ids is not None:
ids_normalized = self._normalize_columns(ids, columns_mapping)
if not ids_normalized:
return pd.DataFrame(columns=ids)
else:
ids_normalized = ids
where_term = None
if dates is not None:
dates = pd.to_datetime(dates)
where_term = "index in dates"
with pd.HDFStore(factor_path, mode='r') as f:
try:
data = pd.read_hdf(f, key='data', where=where_term, columns=ids_normalized)
except NotImplementedError as e:
data = pd.read_hdf(f, key='data').reindex(index=dates, columns=ids)
except KeyError as e:
f.close()
data = self.load_factor(factor_name, factor_dir, dates, ids)[factor_name].unstack()
if ids_normalized is not None and data.shape[1] != len(ids_normalized):
data = data.reindex(columns=ids_normalized)
if not columns_mapping.empty:
data.rename(columns=pd.Series(columns_mapping.index, index=columns_mapping.to_numpy()), inplace=True)
data.name = factor_name
if check_A:
data = data.filter(regex='^[6,0,3]', axis=1)
if stack:
data = data.stack().to_frame(factor_name)
data.index.names = ['date', 'IDs']
if idx is not None:
data = data.reindex(idx.index)
return data
def show_symbol_name(self, factor_data=None, factor_name=None,
factor_dir=None, dates=None, data_source=None):
"""返回带有股票简称的因子数据
Note:
factor_data应为AST或者SAST数据
"""
if data_source is None:
data_source = 'D:/data/factors'
import pandas as pd
names = pd.read_csv(os.path.join(data_source,'base','ashare_list_delist_date.csv'),
header=0,index_col=0,usecols=[0,1,2],
converters={'IDs': lambda x: str(x).zfill(6)},
encoding='GBK')
names.set_index('IDs', inplace=True)
if factor_data is None:
factor_data = self.load_factor2(factor_name, factor_dir, dates=dates)
factor_data = factor_data.stack().to_frame(factor_data.name)
if isinstance(factor_data.index, pd.MultiIndex):
factor_data = factor_data.reset_index().join(names, on='IDs', how='left')
elif isinstance(factor_data, pd.Series):
factor_data = factor_data.reset_index().join(names, on='IDs', how='left')
else:
factor_data = factor_data.stack().reset_index().join(names, on='IDs', how='left')
return factor_data
def read_h5file(self, file_name, path, group='data', check_A=None):
file_path = self.abs_factor_path(path, file_name)
data = self._read_h5file(file_path, key=group)
if check_A is not None:
data = data[data[check_A].str.match('^[0,3,6]')]
return data
def save_h5file(self, data, name, path, group='data', ignore_index=True,
drop_duplicated_by_index=True, drop_duplicated_by_keys=None,
if_exists='append', sort_by_fields=None, sort_index=False,
append_axis=0, **kwargs):
"""直接把DataFrame保存成h5文件
Parameters
----------
use_index: bool
当文件已存在,去重处理时按照索引去重。
ignore_index: bool:
if_exists='append'时, 是否重新建立索引。
if_exists: str
文件已存在时的处理方式:'append', 'replace' or 'update'.
'append': 直接添加,不做去重处理
'update': 添加后做去重处理,当'use_index'为TRUE时,按照
Index去重。
'replace': 重写文件
sort_by_fields: None or list
写入之前,DataFrame先按照字段排序
sort_index: bool, 默认为False
写入之前,是否按照索引排序
kwargs: 传入_save_h5file
"""
file_path = self.abs_factor_path(path, name)
if self.check_factor_exists(name, path):
df = self.read_h5file(name, path, group=group)
if if_exists == 'append':
data = pd.concat([df, data], axis=append_axis, ignore_index=ignore_index)
elif if_exists == 'replace':
pass
elif if_exists=='update':
data = pd.concat([df, data], axis=append_axis)
if drop_duplicated_by_index:
if append_axis == 0:
data = data[~data.index.duplicated(keep='last')]
else:
data = data.iloc[:, ~data.columns.duplicated(keep='last')]
else:
data.drop_duplicates(subset=drop_duplicated_by_keys,
keep='last',
inplace=True)
data.reset_index(drop=True, inplace=True)
else:
raise NotImplementedError
if ignore_index and not drop_duplicated_by_index:
data.reset_index(drop=True, inplace=True)
if sort_by_fields is not None:
data.sort_values(sort_by_fields, inplace=True)
if sort_index:
data.sort_index(inplace=True)
self._save_h5file(data, file_path, group, **kwargs)
def list_h5file_factors(self, file_name, file_pth):
""""提取h5File的所有列名"""
attr_file_path = self.data_path + file_pth + file_name + '_attr.pkl'
file_path = self.abs_factor_path(file_pth, file_name)
if os.path.isfile(attr_file_path):
attr = pd.read_pickle(attr_file_path)
return attr['factors']
attr_file_path = self.data_path + file_pth + file_name + '_mapping.pkl'
try:
attr = pd.read_pickle(attr_file_path)
return attr
except FileNotFoundError:
df = self._read_h5file(file_path, "data")
return df.columns.tolist()
def load_factors(self, factor_names_dict, dates=None, ids=None):
_l = []
for factor_path, factor_names in factor_names_dict.items():
for factor_name in factor_names:
df = self.load_factor(factor_name, factor_dir=factor_path, dates=dates, ids=ids)
_l.append(df)
return pd.concat(_l, axis=1)
def load_factors2(self, factor_names_dict, dates=None, ids=None, idx=None,
merge=True, stack=True):
assert not (merge is True and stack is False)
_l = []
for factor_path, factor_names in factor_names_dict.items():
for factor_name in factor_names:
df = self.load_factor2(factor_name, factor_dir=factor_path, dates=dates, ids=ids,
idx=idx, stack=stack)
_l.append(df)
if merge:
return pd.concat(_l, axis=1)
return tuple(_l)
def load_factors3(self, factor_names_dict, dates=None, ids=None,
idx=None):
if (dates is None or ids is None) and (idx is None):
raise ValueError("idx must not be None, or both date and ids must not be None!")
l = []
factor_name_list = []
for factor_path, factor_names in factor_names_dict.items():
for factor_name in factor_names:
factor_name_list.append(factor_name)
df = self.load_factor2(factor_name, factor_dir=factor_path, dates=dates, ids=ids,
idx=idx, stack=False)
l.append(df.to_numpy())
K = len(factor_name_list)
T, N = l[0].shape
threeD = np.concatenate(l, axis=0).reshape((K, T*N)).T
df = pd.DataFrame(threeD,
index=pd.MultiIndex.from_product([df.index,df.columns], names=['date', 'IDs']),
columns=factor_name_list)
return df
def load_macro_factor(self, factor_name, factor_dir, ids=None, ann_dates=None, dates=None,
date_level=0, time='15:00'):
data = self.load_factor(factor_name, factor_dir, ids=ids, date_level=date_level)
if 'ann_dt' in data.columns and ann_dates is not None:
data = data.reset_index().set_index('ann_dt').sort_index()
dates = pd.to_datetime(ann_dates, format='%Y%m%d') + pd.Timedelta(hours=int(time[:2]), minutes=int(time[-2:]))
df = data.groupby('name').apply(lambda x: x.reindex(dates, method='ffill'))[['data']]
else:
if dates is None:
dates = slice(None)
else:
dates = pd.to_datetime(dates, format='%Y%m%d')
if date_level == 0:
df = data.loc[pd.IndexSlice[dates, :], ['data']]
else:
df = data.loc[pd.IndexSlice[:, dates], ['data']]
return df
def save_factor(self, factor_data, factor_dir, if_exists='update'):
"""往数据库中写数据
数据格式:DataFrame(index=[date,IDs],columns=data)
Parameters:
-----------
factor_data: DataFrame
"""
if isinstance(factor_data, pd.Series):
factor_data = factor_data.to_frame()
if factor_data.index.nlevels == 1:
if isinstance(factor_data.index, pd.DatetimeIndex):
factor_data['IDs'] = '111111'
factor_data.set_index('IDs', append=True, inplace=True)
else:
factor_data['date'] = DateStr2Datetime('19000101')
factor_data.set_index('date', append=True, inplace=True)
factor_data.sort_index(inplace=True)
self.create_factor_dir(factor_dir)
for column in factor_data.columns:
factor_path = self.abs_factor_path(factor_dir, column)
if not self.check_factor_exists(column, factor_dir):
self._save_h5file(factor_data[[column]].dropna(),
factor_path, column)
elif if_exists == 'update':
old_panel = self._read_h5file(factor_path, column)
new_frame = old_panel.append(factor_data[[column]].dropna())
new_panel = new_frame[~new_frame.index.duplicated(keep='last')].sort_index()
self._save_h5file(new_panel,
factor_path,
column
)
elif if_exists == 'replace':
self._save_h5file(factor_data[[column]].dropna(),
factor_path,
column
)
else:
raise KeyError("please make sure if_exists is validate")
def save_factor2(self, factor_data, factor_dir, if_exists='append',
fillvalue=None, fillmethod=None):
"""往数据库中写数据
数据格式:DataFrame(index=date, columns=IDs)
"""
if isinstance(factor_data, pd.Series):
if isinstance(factor_data.index, pd.MultiIndex):
factor_name = factor_data.name
factor_data = factor_data.unstack()
else:
raise ValueError("Format of factor_data is invalid.")
elif isinstance(factor_data, pd.DataFrame):
if factor_data.shape[1] > 1 and factor_data.index.nlevels > 1:
raise ValueError("Column of factor_data must be one.")
elif factor_data.index.nlevels > 1:
factor_name = factor_data.columns[0]
factor_data = factor_data[factor_name].unstack()
else:
factor_name = factor_data.name
else:
raise NotImplementedError
self.create_factor_dir(factor_dir)
factor_path = self.abs_factor_path(factor_dir, factor_name)
if not self.check_factor_exists(factor_name, factor_dir):
self._save_h5file(factor_data, factor_path, 'data', complevel=9,
format='table')
elif if_exists == 'append':
raw = self._read_h5file(factor_path, key='data')
new = factor_data[~factor_data.index.isin(raw.index)]
d = append_along_index(raw, new)
if fillvalue:
d = d.sort_index().fillna(fillvalue)
if fillmethod:
d = d.sort_index().fillna(method=fillmethod)
self._save_h5file(d, factor_path, 'data', complevel=0,
format='table')
elif if_exists == 'update':
raw = self._read_h5file(factor_path, key='data')
raw, factor_data = raw.align(factor_data, axis='columns')
raw.update(factor_data)
d = append_along_index(raw, factor_data[~factor_data.index.isin(raw.index)])
if fillvalue:
d = d.sort_index().fillna(fillvalue)
if fillmethod:
d = d.sort_index().fillna(method=fillmethod)
self._save_h5file(d, factor_path, 'data', complevel=0,
format='table')
elif if_exists == 'replace':
self._save_h5file(factor_data, factor_path, 'data', complevel=0,
format='table')
else:
pass
def save_as_dummy(self, factor_data, factor_dir, indu_name=None, if_exists='update'):
"""往数据库中存入哑变量数据
factor_data: pd.Series or pd.DataFrame
当factor_data是Series时,首先调用pd.get_dummy()转成行业哑变量
"""
if isinstance(factor_data, pd.Series):
assert factor_data.name is not None or indu_name is not None
factor_data.dropna(inplace=True)
indu_name = indu_name if indu_name is not None else factor_data.name
factor_data = pd.get_dummies(factor_data)
else:
assert isinstance(factor_data, pd.DataFrame) and indu_name is not None
factor_data = factor_data.drop('T00018', axis=0, level='IDs').fillna(0)
factor_data = factor_data.loc[(factor_data != 0).any(axis=1)]
file_pth = self.abs_factor_path(factor_dir, indu_name)
if self.check_factor_exists(indu_name, factor_dir) and if_exists=='update':
mapping = self._read_pklfile(file_pth.replace('.h5', '_mapping.pkl'))
factor_data = factor_data.reindex(columns=mapping)
new_saver = pd.DataFrame(np.argmax(factor_data.values, axis=1), columns=[indu_name],
index=factor_data.index)
else:
new_saver = pd.DataFrame(np.argmax(factor_data.values, axis=1), columns=[indu_name],
index=factor_data.index)
mapping = factor_data.columns.values.tolist()
self.save_factor(new_saver, factor_dir, if_exists=if_exists)
self._save_pklfile(mapping, factor_dir, indu_name+'_mapping', protocol=2)
def save_as_dummy2(self, factor_data, factor_dir, indu_name=None, if_exists='update'):
"""往数据库中存入哑变量数据
factor_data: pd.Series or pd.DataFrame
当factor_data是Series时,首先调用pd.get_dummy()转成行业哑变量
"""
if isinstance(factor_data, pd.Series):
assert factor_data.name is not None or indu_name is not None
factor_data.dropna(inplace=True)
indu_name = indu_name if indu_name is not None else factor_data.name
factor_data = pd.get_dummies(factor_data)
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 17 21:30:16 2018
@author: yang
"""
from imp import reload
import sys
reload(sys)
# sys.setdefaultencoding("utf-8")
import gc
import re
import sys
import time
import jieba
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
import os.path
import os
import re
import datetime
import numpy as np
import pandas as pd
from sklearn.cross_validation import train_test_split
import lightgbm as lgb
import gensim
from gensim.models import Word2Vec
from sklearn.tree import DecisionTreeRegressor
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import ensemble
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder
train = pd.read_csv('data/train_set.csv')#1653
test = pd.read_csv('data/test_set.csv')
train_temp=train.copy()
test_temp=test.copy()
def chushuang(x):
if type(x)==type('str'):
return x.split(' ')[0]
else:
return x
for i,j in enumerate(['2403','2405']):
train[j]=train[j].map(chushuang)
test[j]=test[j].map(chushuang)
#train['2403']=train['2403'].str.split(' ')[0]
le=LabelEncoder()
train=train.convert_objects(convert_numeric=True)
test=test.convert_objects(convert_numeric=True)
#train=train.loc[0:100,:]
#test=test.loc[0:100,:]
def remain_float(df,thresh=0.96):
float_feats = []
str_feats = []
print('----------select float data-----------')
print('sum',len(df.columns))
for c in df.columns:
num_missing = df[c].isnull().sum()
missing_percent = num_missing / float(df.shape[0])
if df[c].dtype == 'float64' and missing_percent<thresh:
float_feats.append(c)
elif df[c].dtype == 'int64':
print(c)
else:
str_feats.append(c)
return float_feats,str_feats
floatyin_feats=['1850','2177','2371','2376','300017','300036','809021']
float_feats,str_feats=remain_float(train,thresh=0.96)
str_feats=str_feats+floatyin_feats
str_feats.remove('vid')
train_float=train[float_feats]
test_float=test[float_feats]
train_str=train_temp[str_feats]
test_str=test_temp[str_feats]
a=train_float.head(100)
# ------------------- string features start
str_feature=[c for c in train_str.columns if c not in floatyin_feats]
train_xue=pd.DataFrame()
test_xue=pd.DataFrame()
# ---------- gender (male/female)
def zigong(x):
if type(x)==type('str'):
return 1
else:
return 0
for i,j in enumerate(['0121']):
train_xue['sex']=train_str[j].map(zigong)
test_xue['sex']=test[j].map(zigong)
def qianliexian(x):
if type(x)==type('str'):
return 2
else:
return 0
for i,j in enumerate(['0120']):
train_xue['sex']=train_xue['sex'] | train_str[j].map(qianliexian)
test_xue['sex']=test_xue['sex'] | test[j].map(qianliexian)
str_feature=[c for c in train_str.columns if c not in floatyin_feats]
train_str=train_str.fillna("正常")
test_str=test_str.fillna("正常")
def zigongruxian0102(x):
if '子宫' in x or '附件' in x:
return 1
elif '前列腺' in x:
return 2
else:
return 0
for i,j in enumerate(['0101','0102']):
train_xue['sex']=train_xue['sex'] | train_str[j].map(zigongruxian0102)
test_xue['sex']=test_xue['sex'] | test_str[j].map(zigongruxian0102)
train_xue = pd.concat([train_xue, pd.get_dummies(train_xue['sex'])], axis=1)
test_xue = pd.concat([test_xue, pd.get_dummies(test_xue['sex'])], axis=1)
train_xue = train_xue.drop('sex',axis=1)
test_xue = test_xue.drop('sex',axis=1)
train_xue.columns=['man','woman','el']
test_xue.columns=['man','woman','el']
train_str=train_str.fillna("正常")
test_str=test_str.fillna("正常")
# ---------------------- blood pressure / disease keyword matching
xue_feature=['血压','糖尿病','冠心病','甲肝','病史','治疗','胃溃疡','房颤','间断','痛风','血糖','冠心病','胃炎','结石',
'血吸虫','肺心病','甲亢','心肌炎','脑血栓','尿酸','肝硬化','血脂','血症','肾炎','肥胖',
'胰腺炎','脂肪','动脉硬化','动脉壁','血管壁','心动过缓','心动过速','甲状腺功能亢进','甲状腺病变','乙肝']
for i,j in enumerate(str_feature):
if i==0:
for name in xue_feature:
train_xue[name] = train_str[j].map(lambda x:1 if name in x else 0)
test_xue[name] = test_str[j].map(lambda x:1 if name in x else 0)
#train_xue['血压量']=train_str[j].map(xueya)
#test_xue['血压量']=test_str[j].map(xueya)
else:
for name in xue_feature:
train_xue[name]=train_xue[name] | train_str[j].map(lambda x:1 if name in x else 0)
test_xue[name] = test_xue[name] | test_str[j].map(lambda x:1 if name in x else 0)
#train_xue['血压量']=train_xue['血压量'] | train_str[j].map(xueya)
#test_xue['血压量'] = test_xue['血压量'] | test_str[j].map(xueya)
train_xue['动脉壁']=train_xue['动脉硬化']|train_xue['动脉壁']
train_xue['甲亢']=train_xue['甲亢']|train_xue['甲状腺功能亢进']|train_xue['甲状腺病变']
del(train_xue['动脉硬化'])
del(train_xue['甲状腺功能亢进'])
del(train_xue['甲状腺病变'])
#--------str to shuzi tezheng
def chang(x):
if type(x)==type('str'):
return len(x)
else:
return np.nan
#train_temp[float_feats].applymap(dd)
#print pd.(train_temp[float_feats].values.flatten())
#train_temp[float_feats].str.isalnum()
for i,j in enumerate(train_temp[str_feats].columns):
name=str_feats[i]+'_0'
if 1:
train_xue[name] = train_temp[j].map(chang)
test_xue[name] = test_temp[j].map(chang)
train_xue=train_xue.fillna(train_xue.median())
test_xue=test_xue.fillna(test_xue.median())
for i,j in enumerate(train_temp[str_feats].columns):
name=str_feats[i]+'_0'
if 1:
train_xue[name] = (train_xue[name]-train_xue[name].mean())/train_xue[name].std()
test_xue[name] = (test_xue[name] - test_xue[name].mean()) / test_xue[name].std()
#----------yinxing
train_yin=pd.DataFrame()
test_yin = pd.DataFrame()
import pandas as pd
import numpy as np
import matplotlib.dates as mpd
import datetime as dt
'''
After a date string is converted to a Timestamp, it carries no timezone information by default.
Converting a timezone-naive timestamp to a number gives the same value as converting it after
setting it to UTC, as example #2 shows.
Once a timestamp is localized to a timezone, its numeric value is fixed; the timezone only
adjusts how the local time is displayed.
2015-12-21 09:30:00
localized to Asia/Shanghai is displayed as 2015-12-21 09:30:00+08:00
converted from Shanghai time to Tokyo time is displayed as 2015-12-21 10:30:00+09:00
converted from Shanghai time to UTC is displayed as 2015-12-21 01:30:00+00:00
These conversions mean that when the local time in Shanghai is 09:30, the local time in Tokyo
is 10:30, and the corresponding UTC time is 01:30.
'''
# 1
history = ['2015-12-21 09:30:00']
history = ['2015-12-21'+' 09:30:00']
date = dt.datetime.strptime(history[0], '%Y-%m-%d %H:%M:%S')
dateNum = mpd.date2num(date)
print(type(date))
print(date)
print(dateNum)
date = pd.to_datetime(history[0], '%Y-%m-%d %H:%M:%S')
dateNum = mpd.date2num(date)
print('-----time with none timezone,')
print(type(date))
print(date)
print(dateNum)
date = date.tz_localize('Asia/Shanghai')
dateNum = mpd.date2num(date)
print('-----set time zone to shanghai')
print(type(date))
print(date)
print(dateNum)
date = date.tz_convert('utc')
dateNum = mpd.date2num(date)
print('-----time utc')
print(type(date))
print(date)
print(dateNum)
date = date.tz_convert('Asia/Tokyo')
dateNum = mpd.date2num(date)
print('----time us/eastern')
print(type(date))
print(date)
print(dateNum)
date = date.tz_convert('US/Eastern')
dateNum = mpd.date2num(date)
print('----time us/eastern')
print(type(date))
print(date)
print(dateNum)
# 2
import pandas as pd
import numpy as np
import matplotlib.dates as mpd
import datetime as dt
history = ['2015-12-21 09:30:00']
date = dt.datetime.strptime(history[0], '%Y-%m-%d %H:%M:%S')
dateNum = mpd.date2num(date)
print(type(date))
print(date)
print(dateNum)
date = pd.to_datetime(history[0], '%Y-%m-%d %H:%M:%S')
dateNum = mpd.date2num(date)
print('-----time with none timezone,')
print(type(date))
print(date)
print(dateNum)
date = pd.to_datetime(history[0], '%Y-%m-%d %H:%M:%S')
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
import pandas_datareader.data as web
pytestmark = pytest.mark.stable
class TestEurostat(object):
def test_get_ert_h_eur_a(self):
# Former euro area national currencies vs. euro/ECU
# annual data (ert_h_eur_a)
df = web.DataReader(
"ert_h_eur_a",
"eurostat",
start=pd.Timestamp("2009-01-01"),
end=pd.Timestamp("2010-01-01"),
)
assert isinstance(df, pd.DataFrame)
header = df.columns.levels[0][0]
currencies = ["Italian lira", "Lithuanian litas"]
df = df[header]
df = df["Average"][currencies]
exp_col = pd.MultiIndex.from_product(
[currencies, ["Annual"]], names=["CURRENCY", "FREQ"]
)
exp_idx = pd.DatetimeIndex(["2009-01-01", "2010-01-01"], name="TIME_PERIOD")
values = np.array([[1936.27, 3.4528], [1936.27, 3.4528]])
expected = pd.DataFrame(values, index=exp_idx, columns=exp_col)
tm.assert_frame_equal(df, expected)
def test_get_sts_cobp_a(self):
# Building permits - annual data (2010 = 100)
df = web.DataReader(
"sts_cobp_a",
"eurostat",
start=pd.Timestamp("2000-01-01"),
end=pd.Timestamp("2013-01-01"),
)
idx = pd.date_range("2000-01-01", "2013-01-01", freq="AS", name="TIME_PERIOD")
ne_name = (
"Index, 2010=100",
"Building permits - m2 of useful floor area",
"Unadjusted data (i.e. neither seasonally adjusted nor "
"calendar adjusted data)",
"Non-residential buildings, except office buildings",
"Netherlands",
"Annual",
)
ne_values = [
200.0,
186.5,
127.3,
130.7,
143.3,
147.8,
176.7,
227.4,
199.4,
128.5,
100.0,
113.8,
89.3,
77.6,
]
ne = pd.Series(ne_values, name=ne_name, index=idx)
uk_name = (
"Index, 2010=100",
"Building permits - m2 of useful floor area",
"Unadjusted data (i.e. neither seasonally adjusted nor "
"calendar adjusted data)",
"Non-residential buildings, except office buildings",
"United Kingdom",
"Annual",
)
uk_values = [
112.5,
113.3,
110.2,
112.1,
119.1,
112.7,
113.1,
121.8,
114.0,
105.9,
100.0,
98.6,
103.7,
81.3,
]
uk = pd.Series(uk_values, name=uk_name, index=idx)
import os
import json
import luigi
import pandas as pd
class MakeTapConfig(luigi.Task):
ticker = luigi.Parameter()
def requires(self):
return []
def output(self):
return luigi.LocalTarget('config/%s.json' % self.ticker)
def run(self):
with self.output().open('w') as f:
json.dump({'start_date': '2017-01-01', 'end_date': '2017-07-03',
'ticker': self.ticker}, f)
class SyncPrice(luigi.Task):
ticker = luigi.Parameter()
def requires(self):
return MakeTapConfig(ticker=self.ticker)
def output(self):
return luigi.LocalTarget('output/%s.csv' % self.ticker)
def run(self):
tap_cmd = 'tap-quandl-stock-price -c %s' % self.input().fn
target_cmd = 'target-csv -c csv_config.json -o %s' % self.output().fn
os.system('%s | %s' % (tap_cmd, target_cmd))
class QuandlSync(luigi.Task):
input_filename = luigi.Parameter()
output_filename = luigi.Parameter()
def requires(self):
task_list = []
with open(self.input_filename, 'r') as f:
task_list = [SyncPrice(ticker.strip()) for ticker in f.readlines()]
return task_list
def output(self):
return luigi.LocalTarget(self.output_filename)
def run(self):
input_filenames = [x.fn for x in self.input()]
df_list = [ | pd.read_csv(fn) | pandas.read_csv |
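# Hedged usage sketch (ticker file and output names are illustrative, not from the
# original pipeline): the tasks above can be launched programmatically with luigi's
# local scheduler instead of the `luigi` command-line entry point.
import luigi

if __name__ == '__main__':
    luigi.build(
        [QuandlSync(input_filename='tickers.txt', output_filename='combined.csv')],
        local_scheduler=True,
    )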
import ops.utils
import networkx as nx
import pandas as pd
import numpy as np
import scipy.spatial.kdtree
import scipy.stats
from collections import Counter
from scipy.spatial.distance import cdist
from scipy.interpolate import UnivariateSpline
from statsmodels.stats.multitest import multipletests
def format_stats_wide(df_stats):
index = ['gene_symbol']
columns = ['stat_name', 'stimulant']
values = ['statistic', 'pval', 'pval_FDR_10']
stats = (df_stats
.pivot_table(index=index, columns=columns, values=values)
.pipe(ops.utils.flatten_cols))
counts = (df_stats
.pivot_table(index=index, columns='stimulant', values='count')
.rename(columns=lambda x: 'cells_' + x))
return pd.concat([stats, counts], axis=1)
def distribution_difference(df):
col = 'dapi_gfp_corr_early'
y_neg = (df
.query('gene_symbol == "non-targeting"')
[col]
)
return df.groupby('gene_symbol').apply(lambda x:
scipy.stats.wasserstein_distance(x[col], y_neg))
def add_est_timestamps(df_all):
s_per_frame = 24 * 60
sites_per_frame = 2 * 364
s_per_site = s_per_frame / sites_per_frame
starting_time = 3 * 60
cols = ['frame', 'well', 'site']
df_ws = df_all[cols].drop_duplicates().sort_values(cols)
est_timestamps = [(starting_time + i*s_per_site) / 3600
for i in range(len(df_ws))]
df_ws['timestamp'] = est_timestamps
return df_all.join(df_ws.set_index(cols), on=cols)
def add_dapi_diff(df_all):
index = ['well', 'site', 'cell_ph']
dapi_diff = (df_all
.pivot_table(index=index, columns='frame',
values='dapi_max')
.pipe(lambda x: x/x.mean())
.pipe(lambda x: x.max(axis=1) - x.min(axis=1))
.rename('dapi_diff')
)
return df_all.join(dapi_diff, on=index)
def add_spline_diff(df, s=25):
T_neg, Y_neg = (df
.query('gene_symbol == "non-targeting"')
.groupby('timestamp')
['dapi_gfp_corr'].mean()
.reset_index().values.T
)
ix = np.argsort(T_neg)
spl = UnivariateSpline(T_neg[ix], Y_neg[ix], s=s)
return (df
.assign(splined=lambda x: spl(df['timestamp']))
.assign(spline_diff=lambda x: x.eval('dapi_gfp_corr - splined'))
)
def get_stats(df, col='spline_diff'):
df_diff = (df
.groupby(['gene_symbol', 'cell'])
[col].mean()
.sort_values(ascending=False)
.reset_index())
negative_vals = (df_diff
.query('gene_symbol == "non-targeting"')
[col]
)
test = lambda x: scipy.stats.ttest_ind(x, negative_vals).pvalue
stats = (df_diff.groupby('gene_symbol')
[col]
.pipe(ops.utils.groupby_reduce_concat, 'mean', 'count',
pval=lambda x: x.apply(test))
.assign(pval_FDR_10=lambda x:
multipletests(x['pval'], 0.1)[1]))
return stats
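# Hedged usage sketch of the helpers above; df_all is assumed to carry the
# 'frame', 'well', 'site', 'gene_symbol', 'cell' and 'dapi_gfp_corr' columns
# that those helpers expect.
def run_spline_stats(df_all):
    df_all = add_est_timestamps(df_all)          # adds 'timestamp'
    df_all = add_spline_diff(df_all, s=25)       # adds 'splined' and 'spline_diff'
    return get_stats(df_all, col='spline_diff')  # per-gene t-test vs. non-targeting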
# track nuclei nearest neighbor
def initialize_graph(df):
arr_df = [x for _, x in df.groupby('frame')]
nodes = df[['frame', 'label']].values
nodes = [tuple(x) for x in nodes]
G = nx.DiGraph()
G.add_nodes_from(nodes)
edges = []
for df1, df2 in zip(arr_df, arr_df[1:]):
edges = get_edges(df1, df2)
G.add_weighted_edges_from(edges)
return G
def get_edges(df1, df2):
neighboring_points = 3
get_label = lambda x: tuple(int(y) for y in x[[2, 3]])
x1 = df1[['i', 'j', 'frame', 'label']].values
x2 = df2[['i', 'j', 'frame', 'label']].values
kdt = scipy.spatial.kdtree.KDTree(df1[['i', 'j']])
points = df2[['i', 'j']]
result = kdt.query(points, neighboring_points)
edges = []
for i2, (ds, ns) in enumerate(zip(*result)):
end_node = get_label(x2[i2])
for d, i1 in zip(ds, ns):
start_node = get_label(x1[i1])
w = d
edges.append((start_node, end_node, w))
return edges
def displacement(x):
d = np.sqrt(np.diff(x['x'])**2 + np.diff(x['y'])**2)
return d
def analyze_graph(G, cutoff=100):
"""Trace a path forward from each nucleus in the starting frame. Only keep
the paths that reach the final frame.
"""
start_nodes = [n for n in G.nodes if n[0] == 0]
max_frame = max([frame for frame, _ in G.nodes])
cost, path = nx.multi_source_dijkstra(G, start_nodes, cutoff=cutoff)
cost = {k:v for k,v in cost.items() if k[0] == max_frame}
path = {k:v for k,v in path.items() if k[0] == max_frame}
return cost, path
def filter_paths(cost, path, threshold=35):
"""Remove intersecting paths.
returns list of one [(frame, label)] per trajectory
"""
# remove intersecting paths (node in more than one path)
node_count = Counter(sum(path.values(), []))
bad = set(k for k,v in node_count.items() if v > 1)
print('bad', len(bad), len(node_count))
# remove paths with cost over threshold
too_costly = [k for k,v in cost.items() if v > threshold]
bad = bad | set(too_costly)
relabel = [v for v in path.values() if not (set(v) & bad)]
assert(len(relabel) > 0)
return relabel
def relabel_nuclei(nuclei, relabel):
nuclei_ = nuclei.copy()
max_label = nuclei.max() + 1
for i, nodes in enumerate(zip(*relabel)):
labels = [n[1] for n in nodes]
table = np.zeros(max_label).astype(int)
table[labels] = range(len(labels))
nuclei_[i] = table[nuclei_[i]]
return nuclei_
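# Hedged end-to-end sketch of the nearest-neighbour tracker defined above.
# df_info is assumed to hold one row per nucleus with 'frame', 'label', 'i', 'j'
# columns, and `nuclei` to be a (frames, height, width) stack of label images.
def track_nuclei(df_info, nuclei, cutoff=100, threshold=35):
    G = initialize_graph(df_info)
    cost, path = analyze_graph(G, cutoff=cutoff)
    relabel = filter_paths(cost, path, threshold=threshold)
    return relabel_nuclei(nuclei, relabel)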
# track nuclei trackmate
def call_TrackMate_centroids(input_path, output_path='trackmate_output.csv', fiji_path=None, threads=1, tracker_settings=dict()):
'''Warnings:
- `threads` probably does not actually set the maximum number of threads for Fiji.
- To allow multiple instances of Fiji to run concurrently (e.g., when launched from a snakemake
pipeline), the `allowMultiple` parameter in Fiji.app/Contents/Info.plist likely has to be set to true.
The `CUTOFF_PERCENTILE` parameter in tracker_settings changes the alternative cost for gap
closing/merging/splitting; higher values lead to more gap closures/merges/splits.
'''
import subprocess, json
if fiji_path is None:
import sys
if sys.platform == "darwin":
fiji_path = '/Applications/Fiji.app/Contents/MacOS/ImageJ-macosx'
elif sys.platform == "linux":
fiji_path = '~/Fiji.app/ImageJ-linux64'
else:
raise ValueError("Currently only OS X and linux systems can infer Fiji install location.")
tracker_defaults = {"LINKING_MAX_DISTANCE":60.,"GAP_CLOSING_MAX_DISTANCE":60.,
"ALLOW_TRACK_SPLITTING":True,"SPLITTING_MAX_DISTANCE":60.,
"ALLOW_TRACK_MERGING":True,"MERGING_MAX_DISTANCE":60.,
"MAX_FRAME_GAP":2,"CUTOFF_PERCENTILE":0.90}
for key, val in tracker_defaults.items():
_ = tracker_settings.setdefault(key,val)
trackmate_call = ('''{fiji_path} --ij2 --headless --console --run {ops_path}/external/TrackMate/track_centroids.py'''
.format(fiji_path=fiji_path,ops_path=ops.__path__[0]))
variables = ('''"input_path='{input_path}',output_path='{output_path}',threads={threads},tracker_settings='{tracker_settings}'"'''
.format(input_path=input_path,output_path=output_path,
threads=int(threads),tracker_settings=json.dumps(tracker_settings)))
output = subprocess.check_output(' '.join([trackmate_call,variables]), shell=True)
print(output.decode("utf-8"))
def format_trackmate(df):
import ast
df = (pd.concat([df,
pd.DataFrame(df['parent_ids'].apply(lambda x: ast.literal_eval(x)).tolist(),
index = df.index,columns=['parent_id_0','parent_id_1'])
],axis=1)
.fillna(value=-1)
.drop(columns=['parent_ids'])
.assign(relabel=-1,parent_cell_0=-1,parent_cell_1=-1)
.astype(int)
.set_index('id')
)
lookup = np.zeros((df.index.max()+2,3),dtype=int)
lookup[df.index] = (df
[['cell','parent_id_0','parent_id_1']]
.values
)
lookup[-1] = np.array([-1,-1,-1])
set_cols = ['relabel','parent_cell_0','parent_cell_1']
current = 1
arr_frames = []
for frame,df_frame in df.groupby('frame'):
df_frame = df_frame.copy()
if frame==0:
arr_frames.append(df_frame.assign(relabel = list(range(current,current+df_frame.pipe(len))),
parent_cell_0 = -1,
parent_cell_1 = -1))
current += df_frame.pipe(len)
continue
# unique child from single parent
idx_propagate = ((df_frame.duplicated(['parent_id_0','parent_id_1'],keep=False)==False)
&
((df_frame[['parent_id_0','parent_id_1']]==-1).sum(axis=1)==1)
).values
lookup[df_frame[idx_propagate].index.values] = df_frame.loc[idx_propagate,set_cols] = lookup[df_frame.loc[idx_propagate,'parent_id_0'].values]
# split, merge, or new
idx_new = ((df_frame.duplicated(['parent_id_0','parent_id_1'],keep=False))
|
((df_frame[['parent_id_0','parent_id_1']]==-1).sum(axis=1)!=1)
).values
lookup[df_frame[idx_new].index.values] = df_frame.loc[idx_new,set_cols] = np.array([list(range(current,current+idx_new.sum())),
lookup[df_frame.loc[idx_new,'parent_id_0'].values,0],
lookup[df_frame.loc[idx_new,'parent_id_1'].values,0]
]).T
current += idx_new.sum()
arr_frames.append(df_frame)
return pd.concat(arr_frames).reset_index()
# recover parent relationships
## During some iterations of TrackMate, saving of parent cell identities was unintentionally
## commented out; these functions infer those relationships after the fact. For a single tile they
## assigned the same parent-child relationships as TrackMate for >99.8% of cells, so the problem is
## well constrained.
def recover_parents(df_tracked,threshold=60, cell='cell', ij=('i','j'), keep_cols=['well','tile','track_id','cell']):
# to be run on a table from a single tile
# get junction cells
df_pre_junction = (df_tracked
.groupby(['track_id',cell],group_keys=False)
.apply(lambda x: x.nlargest(1,'frame'))
)
df_post_junction = (df_tracked
.groupby(['track_id',cell],group_keys=False)
.apply(lambda x: x.nsmallest(1,'frame'))
)
arr = []
# assign frame 0 cells or un-tracked cells with no parents
arr.append(df_post_junction
.query('frame==0 | track_id==-1')
[keep_cols]
.assign(parent_cell_0=-1,parent_cell_1=-1)
)
# clean up tables
last_frame = int(df_tracked['frame'].nlargest(1))
df_pre_junction = df_pre_junction.query('frame!=@last_frame & track_id!=-1')
df_post_junction = df_post_junction.query('frame!=0 & track_id!=-1')
# categorize frames to avoid issues with no-cell junction frames
df_pre_junction.loc[:,'frame'] = pd.Categorical(df_pre_junction['frame'],
categories=np.arange(0,last_frame),
ordered=True)
df_post_junction.loc[:,'frame'] = pd.Categorical(df_post_junction['frame'],
categories=np.arange(1,last_frame+1),
ordered=True)
for (frame_pre,df_pre),(frame_post,df_post) in zip(df_pre_junction.groupby('frame'),
df_post_junction.groupby('frame')):
if df_post.pipe(len)==0:
continue
elif df_pre.pipe(len)==0:
arr.append(df_post[keep_cols].assign(parent_cell_0=-1,parent_cell_1=-1))
else:
arr.extend(junction_parent_assignment(pd.concat([df_pre,df_post]),
frame_0=frame_pre,
threshold=threshold,
ij=ij,
cell=cell,
keep_cols=keep_cols
)
)
return | pd.concat(arr,ignore_index=True) | pandas.concat |
import argparse
from umap import UMAP
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def main():
parser = argparse.ArgumentParser(description='Visualize DAE compressed output using UMAP algorithm.')
parser.add_argument('csv_output', type=str, help='Output CSV file generated from DAE.py')
args = parser.parse_args()
df = pd.read_csv(args.csv_output, header=None)
label = df.iloc[:, 0]
data = df.iloc[:, 2:]
umap = UMAP(n_components=2, verbose=1).fit_transform(data.values)
df_umap = | pd.DataFrame() | pandas.DataFrame |
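# Hedged sketch of how the truncated main() could continue: wrap the 2-D embedding
# in a DataFrame and plot it with seaborn (column names are assumptions).
def plot_umap(umap, label):
    df_umap = pd.DataFrame(umap, columns=['umap_1', 'umap_2'])
    df_umap['label'] = label.values
    sns.scatterplot(data=df_umap, x='umap_1', y='umap_2', hue='label', s=10)
    plt.title('UMAP projection of DAE-compressed features')
    plt.show()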
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@date: Sun May 23 22:45:54 2021
"""
import os
import pandas as pd
import numpy as np
from tqdm import tqdm
import argparse
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from utils import path2excel, excel2names
def options():
parser = argparse.ArgumentParser('arguments for PCA')
parser.add_argument('--path_features', type=str, default='./results/features/', help='path to load pre-calculated features')
parser.add_argument('--path_save', type=str, default='./results/pca/', help='path to save results')
opt = parser.parse_args()
if not os.path.isdir(opt.path_save):
os.makedirs(opt.path_save)
if not os.path.isdir(opt.path_features):
raise Exception("No feature path detected.")
return opt
if __name__ == "__main__":
opt = options()
# Read labels y as DataFrame
y = pd.read_excel(opt.path_features + 'y.xlsx', engine='openpyxl').iloc[:,1:]
for name in tqdm(path2excel(opt.path_features), desc="PCA 2d..."):
if name[0:3] in ['all', 'cor', ]:
continue
# Read data X as DataFrame & scale them using Standard Scaler
X = | pd.read_excel(opt.path_features + name + '.xlsx', engine='openpyxl') | pandas.read_excel |
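# Hedged sketch of the per-feature-set step the loop above presumably performs:
# standard-scale X, project to two principal components, and save a scatter plot
# coloured by the labels in y (the function and file names are assumptions).
def pca_2d(X, y, name, path_save):
    X_scaled = StandardScaler().fit_transform(X.values)
    components = PCA(n_components=2).fit_transform(X_scaled)
    fig, ax = plt.subplots()
    ax.scatter(components[:, 0], components[:, 1], c=y.values.ravel(), s=8, cmap='viridis')
    ax.set_xlabel('PC 1')
    ax.set_ylabel('PC 2')
    ax.set_title(name)
    fig.savefig(os.path.join(path_save, name + '_pca2d.png'), dpi=200)
    plt.close(fig)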
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 10 18:23:37 2018
@author: Denny.Lehman
"""
# packages
import pandas as pd
import numpy as np
import datetime
#==============================================================================
# df = pd.read_excel(r'C:\Users\denny.lehman\Documents\18_01 Monthly Portfolio Report\datatape.xlsx')
# df_clean = pd.DataFrame(columns=['System Project','Contract','Committed Capital','Recurring Payment','Escalator','InService Date','Term (Months)','First Payment Date'])
# df_clean['Committed Capital'] = df['Capital ($)']
# df_clean['System Project'] = df['ID']
# df_clean['Contract'] = df['Type']
# df_clean['Escalator'] = df['%']
# df_clean['InService Date'] = df['Month.1']
# df_clean['Term (Months)'] = 300
# df_clean['First Payment Date'] = df_clean['InService Date']
# df_clean['Recurring Payment'] = df['Year 1']
#
# rng1 = pd.date_range(start='1/1/2013',end='1/1/2045',freq='M')
# df4 = pd.DataFrame()
#==============================================================================
# interest rate
# duration of loan
#
def main():
rng = pd.date_range('2/28/2018', periods=300, freq='M')
Escalator = 0.029
Recurring_Payment = 95
Committed_Capital = -1500
CC_date = pd.to_datetime('01/31/2018')
Start_Date = | pd.to_datetime('02/28/2018') | pandas.to_datetime |
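# Hedged sketch of the payment stream main() appears to be setting up: a monthly
# recurring payment that compounds by the escalator once every 12 months. The
# escalation convention is an assumption, not taken from the original script.
def payment_schedule(start_date, months, payment, escalator):
    rng = pd.date_range(start_date, periods=months, freq='M')
    escalation = (1 + escalator) ** (np.arange(months) // 12)
    return pd.Series(payment * escalation, index=rng)

# e.g. payment_schedule(Start_Date, 300, Recurring_Payment, Escalator)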
import datetime as dt
import glob
import os
import shutil
import unittest
import numpy as np
import pandas as pd
import devicely
class EverionTestCase(unittest.TestCase):
READ_PATH = 'tests/Everion_test_data'
BROKEN_READ_PATH = 'tests/Everion_test_data_broken' #for testing with missing files
WRITE_PATH = 'tests/Everion_test_data_write'
def setUp(self):
self.reader = devicely.EverionReader(self.READ_PATH)
def test_basic_read(self):
self._test_read_individual_dataframes(self.reader)
expected_signal_tags = ['heart_rate', 'respiration_rate', 'heart_rate_variability',
'oxygen_saturation', 'gsr_electrode', 'temperature_object',
'barometer_pressure', 'temperature_local', 'ctemp',
'temperature_barometer']
expected_signal_quality_tags = ['heart_rate_quality', 'respiration_rate_quality',
'heart_rate_variability_quality', 'oxygen_saturation_quality',
'ctemp_quality']
expected_sensor_tags = ['accz_data', 'led2_data', 'led1_data', 'led4_data',
'accy_data', 'accx_data', 'led3_data', 'acc_mag']
expected_feature_tags = ['inter_pulse_interval', 'inter_pulse_interval_deviation']
expected_columns = set(expected_signal_tags + expected_signal_quality_tags +
expected_sensor_tags + expected_feature_tags)
self.assertEqual(set(self.reader.data.columns), expected_columns)
def test_read_with_non_default_tags(self):
signal_tags = [12, 15, 19, 119, 134]
sensor_tags = [80, 83, 84, 85, 92]
feature_tags = [17]
reader = devicely.EverionReader(self.READ_PATH,
signal_tags=signal_tags,
sensor_tags=sensor_tags,
feature_tags=feature_tags)
# The individual should dataframes contain all tags, regardless of the initialization parameters.
self._test_read_individual_dataframes(reader)
expected_singal_columns = ['respiration_rate', 'temperature_local',
'ctemp', 'temperature_barometer']
expected_signal_quality_columns = ['respiration_rate_quality', 'ctemp_quality']
# no acc_mag because 86 (accz_data) is missing
expected_sensor_columns = ['led1_data', 'led4_data', 'accy_data', 'accx_data']
#17 is a valid feature column, but it is not present in the testing csv
expected_feature_columns = []
expected_columns = set(expected_singal_columns + expected_signal_quality_columns +
expected_sensor_columns + expected_feature_columns)
self.assertEqual(set(reader.data.columns), expected_columns)
def test_read_with_invalid_tags(self):
signal_tags = [12, 15, 19, 119, 134, 80] #80 is not a signal tag
sensor_tags = [80, 83, 84, 85, 92, 70] #70 is not a sensor tag
feature_tags = [17, 86] #86 is not a sensor tag
call = lambda: devicely.EverionReader(self.READ_PATH,
signal_tags=signal_tags,
sensor_tags=sensor_tags,
feature_tags=feature_tags)
self.assertRaises(KeyError, call)
def test_read_with_missing_files(self):
print(os.listdir())
shutil.copytree(self.READ_PATH, self.BROKEN_READ_PATH)
signals_path = glob.glob(os.path.join(self.BROKEN_READ_PATH, f"*signals*")).pop()
attributes_dailys_path = glob.glob(os.path.join(self.BROKEN_READ_PATH, f"*attributes_dailys*")).pop()
os.remove(signals_path)
os.remove(attributes_dailys_path)
reader = devicely.EverionReader(self.BROKEN_READ_PATH)
self.assertIsNone(reader.signals)
self.assertIsNone(reader.attributes_dailys)
expected_sensor_tags = ['accz_data', 'led2_data', 'led1_data', 'led4_data',
'accy_data', 'accx_data', 'led3_data', 'acc_mag']
expected_feature_tags = ['inter_pulse_interval', 'inter_pulse_interval_deviation']
expected_columns = set(expected_sensor_tags + expected_feature_tags)
self.assertEqual(set(reader.data.columns), expected_columns)
shutil.rmtree(self.BROKEN_READ_PATH)
def test_read_with_all_join_files_missing(self):
#The signals-, sensors-, and features files are the three join files.
shutil.copytree(self.READ_PATH, self.BROKEN_READ_PATH)
signals_path = glob.glob(os.path.join(self.BROKEN_READ_PATH, f"*signals*")).pop()
sensors_path = glob.glob(os.path.join(self.BROKEN_READ_PATH, f"*sensor_data*")).pop()
features_path = glob.glob(os.path.join(self.BROKEN_READ_PATH, f"*features*")).pop()
os.remove(signals_path)
os.remove(sensors_path)
os.remove(features_path)
reader = devicely.EverionReader(self.BROKEN_READ_PATH)
self.assertIsNone(reader.signals)
self.assertIsNone(reader.sensors)
self.assertIsNone(reader.features)
pd.testing.assert_frame_equal(reader.data, pd.DataFrame())
shutil.rmtree(self.BROKEN_READ_PATH)
def test_timeshift_to_timestamp(self):
expected_aggregates_head = pd.DataFrame({
'count': 5 * [4468],
'streamType': 5 * [5],
'tag': [40, 18, 21, 7, 100],
'time': pd.to_datetime(5 * [1525200281], unit='s'),
'values': [-2.0, 0.76, 21.0, 60.0, 0.0],
'quality': [np.nan, 13.0, np.nan, 0.0, np.nan]
})
expected_analytics_events_head = pd.DataFrame({
"count": [5622, 5621, 5620, 5619, 5618],
"streamType": 5 * [7],
"tag": 5 * [1],
"time": pd.to_datetime([1525204397, 1525204397, 1525204148, 1525204131, 1525203790], unit='s'),
"values": [22.0, 2.0, 22.0, 22.0, 2.0]
})
expected_attributes_dailys_head = pd.DataFrame({
"count": [14577, 14576, 14575, 14574, 14573],
"streamType": 5 * [8],
"tag": 5 * [67],
"time": pd.to_datetime(5 * [1525207721], unit='s'),
"values": [2.0, 4.0, 3.0, 11.0, 12.0],
"quality": [15.0, 9.0, 8.0, 6.0, 5.0]
})
expected_everion_events_head = pd.DataFrame({
"count": 5 * [46912],
"streamType": 5 * [6],
"tag": [128, 131, 129, 132, 126],
"time": pd.to_datetime(5 * [1525192729], unit='s'),
"values": [65295.0, 900.0, 44310.0, 4096.0, 0.0]
})
expected_features_head = pd.DataFrame({
"count": [787000, 787001, 787002, 787003, 787004],
"streamType": 5 * [4],
"tag": 5 * [14],
"time": | pd.to_datetime([1525192675, 1525192675, 1525192676, 1525192677, 1525192678], unit='s') | pandas.to_datetime |