#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import os
from datetime import datetime
def read_netflix_data():
# set directory path
DIR_ = './data/netflix/training_set/'
files = list(map(lambda x: DIR_+x, os.listdir(DIR_)))
frame = []
for file in files:
#file = files[0]
movie_id = int(file.replace(DIR_, '').split('_')[1].replace('.txt', ''))
data = pd.read_csv(file, names=['UserID', 'Rating', 'Timestamp'], skiprows=1)
data['MovieID'] = movie_id
frame.append(data)
rating = pd.concat(frame, axis=0)
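# Netflix dates arrive as 'YYYY-MM-DD' strings; parse them and store elapsed
# seconds since 1900-01-01 so the Timestamp column is numeric.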
rating['Timestamp'] = pd.to_datetime(rating['Timestamp'], format='%Y-%m-%d')
base_dt = datetime(1900,1,1)
rating['Timestamp'] = (rating['Timestamp'] - base_dt).dt.total_seconds()
user = pd.DataFrame()
movie = pd.DataFrame()
return rating, user, movie
def read_mllatest_data():
# set file path
PATH_rating = './data/ml-latest/ratings.csv'
PATH_movie = './data/ml-latest/movies.csv'
# set columns
COLS_rating = ['UserID', 'MovieID', 'Rating', 'Timestamp']
COLS_movie = ['MovieID', 'Title', 'Genres']
# read DataSet
rating = pd.read_csv(PATH_rating, sep=',', names=COLS_rating, skiprows=1)
user = pd.DataFrame()
movie = pd.read_csv(PATH_movie, sep=',', names=COLS_movie, skiprows=1)
return rating, user, movie
def read_ml20m_data():
# set file path
PATH_rating = './data/ml-20m/ratings.csv'
PATH_movie = './data/ml-20m/movies.csv'
# set columns
COLS_rating = ['UserID', 'MovieID', 'Rating', 'Timestamp']
COLS_movie = ['MovieID', 'Title', 'Genres']
# read DataSet
rating = pd.read_csv(PATH_rating, sep=',', names=COLS_rating, skiprows=1)
user = pd.DataFrame()
movie = pd.read_csv(PATH_movie, sep=',', names=COLS_movie, skiprows=1)
return rating, user, movie
def read_ml100k_data():
# set file path
PATH_rating = './data/ml-100k/u.data'
PATH_user = './data/ml-100k/u.user'
PATH_movie = './data/ml-100k/u.item'
# set columns
COLS_rating = ['UserID', 'MovieID', 'Rating', 'Timestamp']
COLS_user = ['UserID', 'Age', 'Gender', 'Occupation', 'Zip-code']
GenreCOLS = ['Genre%02d'%d for d in range(19)]
COLS_movie = ['MovieID', 'Title', 'release-date', 'unknown0', 'unknown1'] + GenreCOLS
# read DataSet
rating = pd.read_csv(PATH_rating, sep='\t', names=COLS_rating)
user = pd.read_csv(PATH_user, sep='|', names=COLS_user)
movie = pd.read_csv(PATH_movie, sep='|', names=COLS_movie, encoding='latin-1')  # u.item titles are Latin-1 encoded
return rating, user, movie
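# Hedged usage sketch: each reader above returns the same (rating, user, movie)
# tuple, so downstream code can switch datasets freely. The ./data/ directory
# layout is assumed here and is not shipped with this module.
if __name__ == '__main__':
    ratings, users, movies = read_ml100k_data()
    print(ratings.shape, users.shape, movies.shape)
    print(ratings.head())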
import numpy as np
import pandas
def normalize_features(array):
"""
Normalize the features in our data set.
"""
array_normalized = (array-array.mean())/array.std()
mu = array.mean()
sigma = array.std()
return array_normalized, mu, sigma
def compute_cost(features, values, theta):
"""
Compute the cost function given a set of features / values, and the values for our thetas.
"""
m = len(values)
sum_of_square_errors = np.square(np.dot(features, theta) - values).sum()
cost = sum_of_square_errors / (2*m)
return cost
def gradient_descent(features, values, theta, alpha, num_iterations):
"""
Perform gradient descent given a data set with an arbitrary number of features.
"""
m = len(values)
cost_history = []
for i in range(num_iterations):
predicted_values = np.dot(features, theta)
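# Vectorized update of every parameter at once:
# theta <- theta - (alpha / m) * X^T (X.theta - y)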
theta = theta - alpha / m * np.dot((predicted_values - values), features)
cost = compute_cost(features, values, theta)
cost_history.append(cost)
return theta, pandas.Series(cost_history)
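# A minimal sketch of how the three helpers above fit together, using synthetic
# data; the feature names and hyperparameters below are illustrative only and
# not part of the original exercise.
def _gradient_descent_demo():
    rng = np.random.RandomState(0)
    raw = pandas.DataFrame({'x1': rng.rand(100) * 10, 'x2': rng.rand(100) * 10})
    values = (3.0 * raw['x1'] + 1.5 * raw['x2'] + rng.randn(100)).values
    normalized, mu, sigma = normalize_features(raw)
    # Prepend an intercept column of ones before fitting.
    features = np.hstack([np.ones((len(raw), 1)), normalized.values])
    theta, cost_history = gradient_descent(features, values,
                                           np.zeros(features.shape[1]),
                                           alpha=0.1, num_iterations=75)
    # The cost should fall steadily for a small enough learning rate.
    assert cost_history.iloc[-1] < cost_history.iloc[0]
    return theta, mu, sigma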
def predictions(dataframe):
dummy_units = pandas.get_dummies(dataframe['UNIT'], prefix='unit')
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
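# Requested sids that never appear in `tuples` still get columns below, just
# filled entirely with NaN.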
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
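# A small, hypothetical illustration of the helper above (the sids and estimate
# values are made up and not taken from any test in this module): each sid's
# latest estimate is forward-filled through `end_date`, and sids with no rows
# come back as all-NaN columns.
def _example_expected_df():
    tuples = [(1, 10.0, pd.Timestamp('2015-01-05')),
              (1, 11.0, pd.Timestamp('2015-01-07')),
              (2, 20.0, pd.Timestamp('2015-01-06'))]
    return create_expected_df_for_factor_compute(
        start_date=pd.Timestamp('2015-01-05'),
        sids=[1, 2, 3],  # sid 3 has no rows and yields an all-NaN column
        tuples=tuples,
        end_date=pd.Timestamp('2015-01-08'),
    )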
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_load_one_day()
Tests that a Pipeline run over a single day returns the expected
estimate values for every column of the dataset.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that the previous quarter loader returns the expected output for
a single-day Pipeline with multiple estimate columns.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that the next quarter loader returns the expected output for
a single-day Pipeline with multiple estimate columns.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'),
'estimate1': 2.,
'estimate2': 4.,
FISCAL_QUARTER_FIELD_NAME: 2.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
dummy_df = pd.DataFrame({SID_FIELD_NAME: 0},
columns=[SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
'estimate'],
index=[0])
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1,
bad_dataset2,
good_dataset)
for c in dataset.columns}
p = Pipeline(columns)
with self.assertRaises(ValueError) as e:
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2")
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with self.assertRaises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = ["split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof"]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(itertools.product(
(NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader),
))
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
with self.assertRaises(ValueError):
loader(dummy_df,
{column.name: val for column, val in
columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"))
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp('2015-01-28')
q1_knowledge_dates = [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-04'),
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-11')]
q2_knowledge_dates = [pd.Timestamp('2015-01-14'),
pd.Timestamp('2015-01-17'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-23')]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')] # One day late
q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early
pd.Timestamp('2015-01-26')]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates +
cls.q2_knowledge_dates,
4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (q1e1 < q1e2 and
q2e1 < q2e2 and
# All estimates are < Q2's event, so just constrain Q1
# estimates.
q1e1 < cls.q1_release_dates[0] and
q1e2 < cls.q1_release_dates[0]):
sid_estimates.append(cls.create_estimates_df(q1e1,
q1e2,
q2e1,
q2e2,
sid))
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates +
sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
'estimate': [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid
})
@classmethod
def create_estimates_df(cls,
q1e1,
q1e2,
q2e1,
q2e2,
sid):
return pd.DataFrame({
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
'estimate': [.1, .2, .3, .4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
})
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert_true(sid_estimates.isnull().all().all())
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[self.get_expected_estimate(
q1_knowledge[q1_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
q2_knowledge[q2_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
date.tz_localize(None),
).set_index([[date]]) for date in sid_estimates.index],
axis=0)
assert_equal(all_expected[sid_estimates.columns],
sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateLoaderTestCase(NextEstimate):
"""
Run the same tests as NextEstimate, but using a Blaze-backed loader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q2_knowledge.iloc[-1:]
elif (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateLoaderTestCase(PreviousEstimate):
"""
Run the same tests as PreviousEstimate, but using a Blaze-backed loader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that requests estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate': [1., 2.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(columns=[cls.columns[col] + '1'
for col in cls.columns] +
[cls.columns[col] + '2'
for col in cls.columns],
index=cls.trading_days)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ('1', '2')
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(
expected[expected_name]
)
else:
expected[expected_name] = expected[
expected_name
].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge([{c.name + '1': c.latest for c in dataset1.columns},
{c.name + '2': c.latest for c in dataset2.columns}])
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + '1' for col in self.columns]
q2_columns = [col.name + '2' for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(sorted(np.array(q1_columns + q2_columns)),
sorted(results.columns.values))
assert_equal(self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1))
class NextEstimateMultipleQuarters(
WithEstimateMultipleQuarters, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'),
raw_name + '1'
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'),
raw_name + '1'
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ['estimate', 'event_date']:
expected.loc[
pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'),
col_name + '2'
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-09'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 2
expected.loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 3
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-20'),
FISCAL_YEAR_FIELD_NAME + '2'
] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateMultipleQuarters(NextEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class PreviousEstimateMultipleQuarters(
WithEstimateMultipleQuarters,
ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-19')
] = cls.events[raw_name].iloc[0]
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ['estimate', 'event_date']:
expected[col_name + '2'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[col_name].iloc[0]
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 4
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 2014
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 1
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateMultipleQuarters(PreviousEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13')] * 2,
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-20')],
'estimate': [11., 12., 21.] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6
})
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError('assert_compute')
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=pd.Timestamp('2015-01-13', tz='utc'),
# last event date we have
end_date=pd.Timestamp('2015-01-14', tz='utc'),
)
class PreviousVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousVaryingNumEstimates(PreviousVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class NextVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextVaryingNumEstimates(NextVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class WithEstimateWindows(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp('2015-02-10')
window_test_start_date = pd.Timestamp('2015-01-05')
critical_dates = [pd.Timestamp('2015-01-09', tz='utc'),
pd.Timestamp('2015-01-15', tz='utc'),
pd.Timestamp('2015-01-20', tz='utc'),
pd.Timestamp('2015-01-26', tz='utc'),
pd.Timestamp('2015-02-05', tz='utc'),
pd.Timestamp('2015-02-10', tz='utc')]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-02-10'),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp('2015-01-18')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-04-01')],
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer dtypes coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
"""Tests for functions in preprocess.py."""
import puget.preprocess as pp
import puget
import os
import os.path as op
import pandas as pd
import pandas.util.testing as pdt
import numpy as np
import tempfile
import json
from numpy.testing import assert_equal
import pytest
def test_std_path_setup():
filename = 'test'
data_dir = 'data'
# test with one year
paths = ['test_2012']
file_spec = pp.std_path_setup(filename, data_dir, paths)
test_dict = {'test_2012': op.join(data_dir, 'test_2012', filename)}
assert_equal(file_spec, test_dict)
# test with limited years
paths = ['test_2012', 'test_2013']
file_spec = pp.std_path_setup(filename, data_dir, paths)
test_dict = {'test_2012': op.join(data_dir, 'test_2012', filename),
'test_2013': op.join(data_dir, 'test_2013', filename)}
assert_equal(file_spec, test_dict)
# test with all years
paths = ['test_2011', 'test_2012', 'test_2013', 'test_2014']
file_spec = pp.std_path_setup(filename, data_dir, paths)
test_dict = {'test_2011': op.join(data_dir, 'test_2011', filename),
'test_2012': op.join(data_dir, 'test_2012', filename),
'test_2013': op.join(data_dir, 'test_2013', filename),
'test_2014': op.join(data_dir, 'test_2014', filename)}
assert_equal(file_spec, test_dict)
def test_read_table():
"""Test read_table function."""
# create temporary csv file
temp_csv_file = tempfile.NamedTemporaryFile(mode='w')
df = pd.DataFrame({'id': [1, 1, 2, 2],
'time1': ['2001-01-13', '2004-05-21', '2003-06-10',
'2003-06-10'], 'drop1': [2, 3, 4, 5],
'ig_dedup1': [5, 6, 7, 8], 'categ1': [0, 8, 0, 0]})
df.to_csv(temp_csv_file, index=False)
temp_csv_file.seek(0)
file_spec = {'2011': temp_csv_file.name}
df = pp.read_table(file_spec, data_dir=None, paths=None,
columns_to_drop=['drop1'], categorical_var=['categ1'],
time_var=['time1'],
duplicate_check_columns=['id', 'time1', 'categ1'])
df_test = pd.DataFrame({'id': [1, 1, 2],
'time1':
pd.to_datetime(['2001-01-13', '2004-05-21',
'2003-06-10'], errors='coerce'),
'ig_dedup1': [5, 6, 8], 'categ1': [0, np.nan, 0]})
# Have to change the index to match the one we de-duplicated
df_test.index = pd.Int64Index([0, 1, 3])
pdt.assert_frame_equal(df, df_test)
# test passing a string filename with data_dir and path
path, fname = op.split(temp_csv_file.name)
path0, path1 = op.split(path)
df = pp.read_table(fname, data_dir=path0, paths=[path1],
columns_to_drop=['drop1'], categorical_var=['categ1'],
time_var=['time1'],
duplicate_check_columns=['id', 'time1', 'categ1'])
temp_csv_file.close()
# test error checking
with pytest.raises(ValueError):
pp.read_table(file_spec,
data_dir=op.join(pp.DATA_PATH, 'king'))
# test error checking
with pytest.raises(ValueError):
pp.read_table('test', data_dir=None, paths=None)
def test_read_entry_exit():
temp_csv_file = tempfile.NamedTemporaryFile(mode='w')
df_init = pd.DataFrame({'id': [11, 11, 12],
'stage': [0, 1, 0], 'value': [0, 1, 0]})
df_init.to_csv(temp_csv_file, index=False)
temp_csv_file.seek(0)
temp_meta_file = tempfile.NamedTemporaryFile(mode='w')
metadata = {'name': 'test',
'duplicate_check_columns': ['id', 'stage', 'value'],
'categorical_var': ['value'],
'collection_stage_column': 'stage', 'entry_stage_val': 0,
'exit_stage_val': 1, 'update_stage_val': 2,
'annual_assessment_stage_val': 5, 'post_exit_stage_val': 6,
'person_enrollment_ID': 'id'}
metadata_json = json.dumps(metadata)
temp_meta_file.file.write(metadata_json)
temp_meta_file.seek(0)
file_spec = {2011: temp_csv_file.name}
df = pp.read_entry_exit_table(file_spec=file_spec, data_dir=None,
paths=None,
metadata=temp_meta_file.name)
# make sure values are floats
df_test = pd.DataFrame({'id': [11, 12], 'value_entry': [0, 0],
'value_exit': [1, np.NaN]})
# sort because column order is not assured because started with dicts
df = df.sort_index(axis=1)
df_test = df_test.sort_index(axis=1)
pdt.assert_frame_equal(df, df_test)
# test error checking
temp_meta_file2 = tempfile.NamedTemporaryFile(mode='w')
metadata = {'name': 'test',
'duplicate_check_columns': ['id', 'stage', 'value'],
'categorical_var': ['value']}
metadata_json = json.dumps(metadata)
temp_meta_file2.file.write(metadata_json)
temp_meta_file2.seek(0)
with pytest.raises(ValueError):
pp.read_entry_exit_table(file_spec=file_spec,
metadata=temp_meta_file2.name)
temp_csv_file.close()
temp_meta_file.close()
temp_meta_file2.close()
def test_get_enrollment():
"""Test get_enrollment function."""
# create temporary csv file & metadata file to read in
temp_csv_file = tempfile.NamedTemporaryFile(mode='w')
temp_meta_file = tempfile.NamedTemporaryFile(mode='w')
df = pd.DataFrame({'id': [1, 1, 2, 2],
'time1': ['2001-01-13', '2004-05-21', '2003-06-10',
'2003-06-10'], 'drop1': [2, 3, 4, 5],
'ig_dedup1': [5, 6, 7, 8], 'categ1': [0, 8, 0, 0]})
df.to_csv(temp_csv_file, index=False)
temp_csv_file.seek(0)
metadata = ({'name': 'test',
'person_enrollment_ID': 'id',
'person_ID': 'id',
'program_ID': 'id',
'duplicate_check_columns': ['id', 'time1', 'categ1'],
'columns_to_drop': ['drop1'],
'categorical_var': ['categ1'], 'time_var': ['time1'],
'groupID_column': 'id',
'entry_date': 'time1'
})
metadata_json = json.dumps(metadata)
temp_meta_file.file.write(metadata_json)
temp_meta_file.seek(0)
file_spec = {2011: temp_csv_file.name}
# first try with groups=True (default)
df = pp.get_enrollment(file_spec=file_spec, data_dir=None, paths=None,
metadata_file=temp_meta_file.name)
df_test = pd.DataFrame({'id': [1, 1], 'time1':
pd.to_datetime(['2001-01-13', '2004-05-21'],
errors='coerce'), 'ig_dedup1': [5, 6],
'categ1': [0, np.nan]})
pdt.assert_frame_equal(df, df_test)
# try again with groups=False
df = pp.get_enrollment(groups=False, file_spec=file_spec, data_dir=None,
paths=None, metadata_file=temp_meta_file.name)
df_test = pd.DataFrame({'id': [1, 1, 2],
'time1':
pd.to_datetime(['2001-01-13', '2004-05-21',
'2003-06-10'], errors='coerce'),
'ig_dedup1': [5, 6, 8],
'categ1': [0, np.nan, 0]})
# Have to change the index to match the one we de-duplicated
df_test.index = pd.Int64Index([0, 1, 3])
pdt.assert_frame_equal(df, df_test)
temp_csv_file.close()
temp_meta_file.close()
def test_get_exit():
"""test get_exit function."""
# create temporary csv file & metadata file to read in
temp_csv_file = tempfile.NamedTemporaryFile(mode='w')
temp_meta_file = tempfile.NamedTemporaryFile(mode='w')
dest_rand_ints = np.random.random_integers(1, 30, 3)
df_init = pd.DataFrame({'id': [11, 12, 13], 'dest': dest_rand_ints})
df_init.to_csv(temp_csv_file, index=False)
temp_csv_file.seek(0)
metadata = ({'name': 'test', 'duplicate_check_columns': ['id'],
"destination_column": 'dest', 'person_enrollment_ID': ['id']})
metadata_json = json.dumps(metadata)
temp_meta_file.file.write(metadata_json)
temp_meta_file.seek(0)
file_spec = {2011: temp_csv_file.name}
df = pp.get_exit(file_spec=file_spec, data_dir=None, paths=None,
metadata_file=temp_meta_file.name)
mapping_table = pd.read_csv(op.join(puget.data.DATA_PATH, 'metadata',
'destination_mappings.csv'))
map_table_test_ints = [2, 25, 26]
map_table_test = pd.DataFrame({'Standard': np.array(['New Standards']*3),
'DestinationNumeric': np.array(
map_table_test_ints).astype(float),
'DestinationDescription': ['Transitional housing for homeless persons (including homeless youth)',
'Long-term care facility or nursing home',
'Moved from one HOPWA funded project to HOPWA PH'],
'DestinationGroup': ['Temporary',
'Permanent',
'Permanent'],
'DestinationSuccess': ['Other Exit',
'Successful Exit',
'Successful Exit'],
'Subsidy': ['No', 'No', 'Yes']})
map_table_subset = mapping_table[mapping_table['DestinationNumeric'] ==
map_table_test_ints[0]]
map_table_subset = map_table_subset.append(mapping_table[
mapping_table['DestinationNumeric'] == map_table_test_ints[1]])
map_table_subset = map_table_subset.append(mapping_table[
mapping_table['DestinationNumeric'] == map_table_test_ints[2]])
# Have to change the index to match the one we made up
map_table_subset.index = pd.Int64Index([0, 1, 2])
# sort because column order is not assured because started with dicts
map_table_test = map_table_test.sort_index(axis=1)
map_table_subset = map_table_subset.sort_index(axis=1)
pdt.assert_frame_equal(map_table_subset, map_table_test)
mapping_table = mapping_table[mapping_table.Standard == 'New Standards']
mapping_table['Subsidy'] = mapping_table['Subsidy'].map({'Yes': True,
'No': False})
mapping_table = mapping_table.drop(['Standard'], axis=1)
df_test = pd.DataFrame({'id': [11, 12, 13],
'dest': dest_rand_ints})
df_test = pd.merge(left=df_test, right=mapping_table, how='left',
left_on='dest', right_on='DestinationNumeric')
df_test = df_test.drop('dest', axis=1)
pdt.assert_frame_equal(df, df_test)
temp_csv_file.close()
temp_meta_file.close()
def test_get_client():
# create temporary csv files & metadata file to read in
temp_csv_file1 = tempfile.NamedTemporaryFile(mode='w')
temp_csv_file2 = tempfile.NamedTemporaryFile(mode='w')
df_init = pd.DataFrame({'id': [11, 12, 13, 15, 16, 17],
'first_name':['AAA', 'BBB', 'CCC',
'EEE', 'FFF', 'noname'],
'dob_col': ['1990-01-13', '2012-05-21',
'1850-06-14', '1965-11-22',
'1948-09-03', '2012-03-18'],
'time_col': ['1996-01-13', '2014-05-21',
'1950-06-14', '1985-11-22',
'1978-09-03', '2014-03-18'],
'bool_col': [1, 99, 1, 8, 0, 1],
'numeric': [99, 3, 6, 0, 8, np.NaN]})
df2_init = pd.DataFrame({'id': [11, 12, 13, 14, 15, 16, 17, 18],
'first_name':['AAA', 'BBB', 'CCC', 'DDD',
'EEE', 'FFF', 'noname', 'HHH'],
'dob_col': ['1990-01-15', '2012-05-21',
'1850-06-14', '1975-12-08',
'1967-11-22', pd.NaT, '2010-03-18',
'2014-04-30'],
'time_col': ['1996-01-15', '2014-05-21',
'1950-06-14', '1995-12-08',
'1987-11-22', pd.NaT, '2012-03-18',
'2015-04-30'],
'bool_col': [0, 0, 1, 0, 8, 0, np.NaN, 1],
'numeric': [5, 3, 7, 1, 0, 8, 6, 0]})
df_init.to_csv(temp_csv_file1, index=False)
temp_csv_file1.seek(0)
df2_init.to_csv(temp_csv_file2, index=False)
temp_csv_file2.seek(0)
file_spec = {'2011': temp_csv_file1.name, '2012': temp_csv_file2.name}
temp_meta_file = tempfile.NamedTemporaryFile(mode='w')
metadata = ({'name': 'test', 'person_ID': 'id',
'duplicate_check_columns': ['id', 'dob_col', 'first_name'],
'columns_to_drop': [],
'categorical_var': ['bool_col', 'numeric'],
'time_var': ['dob_col', 'time_col'],
'boolean': ['bool_col'], 'numeric_code': ['numeric'],
'dob_column': 'dob_col',
'name_columns': ["first_name"]})
metadata_json = json.dumps(metadata)
temp_meta_file.file.write(metadata_json)
temp_meta_file.seek(0)
for name_exclusion in [False, True]:
# get path & filenames
df = pp.get_client(file_spec=file_spec, data_dir=None, paths=None,
metadata_file=temp_meta_file.name,
name_exclusion=name_exclusion)
df_test = pd.DataFrame({'id': [11, 11, 12, 13, 14, 15, 15, 16, 16, 17, 17,
18],
'first_name':['AAA', 'AAA', 'BBB', 'CCC', 'DDD',
'EEE', 'EEE', 'FFF', 'FFF', 'noname', 'noname',
'HHH'],
'dob_col': pd.to_datetime(['1990-01-13',
'1990-01-15',
'2012-05-21',
'1850-06-14',
'1975-12-08',
'1965-11-22',
'1967-11-22',
'1948-09-03', pd.NaT,
'2012-03-18',
'2010-03-18',
'2014-04-30']),
'time_col': pd.to_datetime(['1996-01-14',
'1996-01-14',
'2014-05-21',
'1950-06-14',
'1995-12-08', pd.NaT,
pd.NaT, '1978-09-03',
'1978-09-03', pd.NaT,
pd.NaT, '2015-04-30']),
'bool_col': [1, 1, 0, 1, 0, np.NaN, np.NaN, 0, 0,
1, 1, 1],
'numeric': [5, 5, 3, np.NaN, 1, 0, 0, np.NaN,
np.NaN, 6, 6, 0]})
if name_exclusion:
df_test = df_test[~(df_test['first_name'] == 'noname')]
# Have to sort & change the indexes to match
df = df.sort_values(by=['id', 'dob_col'])
df = df.reset_index(drop=True)
df_test = df_test.sort_values(by=['id', 'dob_col'])
df_test = df_test.reset_index(drop=True)
print(df.dtypes)
print(df_test.dtypes)
pdt.assert_frame_equal(df, df_test)
# test error checking
temp_meta_file2 = tempfile.NamedTemporaryFile(mode='w')
metadata = ({'name': 'test',
'duplicate_check_columns': ['id', 'dob_col'],
'categorical_var': ['bool_col', 'numeric'],
'time_var': ['time_col'],
'boolean': ['bool_col'], 'numeric_code': ['numeric'],
'dob_column': 'dob_col'})
metadata_json = json.dumps(metadata)
temp_meta_file2.file.write(metadata_json)
temp_meta_file2.seek(0)
with pytest.raises(ValueError):
pp.get_client(file_spec=file_spec,
data_dir=None,
paths=None,
metadata_file=temp_meta_file2.name)
temp_csv_file1.close()
temp_csv_file2.close()
temp_meta_file.close()
def test_get_disabilities():
temp_csv_file = tempfile.NamedTemporaryFile(mode='w')
df_init = pd.DataFrame({'pid': [11, 11, 11, 11, 12, 12, 12, 12],
'stage': [10, 10, 20, 20, 10, 10, 20, 20],
'type': [5, 6, 5, 6, 5, 6, 5, 6],
'response': [0, 1, 0, 1, 99, 0, 0, 1]})
df_init.to_csv(temp_csv_file, index=False)
temp_csv_file.seek(0)
temp_meta_file = tempfile.NamedTemporaryFile(mode='w')
metadata = {'name': 'test',
'duplicate_check_columns': ['pid', 'stage', 'type'],
'columns_to_drop': [],
'categorical_var': ['response'],
'collection_stage_column': 'stage', 'entry_stage_val': 10,
'exit_stage_val': 20, 'update_stage_val': 30,
'annual_assessment_stage_val': 5, 'post_exit_stage_val': 6,
'type_column': 'type', 'response_column': 'response',
'person_enrollment_ID': 'pid'}
metadata_json = json.dumps(metadata)
temp_meta_file.file.write(metadata_json)
temp_meta_file.seek(0)
file_spec = {2011: temp_csv_file.name}
df = pp.get_disabilities(file_spec=file_spec, data_dir=None, paths=None,
metadata_file=temp_meta_file.name)
type_dict = {5: 'Physical', 6: 'Developmental', 7: 'ChronicHealth',
8: 'HIVAIDS', 9: 'MentalHealth', 10: 'SubstanceAbuse'}
# make sure values are floats
df_test = pd.DataFrame({'pid': [11, 12], 'Physical_entry': [0, np.NaN],
'Physical_exit': [0.0, 0.0],
'Developmental_entry': [1.0, 0.0],
'Developmental_exit': [1.0, 1.0]})
    # sort because column order is not guaranteed when the frames are built from dicts
df = df.sort_index(axis=1)
df_test = df_test.sort_index(axis=1)
pdt.assert_frame_equal(df, df_test)
# test error checking
temp_meta_file2 = tempfile.NamedTemporaryFile(mode='w')
metadata = {'name': 'test',
'duplicate_check_columns': ['pid', 'stage', 'type'],
'categorical_var': ['response']}
metadata_json = json.dumps(metadata)
temp_meta_file2.file.write(metadata_json)
temp_meta_file2.seek(0)
with pytest.raises(ValueError):
pp.get_disabilities(file_spec=file_spec,
data_dir=None, paths=None,
metadata_file=temp_meta_file2.name)
temp_csv_file.close()
temp_meta_file.close()
temp_meta_file2.close()
def test_get_employment_education():
temp_csv_file = tempfile.NamedTemporaryFile(mode='w')
df_init = pd.DataFrame({'id': [11, 11, 12],
'stage': [0, 1, 0], 'value': [0, 1, 0]})
df_init.to_csv(temp_csv_file, index=False)
temp_csv_file.seek(0)
temp_meta_file = tempfile.NamedTemporaryFile(mode='w')
metadata = {'name': 'test',
'duplicate_check_columns': ['id', 'stage', 'value'],
'columns_to_drop': [],
'categorical_var': ['value'],
'collection_stage_column': 'stage', 'entry_stage_val': 0,
'exit_stage_val': 1, 'update_stage_val': 2,
'annual_assessment_stage_val': 5, 'post_exit_stage_val': 6,
'person_enrollment_ID': 'id'}
metadata_json = json.dumps(metadata)
temp_meta_file.file.write(metadata_json)
temp_meta_file.seek(0)
file_spec = {2011: temp_csv_file.name}
df = pp.get_employment_education(file_spec=file_spec, data_dir=None,
paths=None,
metadata_file=temp_meta_file.name)
# make sure values are floats
df_test = pd.DataFrame({'id': [11, 12], 'value_entry': [0, 0],
'value_exit': [1, np.NaN]})
    # sort because column order is not guaranteed when the frames are built from dicts
df = df.sort_index(axis=1)
df_test = df_test.sort_index(axis=1)
pdt.assert_frame_equal(df, df_test)
temp_csv_file.close()
temp_meta_file.close()
def test_get_health_dv():
temp_csv_file = tempfile.NamedTemporaryFile(mode='w')
df_init = pd.DataFrame({'id': [11, 11, 12],
'stage': [0, 1, 0], 'value': [0, 1, 0]})
df_init.to_csv(temp_csv_file, index=False)
temp_csv_file.seek(0)
temp_meta_file = tempfile.NamedTemporaryFile(mode='w')
metadata = {'name': 'test',
'duplicate_check_columns': ['id', 'stage', 'value'],
'columns_to_drop': [],
'categorical_var': ['value'],
'collection_stage_column': 'stage', 'entry_stage_val': 0,
'exit_stage_val': 1, 'update_stage_val': 2,
'annual_assessment_stage_val': 5, 'post_exit_stage_val': 6,
'person_enrollment_ID': 'id'}
metadata_json = json.dumps(metadata)
temp_meta_file.file.write(metadata_json)
temp_meta_file.seek(0)
file_spec = {2011: temp_csv_file.name}
df = pp.get_health_dv(file_spec=file_spec, data_dir=None, paths=None,
metadata_file=temp_meta_file.name)
# make sure values are floats
df_test = pd.DataFrame({'id': [11, 12], 'value_entry': [0, 0],
'value_exit': [1, np.NaN]})
    # sort because column order is not guaranteed when the frames are built from dicts
df = df.sort_index(axis=1)
df_test = df_test.sort_index(axis=1)
pdt.assert_frame_equal(df, df_test)
temp_csv_file.close()
temp_meta_file.close()
def test_get_income():
temp_csv_file = tempfile.NamedTemporaryFile(mode='w')
df_init = pd.DataFrame({'pid': [11, 11, 11, 12, 12, 12, 12],
'stage': [0, 0, 1, 0, 0, 1, 1],
'income': [1, 1, 1, 0, 1, np.NaN, 1],
'incomeAmount': [5, 8, 12, 0, 6, 0, 3]})
df_init.to_csv(temp_csv_file, index=False)
temp_csv_file.seek(0)
temp_meta_file = tempfile.NamedTemporaryFile(mode='w')
metadata = {'name': 'test',
'duplicate_check_columns': ['pid', 'stage', 'income',
'incomeAmount'],
'columns_to_drop': [],
'categorical_var': ['income'],
'collection_stage_column': 'stage', 'entry_stage_val': 0,
'exit_stage_val': 1, 'update_stage_val': 2,
'annual_assessment_stage_val': 5, 'post_exit_stage_val': 6,
'person_enrollment_ID': 'pid',
'columns_to_take_max': ['income', 'incomeAmount']}
metadata_json = json.dumps(metadata)
temp_meta_file.file.write(metadata_json)
temp_meta_file.seek(0)
file_spec = {2011: temp_csv_file.name}
df = pp.get_income(file_spec=file_spec, data_dir=None, paths=None,
metadata_file=temp_meta_file.name)
df_test = pd.DataFrame({'pid': [11, 12],
'income_entry': [1.0, 1.0],
'income_exit': [1.0, 1.0],
'incomeAmount_entry': [8, 6],
'incomeAmount_exit': [12, 3]})
# Have to change the index to match the one we de-duplicated
df_test.index = pd.Int64Index([0, 2])
    # sort because column order is not guaranteed when the frames are built from dicts
df = df.sort_index(axis=1)
df_test = df_test.sort_index(axis=1)
pdt.assert_frame_equal(df, df_test)
# test error checking
temp_meta_file2 = tempfile.NamedTemporaryFile(mode='w')
metadata = {'name': 'test',
'duplicate_check_columns': ['pid', 'stage', 'type'],
'categorical_var': ['response']}
metadata_json = json.dumps(metadata)
temp_meta_file2.file.write(metadata_json)
temp_meta_file2.seek(0)
with pytest.raises(ValueError):
pp.get_income(file_spec=file_spec,
data_dir=None, paths=None,
metadata_file=temp_meta_file2.name)
temp_csv_file.close()
temp_meta_file.close()
temp_meta_file2.close()
def test_get_project():
temp_csv_file = tempfile.NamedTemporaryFile(mode='w')
df_init = pd.DataFrame({'pid': [3, 4], 'name': ['shelter1', 'rrh2'],
'ProjectType': [1, 13]})
df_init.to_csv(temp_csv_file, index=False)
temp_csv_file.seek(0)
temp_meta_file = tempfile.NamedTemporaryFile(mode='w')
metadata = {'name': 'test', 'program_ID': 'pid',
'duplicate_check_columns': ['pid', 'name', 'ProjectType'],
'columns_to_drop': [],
'project_type_column': 'ProjectType'}
metadata_json = json.dumps(metadata)
temp_meta_file.file.write(metadata_json)
temp_meta_file.seek(0)
file_spec = {2011: temp_csv_file.name}
df = pp.get_project(file_spec=file_spec, data_dir=None, paths=None,
metadata_file=temp_meta_file.name)
df_test = pd.DataFrame({'pid': [3, 4], 'name': ['shelter1', 'rrh2'],
'ProjectNumeric': [1, 13],
'ProjectType': ['Emergency Shelter',
'PH - Rapid Re-Housing']})
    # sort because column order is not guaranteed when the frames are built from dicts
df = df.sort_index(axis=1)
df_test = df_test.sort_index(axis=1)
pdt.assert_frame_equal(df, df_test)
def test_merge():
with tempfile.TemporaryDirectory() as temp_dir:
year_str = '2011'
paths = [year_str]
dir_year = op.join(temp_dir, year_str)
os.makedirs(dir_year, exist_ok=True)
# make up all the csv files and metadata files
enrollment_df = pd.DataFrame({'personID': [1, 2, 3, 4],
'person_enrollID': [10, 20, 30, 40],
'programID': [100, 200, 200, 100],
'groupID': [1000, 2000, 3000, 4000],
'entrydate': ['2011-01-13',
'2011-06-10',
'2011-12-05',
'2011-09-10']})
# print(enrollment_df)
enrollment_metadata = {'name': 'enrollment',
'person_enrollment_ID': 'person_enrollID',
'person_ID': 'personID',
'program_ID': 'programID',
'groupID_column': 'groupID',
'duplicate_check_columns': ['personID',
'person_enrollID',
'programID',
'groupID'],
'columns_to_drop': [],
'time_var': ['entrydate'],
'entry_date': 'entrydate'}
enrollment_csv_file = op.join(dir_year, 'Enrollment.csv')
enrollment_df.to_csv(enrollment_csv_file, index=False)
enrollment_meta_file = op.join(dir_year, 'Enrollment.json')
with open(enrollment_meta_file, 'w') as outfile:
json.dump(enrollment_metadata, outfile)
exit_df = pd.DataFrame({'ppid': [10, 20, 30, 40],
'dest_num': [12, 27, 20, 10],
'exitdate': ['2011-08-01', '2011-12-21',
'2011-12-27', '2011-11-30']})
exit_metadata = {'name': 'exit', 'person_enrollment_ID': 'ppid',
'destination_column': 'dest_num',
'duplicate_check_columns': ['ppid'],
'columns_to_drop': [],
'time_var': ['exitdate']}
exit_csv_file = op.join(dir_year, 'Exit.csv')
exit_df.to_csv(exit_csv_file, index=False)
exit_meta_file = op.join(dir_year, 'Exit.json')
with open(exit_meta_file, 'w') as outfile:
json.dump(exit_metadata, outfile)
# need to test removal of bad dobs & combining of client records here
client_df = pd.DataFrame({'pid': [1, 1, 2, 2, 3, 3, 4, 4],
'dob': ['1990-03-13', '2012-04-16',
'1955-08-21', '1855-08-21',
'2001-02-16', '2003-02-16',
'1983-04-04', '1983-04-06'],
'gender': [0, 0, 1, 1, 1, 1, 0, 0],
'veteran': [0, 0, 1, 1, 0, 0, 0, 0],
'first_name':["AAA", "AAA",
"noname", "noname",
"CCC", "CCC",
"DDD", "DDD"]})
client_metadata = {'name': 'client', 'person_ID': 'pid',
'dob_column': 'dob',
'time_var': ['dob'],
'categorical_var': ['gender', 'veteran'],
'boolean': ['veteran'],
'numeric_code': ['gender'],
'duplicate_check_columns': ['pid', 'dob'],
'name_columns' :["first_name"]}
client_csv_file = op.join(dir_year, 'Client.csv')
client_df.to_csv(client_csv_file, index=False)
client_meta_file = op.join(dir_year, 'Client.json')
with open(client_meta_file, 'w') as outfile:
json.dump(client_metadata, outfile)
disabilities_df = pd.DataFrame({'person_enrollID': [10, 10, 20, 20,
30, 30, 40, 40],
'stage': [0, 1, 0, 1, 0, 1, 0, 1],
'type': [5, 5, 5, 5, 5, 5, 5, 5],
'response': [0, 0, 1, 1, 0, 0, 0, 1]})
disabilities_metadata = {'name': 'disabilities',
'person_enrollment_ID': 'person_enrollID',
'categorical_var': ['response'],
'collection_stage_column': 'stage',
'entry_stage_val': 0, "exit_stage_val": 1,
'update_stage_val': 2,
'annual_assessment_stage_val': 5, 'post_exit_stage_val': 6,
'type_column': 'type',
'response_column': 'response',
'duplicate_check_columns': ['person_enrollID',
'stage', 'type'],
'columns_to_drop': []}
disabilities_csv_file = op.join(dir_year, 'Disabilities.csv')
disabilities_df.to_csv(disabilities_csv_file, index=False)
disabilities_meta_file = op.join(dir_year, 'Disabilities.json')
with open(disabilities_meta_file, 'w') as outfile:
json.dump(disabilities_metadata, outfile)
emp_edu_df = pd.DataFrame({'ppid': [10, 10, 20, 20, 30, 30, 40, 40],
'stage': [0, 1, 0, 1, 0, 1, 0, 1],
'employed': [0, 0, 0, 1, 1, 1, 0, 1]})
emp_edu_metadata = {'name': 'employment_education',
'person_enrollment_ID': 'ppid',
'categorical_var': ['employed'],
'collection_stage_column': 'stage',
'entry_stage_val': 0, "exit_stage_val": 1,
'update_stage_val': 2,
'annual_assessment_stage_val': 5, 'post_exit_stage_val': 6,
'duplicate_check_columns': ['ppid', 'stage'],
'columns_to_drop': []}
emp_edu_csv_file = op.join(dir_year, 'EmploymentEducation.csv')
emp_edu_df.to_csv(emp_edu_csv_file, index=False)
emp_edu_meta_file = op.join(dir_year, 'EmploymentEducation.json')
with open(emp_edu_meta_file, 'w') as outfile:
json.dump(emp_edu_metadata, outfile)
health_dv_df = pd.DataFrame({'ppid': [10, 10, 20, 20, 30, 30, 40, 40],
'stage': [0, 1, 0, 1, 0, 1, 0, 1],
'health_status': [0, 0, 0, 1, 1, 1, 0, 1]})
health_dv_metadata = {'name': 'health_dv',
'person_enrollment_ID': 'ppid',
'categorical_var': ['health_status'],
'collection_stage_column': 'stage',
'entry_stage_val': 0, 'exit_stage_val': 1,
'update_stage_val': 2,
'annual_assessment_stage_val': 5, 'post_exit_stage_val': 6,
'duplicate_check_columns': ['ppid', 'stage'],
'columns_to_drop': []}
health_dv_csv_file = op.join(dir_year, 'HealthAndDV.csv')
health_dv_df.to_csv(health_dv_csv_file, index=False)
health_dv_meta_file = op.join(dir_year, 'HealthAndDV.json')
with open(health_dv_meta_file, 'w') as outfile:
json.dump(health_dv_metadata, outfile)
income_df = pd.DataFrame({'ppid': [10, 10, 20, 20, 30, 30, 40, 40],
'stage': [0, 1, 0, 1, 0, 1, 0, 1],
'income': [0, 0, 0, 1000, 500, 400, 0, 300]})
income_metadata = {'name': 'income', 'person_enrollment_ID': 'ppid',
'categorical_var': ['income'],
'collection_stage_column': 'stage',
'entry_stage_val': 0, 'exit_stage_val': 1,
'update_stage_val': 2,
'annual_assessment_stage_val': 5, 'post_exit_stage_val': 6,
'columns_to_take_max': ['income'],
'duplicate_check_columns': ['ppid', 'stage'],
'columns_to_drop': []}
income_csv_file = op.join(dir_year, 'IncomeBenefits.csv')
income_df.to_csv(income_csv_file, index=False)
income_meta_file = op.join(dir_year, 'IncomeBenefits.json')
with open(income_meta_file, 'w') as outfile:
json.dump(income_metadata, outfile)
project_df = pd.DataFrame({'pr_id': [100, 200],
'type': [1, 2]})
project_metadata = {'name': 'project', 'program_ID': 'pr_id',
'project_type_column': 'type',
'duplicate_check_columns': ['pr_id'],
'columns_to_drop': []}
project_csv_file = op.join(dir_year, 'Project.csv')
project_df.to_csv(project_csv_file, index=False)
project_meta_file = op.join(dir_year, 'Project.json')
with open(project_meta_file, 'w') as outfile:
json.dump(project_metadata, outfile)
metadata_files = {'enrollment': enrollment_meta_file,
'exit': exit_meta_file,
'client': client_meta_file,
'disabilities': disabilities_meta_file,
'employment_education': emp_edu_meta_file,
'health_dv': health_dv_meta_file,
'income': income_meta_file,
'project': project_meta_file}
for name_exclusion in [False, True]:
df = pp.merge_tables(meta_files=metadata_files,
data_dir=temp_dir, paths=paths, groups=False,
name_exclusion=name_exclusion)
df_test = pd.DataFrame({'personID': [1, 2, 3, 4],
'first_name':["AAA", "noname",
"CCC", "DDD"],
'person_enrollID': [10, 20, 30, 40],
'programID': [100, 200, 200, 100],
'groupID': [1000, 2000, 3000, 4000],
'entrydate': pd.to_datetime(['2011-01-13',
'2011-06-10',
'2011-12-05',
'2011-09-10']),
'DestinationNumeric': [12., 27., 20, 10],
'DestinationDescription': [
'Staying or living with family, temporary tenure (e.g., room, apartment or house)',
'Moved from one HOPWA funded project to HOPWA TH',
'Rental by client, with other ongoing housing subsidy',
'Rental by client, no ongoing housing subsidy'],
'DestinationGroup': ['Temporary', 'Temporary',
'Permanent', 'Permanent'],
'DestinationSuccess': ['Other Exit',
'Other Exit',
'Successful Exit',
'Successful Exit'],
'exitdate': pd.to_datetime(['2011-08-01',
'2011-12-21',
'2011-12-27',
'2011-11-30']),
'Subsidy': [False, False, True, False],
'dob': pd.to_datetime(['1990-03-13',
'1955-08-21', pd.NaT,
'1983-04-05']),
'gender': [0, 1, 1, 0],
'veteran': [0, 1, 0, 0],
'Physical_entry': [0, 1, 0, 0],
'Physical_exit': [0, 1, 0, 1],
'employed_entry': [0, 0, 1, 0],
'employed_exit': [0, 1, 1, 1],
'health_status_entry': [0, 0, 1, 0],
'health_status_exit': [0, 1, 1, 1],
'income_entry': [0, 0, 500, 0],
'income_exit': [0, 1000, 400, 300],
'ProjectNumeric': [1, 2, 2, 1],
'ProjectType': ['Emergency Shelter',
'Transitional Housing',
'Transitional Housing',
'Emergency Shelter']})
if name_exclusion:
select = df_test['first_name'] == "noname"
df_test = df_test[~select]
df_test = df_test.reset_index(drop=True)
            # sort because column order is not guaranteed when the frames are built from dicts
df = df.sort_index(axis=1)
df_test = df_test.sort_index(axis=1)
| pdt.assert_frame_equal(df, df_test) | pandas.util.testing.assert_frame_equal |
import pickle
from glob import glob
import numpy as np
import pandas as pd
import tensorflow as tf
from bert_score import BERTScorer
from tqdm import tqdm
from transformers import logging
from clfs import BERTClassifier
from data import TaskLoader
from scoring import meteor_score
logging.set_verbosity_error()
tf.compat.v1.logging.set_verbosity(0)
class Scorer(object):
def __init__(self):
self.bert_score = BERTScorer(lang="en", rescale_with_baseline=True)
self.base_model = pickle.load(open('../logs/model-bully-0-plain-' +
'vanilla-no-aug/svm.pickle', 'rb'))
self.bert_model = BERTClassifier(augmenter='skip',
task="augment-train").load()
def fp_rate(self, ref, gen):
ref_acc = sum(ref) / len(ref)
gen_acc = sum(gen) / len(gen)
return ref_acc, ref_acc - gen_acc
def clfscore(self, R_data, G_data):
rp = self.base_model.predict(R_data)
gp = self.base_model.predict(G_data)
return self.fp_rate(rp, gp)
def tfscore(self, R_data, G_data, bert=None):
if not bert:
bert = self.bert_model
rp = bert.model_predict(R_data, batch_size=50)
gp = bert.model_predict(G_data, batch_size=50)
return self.fp_rate(rp, gp)
def reference_score(self, R_data, bert):
ŷ_pln = self.bert_model.model_predict(R_data, batch_size=45)
ŷ_aug = bert.model_predict(R_data, batch_size=45)
return self.fp_rate(ŷ_pln, ŷ_aug)
def bertscore(self, R_data, G_data, avg=True):
"""Measure BERTScore."""
score = self.bert_score.score(R_data, G_data)[2]
if avg:
return round(score.mean().item(), 3)
else:
return score
def bleurtscore(self, R_data, G_data, avg=True):
"""Measure BLEURT."""
        # assumes self.bleurt (a BLEURT scorer exposing .score) is set up elsewhere
        batch, scores = [], []
        for i, (a_doc, x_doc) in tqdm(enumerate(zip(R_data, G_data), start=1)):
            batch.append((a_doc, x_doc))
            if not i % 64:
                refs, cands = list(zip(*batch))
                batch_score = self.bleurt.score(
                    references=refs, candidates=cands, batch_size=64)
                scores.extend(batch_score)
                batch = []
        if batch:
            # score the final partial batch so trailing pairs are not dropped
            refs, cands = list(zip(*batch))
            scores.extend(self.bleurt.score(
                references=refs, candidates=cands, batch_size=64))
if avg:
return round(np.mean(scores), 3)
else:
return scores
def meteorscore(self, R_data, G_data, avg=True):
"""Measure METEOR."""
scores = []
for a_doc, x_doc in zip(R_data, G_data):
scores.append(meteor_score([a_doc], x_doc))
if avg:
return round(np.mean(scores), 3)
else:
return scores
def score_data():
sc = Scorer()
df_ref = pd.read_csv('../data/positive_set.csv', index_col=0)
df_sco = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import matplotlib as mpl
import numpy as np
from sklearn import metrics
import itertools
import warnings
from dateutil.relativedelta import relativedelta
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import ticker
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
# plt.style.use('ggplot')
sns.set_theme(style="darkgrid")
font = {'size' : 12}
mpl.rc('font', **font)
mpl.rc('figure', max_open_warning = 0)
pd.set_option('display.max_columns',None)
| pd.set_option('display.max_rows',25) | pandas.set_option |
# -*- coding: utf-8 -*-
"""
cdeweb.api.representations
~~~~~~~~~~~~~~~~~~~~~~~~~~
API response formats.
:copyright: Copyright 2016 by <NAME>.
:license: MIT, see LICENSE file for more details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
from io import BytesIO
import logging
import dicttoxml
import pandas as pd
from flask import make_response, abort, Response
from rdkit import Chem
from rdkit.Chem import AllChem
from . import api
log = logging.getLogger(__name__)
@api.representation('application/xml')
def output_xml(data, code, headers):
resp = make_response(dicttoxml.dicttoxml(data, attr_type=False, custom_root='job'), code)
resp.headers.extend(headers)
return resp
@api.representation('application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
def output_xlsx(data, code, headers):
if 'result' not in data:
abort(400, 'Result not ready')
bio = BytesIO()
writer = | pd.ExcelWriter(bio, engine='xlsxwriter') | pandas.ExcelWriter |
#!/bin/python
# -*- coding: utf-8 -*-
import warnings
import os
import time
import tqdm
import numpy as np
import pandas as pd
import scipy.stats as ss
import scipy.optimize as so
from scipy.special import gammaln
from grgrlib.core import timeprint
from grgrlib.stats import mode
def mc_error(x):
means = np.mean(x, 0)
return np.std(means) / np.sqrt(x.shape[0])
def calc_min_interval(x, alpha):
"""Internal method to determine the minimum interval of
a given width
Assumes that x is sorted numpy array.
"""
n = len(x)
cred_mass = 1.0 - alpha
interval_idx_inc = int(np.floor(cred_mass * n))
n_intervals = n - interval_idx_inc
interval_width = x[interval_idx_inc:] - x[:n_intervals]
if len(interval_width) == 0:
# raise ValueError('Too few elements for interval calculation')
warnings.warn('Too few elements for interval calculation.')
return None, None
else:
min_idx = np.argmin(interval_width)
hdi_min = x[min_idx]
hdi_max = x[min_idx + interval_idx_inc]
return hdi_min, hdi_max
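# Illustrative sketch (not part of the original module): typical use of
# calc_min_interval on posterior draws. The input must already be sorted, and
# alpha is the excluded tail mass, so alpha=0.1 yields a 90% HPD interval.
#
#     draws = np.sort(np.random.randn(10000))
#     lo, hi = calc_min_interval(draws, alpha=0.1)
#     # lo and hi bound the narrowest interval containing ~90% of the draws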
def _hpd_df(x, alpha):
cnames = ['hpd_{0:g}'.format(100 * alpha / 2),
'hpd_{0:g}'.format(100 * (1 - alpha / 2))]
sx = np.sort(x.flatten())
hpd_vals = np.array(calc_min_interval(sx, alpha)).reshape(1, -1)
return pd.DataFrame(hpd_vals, columns=cnames)
def summary(self, store, pmode=None, bounds=None, alpha=0.1, top=None, show_prior=True):
# inspired by pymc3 because it looks really nice
priors = self['__data__']['estimation']['prior']
if bounds is not None or isinstance(store, tuple):
xs, fs, ns = store
ns = ns.squeeze()
fas = (-fs[:, 0]).argsort()
xs = xs[fas]
fs = fs.squeeze()[fas]
f_prs = [lambda x: | pd.Series(x, name='distribution') | pandas.Series |
'''
This script takes the mol2 files and converts to a smiles representation.
by <NAME> (01/16/17)
# rdkit does not support reading mol2 files with more than one molecule, using this code as a reference on how
to handle this: https://www.mail-archive.com/<EMAIL>/msg01510.html
'''
import os
import argparse
import pandas as pd
from tqdm import tqdm
from rdkit import Chem
import sys
import rdkit.Chem.Descriptors as descr
parser = argparse.ArgumentParser()
parser.add_argument('-i', type=str, help="path to directory containing mol2 files")
parser.add_argument('-o', type=str, help="path to directory to output smiles files")
args = parser.parse_args()
# TODO: need to extract the name of the molecule and then pair this with the smiles representation...then store in a dataframe...then output to .csv
def retrieve_mol2block(fileLikeObject, delimiter="@<TRIPOS>MOLECULE"):
"""generator which retrieves one mol2 block at a time
"""
    # the line directly after @<TRIPOS>MOLECULE contains the name of the molecule
molname = None
prevline = ""
mol2 = []
    for line in fileLikeObject:  # the line right after the delimiter holds the molecule name (with trailing newline)
if line.startswith(delimiter) and mol2:
            yield (molname.strip("\n").replace('-', '_'), "".join(mol2))
molname = ""
mol2 = []
elif prevline.startswith(delimiter):
molname = line
mol2.append(line)
prevline = line
if mol2:
yield (molname, "".join(mol2))
molname = ""
if __name__ == "__main__":
output_df = | pd.DataFrame() | pandas.DataFrame |
'''
Script to extract which people passed/failed/had an error in any of the 5 sessions with the conversational
agent.
Also extracts the planning/reflection answers for all people who passed the attention
checks in a session. This is so that we can check for non-sensical answers and
reject and no further invite such people.
'''
import Utils as util
import pandas as pd
database_path = "W:/staff-umbrella/perfectfit/Exp0/2021_06_28_0814_Final_chatbot.db"
# For sessions 1 through 5 with the conversational agent
for session_num in range(1, 6):
# get IDs of people who passed/failed/had an error w.r.t. attention checks in this session
user_ids_passed, user_ids_failed, user_ids_error = util.check_attention_checks_session(database_path,
session_num = session_num)
df_user_ids_passed = pd.DataFrame(user_ids_passed, columns = ['PROLIFIC_PID'])
df_user_ids_failed = pd.DataFrame(user_ids_failed, columns = ['PROLIFIC_PID'])
# something went wrong in a session, i.e. some but not all attention check data was saved.
df_user_ids_error = pd.DataFrame(user_ids_error, columns = ['PROLIFIC_PID'])
# Save dataframes to .csv-files
df_user_ids_passed.to_csv("W:/staff-umbrella/perfectfit/Exp0/session" + str(session_num) + "_passed_p_ids.csv",
index = False)
df_user_ids_failed.to_csv("W:/staff-umbrella/perfectfit/Exp0/session" + str(session_num) + "_failed_p_ids.csv",
index = False)
df_user_ids_error.to_csv("W:/staff-umbrella/perfectfit/Exp0/session" + str(session_num) + "_error_p_ids.csv",
index = False)
# Get planning/reflection answers for this session
user_ids_answers, answers, activities, action_types = util.get_planning_reflection_answers(database_path,
session_num = session_num)
# Only need to look at the answers of people who passed the attention checks and had no error
indices = [i for i in range(len(user_ids_answers)) if user_ids_answers[i] in user_ids_passed]
user_ids_answers = list(map(user_ids_answers.__getitem__, indices))
answers = list(map(answers.__getitem__, indices))
activities = list(map(activities.__getitem__, indices))
action_types = list(map(action_types.__getitem__, indices))
df_answers = | pd.DataFrame([user_ids_answers, answers, activities, action_types]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Test data
"""
# Imports
import pandas as pd
from edbo.feature_utils import build_experiment_index
# Build data sets from indices
def aryl_amination(aryl_halide='ohe', additive='ohe', base='ohe', ligand='ohe', subset=1):
"""
Load aryl amination data with different features.
"""
# SMILES index
index = pd.read_csv('data/aryl_amination/experiment_index.csv')
    # Choose subset:
ar123 = ['FC(F)(F)c1ccc(Cl)cc1','FC(F)(F)c1ccc(Br)cc1','FC(F)(F)c1ccc(I)cc1']
ar456 = ['COc1ccc(Cl)cc1','COc1ccc(Br)cc1','COc1ccc(I)cc1']
ar789 = ['CCc1ccc(Cl)cc1','CCc1ccc(Br)cc1','CCc1ccc(I)cc1']
ar101112 = ['Clc1ccccn1','Brc1ccccn1','Ic1ccccn1']
ar131415 = ['Clc1cccnc1','Brc1cccnc1','Ic1cccnc1']
def get_subset(ar):
a = index[index['Aryl_halide_SMILES'] == ar[0]]
b = index[index['Aryl_halide_SMILES'] == ar[1]]
c = index[index['Aryl_halide_SMILES'] == ar[2]]
return pd.concat([a,b,c])
if subset == 1:
index = get_subset(ar123)
elif subset == 2:
index = get_subset(ar456)
elif subset == 3:
index = get_subset(ar789)
elif subset == 4:
index = get_subset(ar101112)
elif subset == 5:
index = get_subset(ar131415)
# Aryl halide features
if aryl_halide == 'dft':
aryl_features = pd.read_csv('data/aryl_amination/aryl_halide_dft.csv')
elif aryl_halide == 'mordred':
aryl_features = pd.read_csv('data/aryl_amination/aryl_halide_mordred.csv')
elif aryl_halide == 'ohe':
aryl_features = pd.read_csv('data/aryl_amination/aryl_halide_ohe.csv')
# Additive features
if additive == 'dft':
add_features = pd.read_csv('data/aryl_amination/additive_dft.csv')
elif additive == 'mordred':
add_features = pd.read_csv('data/aryl_amination/additive_mordred.csv')
elif additive == 'ohe':
add_features = pd.read_csv('data/aryl_amination/additive_ohe.csv')
# Base features
if base == 'dft':
base_features = pd.read_csv('data/aryl_amination/base_dft.csv')
elif base == 'mordred':
base_features = pd.read_csv('data/aryl_amination/base_mordred.csv')
elif base == 'ohe':
base_features = pd.read_csv('data/aryl_amination/base_ohe.csv')
# Ligand features
if ligand == 'Pd(0)-dft':
ligand_features = pd.read_csv('data/aryl_amination/ligand-Pd(0)_dft.csv')
elif ligand == 'mordred':
ligand_features = pd.read_csv('data/aryl_amination/ligand_mordred.csv')
elif ligand == 'ohe':
ligand_features = pd.read_csv('data/aryl_amination/ligand_ohe.csv')
# Build the descriptor set
index_list = [index['Aryl_halide_SMILES'],
index['Additive_SMILES'],
index['Base_SMILES'],
index['Ligand_SMILES']]
lookup_table_list = [aryl_features,
add_features,
base_features,
ligand_features]
lookup_list = ['aryl_halide_SMILES',
'additive_SMILES',
'base_SMILES',
'ligand_SMILES']
experiment_index = build_experiment_index(index['entry'],
index_list,
lookup_table_list,
lookup_list)
experiment_index['yield'] = index['yield'].values
return experiment_index
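# Illustrative call (a sketch, not from the original module): DFT aryl-halide
# descriptors, Mordred additive descriptors, one-hot base/ligand, restricted to
# subset 3 (the CCc1ccc(X)cc1 aryl halides); the returned frame holds the merged
# descriptors plus a 'yield' column.
#
#     data = aryl_amination(aryl_halide='dft', additive='mordred',
#                           base='ohe', ligand='ohe', subset=3)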
def suzuki(electrophile='ohe', nucleophile='ohe', base='ohe', ligand='ohe', solvent='ohe'):
"""
Load Suzuki data with different features.
"""
# SMILES index
index = pd.read_csv('data/suzuki/experiment_index.csv')
# Electrophile features
if electrophile == 'dft':
elec_features = pd.read_csv('data/suzuki/electrophile_dft.csv')
elif electrophile == 'mordred':
elec_features = pd.read_csv('data/suzuki/electrophile_mordred.csv')
elif electrophile == 'ohe':
elec_features = pd.read_csv('data/suzuki/electrophile_ohe.csv')
# Nucleophile features
if nucleophile == 'dft':
nuc_features = pd.read_csv('data/suzuki/nucleophile_dft.csv')
elif nucleophile == 'mordred':
nuc_features = pd.read_csv('data/suzuki/nucleophile_mordred.csv')
elif nucleophile == 'ohe':
nuc_features = pd.read_csv('data/suzuki/nucleophile_ohe.csv')
# Base features
if base == 'dft':
base_features = | pd.read_csv('data/suzuki/base_dft.csv') | pandas.read_csv |
import requests
import pandas as pd
import json
class AssemblyDataReader:
def __init__(self, key):
self.api_key = key
self.__api_ids = {
'국회의원 발의법률안':'nzmimeepazxkubdpn',
'본회의 처리안건_법률안':'nwbpacrgavhjryiph',
'본회의 처리안건_예산안':'nzgjnvnraowulzqwl',
'본회의 처리안건_결산':'nkalemivaqmoibxro',
'본회의 처리안건_기타':'nbslryaradshbpbpm',
'역대 국회의원 현황':'nprlapfmaufmqytet',
'역대 국회의원 인적사항':'npffdutiapkzbfyvr',
'역대 국회의원 위원회 경력':'nqbeopthavwwfbekw',
'의안별 표결현황':'ncocpgfiaoituanbr',
'국회의원 본회의 표결정보':'nojepdqqaweusdfbi',
'국회사무처 업무추진비 집행현황':'nalacaiwauxiynsxt',
'국회도서관 업무추진비 집행현황':'ngqoyjbkaxutcpmot',
'국회입법조사처 업무추진비 집행현황':'nlmqzojlayoicbxhw',
'국회예산정책처 업무추진비 집행현황':'nknmvzexapgiarqcd',
'국회의원 세미나 일정':'nfcoioopazrwmjrgs',
'국회의원 소규모 연구용역 결과보고서':'nfvmtaqoaldzhobsw',
'날짜별 의정활동':'nqfvrbsdafrmuzixe',
'본회의 일정':'nekcaiymatialqlxr'
}
self.en2kor = {'MEETINGSESSION': '회기',
'CHA': '차수',
'TITLE': '제목',
'MEETTING_DATE': '일자',
'MEETTING_TIME': '일시',
'UNIT_CD': '대별코드',
'UNIT_NM': '대',
'SEQ': '순번',
'DT': '일자',
'BILL_KIND': '의안활동구분',
'AGE': '대수',
'BILL_NO': '의안번호',
'BILL_NM': '의안명',
'STAGE': '단계',
'DTL_STAGE': '세부단계',
'COMMITTEE': '소관위원회',
'ACT_STATUS': '활동상태',
'BILL_ID': '의안ID',
'LINK_URL': '링크URL',
'COMMITTEE_ID': '소관위원회ID',
'RPT_NO': '다운로드',
'YEAR': '년도',
'FILE_ID': '파일ID',
'RPT_TITLE': '보고서제목',
'RG_DE': '등록일',
'ASBLM_NM': '의원명',
'QUARTER': '분기',
'DIV_NM': '구분명',
'LINK': '의원실링크',
'DESCRIPTION': '설명',
'SDATE': '개최일',
'STIME': '개최시간',
'NAME': '이름',
'LOCATION': '개최장소',
'PRDC_YM_NM': '생산년월',
'OPB_FL_NM': '공개파일명',
'INST_CD': '기관코드',
'INST_NM': '기관명',
'OPB_FL_PH': '공개파일경로',
'HG_NM': '이름',
'HJ_NM': '한자명',
'POLY_NM': '정당명',
'MEMBER_NO': '의원번호',
'POLY_CD': '소속정당코드',
'ORIG_CD': '선거구코드',
'VOTE_DATE': '의결일자',
'BILL_NAME': '의안명',
'LAW_TITLE': '법률명',
'CURR_COMMITTEE': '소관위',
'RESULT_VOTE_MOD': '표결결과',
'DEPT_CD': '부서코드(사용안함)',
'CURR_COMMITTEE_ID': '소관위원회ID',
'DISP_ORDER': '표시정렬순서',
'BILL_URL': '의안URL',
'BILL_NAME_URL': '의안링크',
'SESSION_CD': '회기',
'CURRENTS_CD': '차수',
'MONA_CD': '국회의원코드',
'PROC_DT': '처리일',
'PROC_RESULT_CD': '의결결과',
'BILL_KIND_CD': '의안종류',
'MEMBER_TCNT': '재적의원',
'VOTE_TCNT': '총투표수',
'YES_TCNT': '찬성',
'NO_TCNT': '반대',
'BLANK_TCNT': '기권',
'PROFILE_CD': '구분코드',
'PROFILE_NM': '구분',
'FRTO_DATE': '활동기간',
'PROFILE_SJ': '위원회 경력',
'PROFILE_UNIT_CD': '경력대수코드',
'PROFILE_UNIT_NM': '경력대수',
'ENG_NM': '영문명칭',
'BTH_GBN_NM': '음/양력',
'BTH_DATE': '생년월일',
'SEX_GBN_NM': '성별',
'REELE_GBN_NM': '재선',
'UNITS': '당선',
'ORIG_NM': '선거구',
'ELECT_GBN_NM': '선거구구분',
'PROPOSE_DT': '제안일',
'PROC_RESULT': '처리상태',
'DETAIL_LINK': '상세페이지',
'PROPOSER': '제안자',
'MEMBER_LIST': '제안자목록링크',
'RST_PROPOSER': '대표발의자',
'PUBL_PROPOSER': '공동발의자',
'COMMITTEE_NM': '소관위원회',
'COMMITTEE_SUBMIT_DT': '위원회심사_회부일',
'COMMITTEE_PRESENT_DT': '위원회심사_상정일',
'COMMITTEE_PROC_DT': '위원회심사_의결일',
'LAW_SUBMIT_DT': '법사위체계자구심사_회부일',
'LAW_PRESENT_DT': '법사위체계자구심사_상정일',
'LAW_PROC_DT': '법사위체계자구심사_의결일',
'RGS_PRESENT_DT': '본회의심의_상정일',
'RGS_PROC_DT': '본회의심의_의결일',
'CURR_TRANS_DT': '정부이송일',
'ANNOUNCE_DT': '공포일',
'BDG_SUBMIT_DT': '예결위심사_회부일',
'BDG_PRESENT_DT': '예결위심사_상정일',
'BDG_PROC_DT': '예결위심사_의결일',
'PROPOSER_KIND_CD': '제안자구분',
'DAESU': '대수',
'DAE': '대별 및 소속정당(단체)',
'DAE_NM': '대별',
'NAME_HAN': '이름(한자)',
'JA': '자',
'HO': '호',
'BIRTH': '생년월일',
'BON': '본관',
'POSI': '출생지',
'HAK': '학력 및 경력',
'HOBBY': '종교 및 취미',
'BOOK': '저서',
'SANG': '상훈',
'DEAD': '기타정보(사망일)',
'URL': '회원정보 확인 헌정회 홈페이지 URL'}
def __read(self, api_id, **kargs):
url = 'https://open.assembly.go.kr/portal/openapi/' + api_id
params = {
'KEY':self.api_key,
'pIndex':1,
'Type':'json',
'pSize':1000,
}
params.update(kargs)
df = pd.DataFrame()
total_count = float('inf')
while len(df) < total_count:
j = requests.get(url, params=params).json()
assert 'RESULT' not in j, j['RESULT']['MESSAGE']
total_count = j[api_id][0]['head'][0]['list_total_count']
df = df.append(pd.DataFrame(j[api_id][1]['row']))
params['pIndex'] += 1
return df.reset_index(drop=True)
def __get_params(self, key, daesu=None, **kargs):
params = {}
if key in {'국회의원 발의법률안',
'본회의 처리안건_법률안',
'본회의 처리안건_예산안',
'본회의 처리안건_결산',
'본회의 처리안건_기타',
'국회의원 본회의 표결정보',
'의안별 표결현황',
'날짜별 의정활동'}:
            assert daesu is not None, 'The daesu argument is required.'
params.update({'AGE':daesu})
elif key in {'역대 국회의원 현황'}:
            assert daesu is not None, 'The daesu argument is required.'
params.update({'DAESU':daesu})
elif key in {'역대 국회의원 인적사항',
'국회의원 소규모 연구용역 결과보고서',
'본회의 일정'}:
            assert daesu is not None, 'The daesu argument is required.'
UNIT_CD = '1'+str(daesu).zfill(5)
params.update({'UNIT_CD':UNIT_CD})
elif key in {'역대 국회의원 위원회 경력'}:
            assert daesu is not None, 'The daesu argument is required.'
PROFILE_UNIT_CD = '1'+str(daesu).zfill(5)
params.update({'PROFILE_UNIT_CD':PROFILE_UNIT_CD})
args = {k.lower() for k in kargs.keys()}
if key in {'날짜별 의정활동'}:
            assert 'dt' in args, 'The DT argument is required.'
elif key in {'국회의원 본회의 표결정보'}:
            assert 'bill_id' in args, 'The BILL_ID argument is required.'
params.update(**kargs)
return params
def read(self, key, daesu=None, **kargs):
'''
        Pass the name of the dataset you want to fetch as `key`.
        Depending on the dataset, additional arguments may be required.
        Fetch bills proposed by members of the 21st National Assembly:
        >>> adr.read('국회의원 발의법률안', daesu=21)
        Fetch the parliamentary activities of the 21st Assembly on 2020-08-18:
        >>> adr.read('날짜별 의정활동', daesu=21, dt='2020-08-18')
        Fetch the NABO economic and fiscal handbook:
        >>> adr.read('ncnpwqimabagvdmky')
        Full examples are available at the following link.
        https://github.com/hohyun321/AssemblyDataReader
        * key (str): choose one of the following, or enter the trailing part of the request URL.
'국회사무처 업무추진비 집행현황'
'국회도서관 업무추진비 집행현황'
'국회입법조사처 업무추진비 집행현황'
'국회예산정책처 업무추진비 집행현황'
'국회의원 세미나 일정'
'국회의원 발의법률안'
'본회의 처리안건_법률안'
'본회의 처리안건_예산안'
'본회의 처리안건_결산'
'본회의 처리안건_기타'
'역대 국회의원 현황'
'역대 국회의원 인적사항'
'역대 국회의원 위원회 경력'
'의안별 표결현황'
'국회의원 소규모 연구용역 결과보고서'
'본회의 일정'
'국회의원 본회의 표결정보'
'날짜별 의정활동'
        * daesu (int): National Assembly number (대수)
        Additional arguments can be passed optionally. See the API provider's site.
https://open.assembly.go.kr/portal/openapi/openApiNaListPage.do
'''
if key in self.__api_ids:
api_id = self.__api_ids[key]
else:
api_id = key
params = self.__get_params(key, daesu=daesu, **kargs)
return self.__read(api_id, **params)
def listing(self):
'''
        Fetches the full list of available APIs.
https://open.assembly.go.kr/portal/openapi/openApiNaListPage.do
'''
url = 'https://open.assembly.go.kr/portal/openapi/selectInfsOpenApiListPaging.do'
headers = {'User-Agent': 'Mozilla/5.0'}
form_data = {'rows': 500, 'page':1}
r = requests.post(url, data=form_data, headers=headers)
data = r.json()['data']
df = | pd.DataFrame(data) | pandas.DataFrame |
import operator
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import SparseArray
@pytest.mark.parametrize("fill_value", [0, np.nan])
@pytest.mark.parametrize("op", [operator.pos, operator.neg])
def test_unary_op(op, fill_value):
arr = np.array([0, 1, np.nan, 2])
sparray = SparseArray(arr, fill_value=fill_value)
result = op(sparray)
expected = SparseArray(op(arr), fill_value=op(fill_value))
tm.assert_sp_array_equal(result, expected)
@pytest.mark.parametrize("fill_value", [True, False])
def test_invert(fill_value):
arr = np.array([True, False, False, True])
sparray = SparseArray(arr, fill_value=fill_value)
result = ~sparray
expected = SparseArray(~arr, fill_value=not fill_value)
tm.assert_sp_array_equal(result, expected)
result = ~pd.Series(sparray)
expected = pd.Series(expected)
tm.assert_series_equal(result, expected)
result = ~pd.DataFrame({"A": sparray})
expected = | pd.DataFrame({"A": expected}) | pandas.DataFrame |
import keras
import numpy as np
from numpy import save, load
from numpy import concatenate
from pandas import DataFrame
from pandas import read_csv
from pandas import concat
from A_Parameters import *
from B_SourceData import *
from C_StructureData import *
from D_ModelTrain import *
from E_PredicAll import *
from F_BackTesting import *
# Run mode flags: 1 = run the step, 0 = skip it
RunSourceData = 1
RunStructureData = 1
RunModelTrain = 1
RunPredicNew = 1
RunBackTesting = 1
# Import parameters for Grid search
P = Parameters()
Cum_number = 0
Cum_portofolio = 0
for i in range(len(P)):
change = P[i][0]
epoch = P[i][1]
batch = P[i][2]
drop = P[i][3]
SourceStart = P[i][4]
TestFrom = P[i][5]
TestTo = P[i][6]
# Run all process
# 1) Import source data
print ("\n\n=======>>=======>>=======>>=======>>=======>>=======>>=======>>>>>>>")
if RunSourceData == 1:# and Cum_number == 0:
print ("\n******************** 1) Import source data ********************")
SourceEnd = TestTo
data_cont, data_name = SourceData(SourceStart, SourceEnd)
save("Par-DataSource.npy", [data_cont, data_name])
# 2) Structuring data
if RunStructureData == 1:# and Cum_number == 0:
print ("\n******************** 2) Structuring data ********************")
perc_train = 90
n_steps = 20
# Load parameters
param = load("Par-DataSource.npy")
data_cont, data_name = param[0], param[1]
# Structuring data
train_X, train_y, val_X, val_y, test_X, test_y, dummy_y, test_dt, dateindex = StructureData(perc_train, n_steps, data_cont, data_name, change, TestFrom, TestTo)
save("Par-DataStructure.npy", [train_X, train_y, val_X, val_y, test_X, test_y, dummy_y, test_dt, dateindex])
# 3) LSTM train and validation
if RunModelTrain == 1:
print ("\n******************** 3) Training process ********************")
        # Parameter definitions
ep = epoch
bs = batch
dr = drop
# Load parameters
param = load("Par-DataStructure.npy")
train_X, train_y = param[0], param[1]
val_X, val_y = param[2], param[3]
test_X, test_y = param[4], param[5]
# Train process
ModelTrain(train_X, train_y, val_X, val_y, test_X, test_y, ep, bs, dr)
# 4) Predict all data
if RunPredicNew == 1:
print ("\n******************** 4) Prediction process ********************")
# Load parameters
param = load("Par-DataStructure.npy")
train_X, train_y = param[0], param[1]
val_X, val_y = param[2], param[3]
test_X, test_y = param[4], param[5]
dummy_y, dateindex = param[6], param[8]
# Prediction
model = keras.models.load_model("model.h5")
data_X = concatenate((train_X, val_X, test_X), axis = 0)
dummy_yhat = model.predict(data_X)
        # Use data up to NoTestTo
NoTestTo = list(dateindex.strftime('%Y-%m-%d')).index(TestTo)+1
dummy_yhat = dummy_yhat[:NoTestTo, :]
# All_y dataframe
all_y = np.zeros(len(dateindex))
all_y[dummy_y[:,0]==1] = 1
all_y[dummy_y[:,1]==1] = 0
all_y[dummy_y[:,2]==1] = -1
df_all_y = | DataFrame(all_y, index=dateindex, columns=["y"]) | pandas.DataFrame |
import os
import pytest
import pandas as pd
import numpy as np
from feat import feat, column_names
from sklearn.datasets import load_breast_cancer
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.preprocessing import FunctionTransformer
from sklearn.decomposition import PCA
from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import SelectKBest
from sklearn.compose import make_column_transformer
from sklearn.compose import make_column_selector
from sklearn.pipeline import make_pipeline
dir = os.path.dirname(os.path.abspath(__file__))
hotels = pd.read_csv(os.path.join(dir, "hotels.csv"))
y = hotels["children"]
X = hotels.drop("children", axis=1)
nominal = ["hotel", "meal"]
numeric = ["lead_time", "average_daily_rate"]
def test_default():
assert feat(None, nominal).equals(
| pd.DataFrame({"name": nominal, "feature": nominal}) | pandas.DataFrame |
import pandas as pd
data = | pd.Series([0, 1, 1, 1, 1, 0, 1, 2, 1, 2]) | pandas.Series |
# ----------------------------------------------------------------------------
# Copyright (c) 2017-2019, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import gzip
import os
import pathlib
import shutil
import tempfile
import unittest
import pandas as pd
from q2_cutadapt._demux import (_build_demux_command, _rename_files,
_write_barcode_fasta,
_write_empty_fastq_to_mux_barcode_in_seq_fmt)
from q2_types.multiplexed_sequences import (
MultiplexedSingleEndBarcodeInSequenceDirFmt,
MultiplexedPairedEndBarcodeInSequenceDirFmt)
from q2_types.per_sample_sequences import (
SingleLanePerSampleSingleEndFastqDirFmt,
SingleLanePerSamplePairedEndFastqDirFmt,
FastqGzFormat)
from qiime2 import Artifact, CategoricalMetadataColumn
from qiime2.util import redirected_stdio
from qiime2.plugin.testing import TestPluginBase
class TestDemuxSingle(TestPluginBase):
package = 'q2_cutadapt.tests'
def assert_demux_results(self, exp_samples_and_barcodes, obs_demuxed_art):
obs_demuxed = obs_demuxed_art.view(
SingleLanePerSampleSingleEndFastqDirFmt)
obs_demuxed_seqs = obs_demuxed.sequences.iter_views(FastqGzFormat)
zipped = zip(exp_samples_and_barcodes.iteritems(), obs_demuxed_seqs)
for (sample_id, barcode), (filename, _) in zipped:
filename = str(filename)
self.assertTrue(sample_id in filename)
self.assertTrue(barcode in filename)
def assert_untrimmed_results(self, exp, obs_untrimmed_art):
obs_untrimmed = obs_untrimmed_art.view(
MultiplexedSingleEndBarcodeInSequenceDirFmt)
obs_untrimmed = obs_untrimmed.file.view(FastqGzFormat)
obs_untrimmed = gzip.decompress(obs_untrimmed.path.read_bytes())
self.assertEqual(exp, obs_untrimmed)
def setUp(self):
super().setUp()
self.demux_single_fn = self.plugin.methods['demux_single']
muxed_sequences_fp = self.get_data_path('forward.fastq.gz')
self.muxed_sequences = Artifact.import_data(
'MultiplexedSingleEndBarcodeInSequence', muxed_sequences_fp)
def test_typical(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAA', 'CCCC'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b'], name='id')))
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(self.muxed_sequences, metadata)
self.assert_demux_results(metadata.to_series(), obs_demuxed_art)
self.assert_untrimmed_results(b'@id6\nGGGGACGTACGT\n+\nzzzzzzzzzzzz\n',
obs_untrimmed_art)
def test_all_matched(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAA', 'CCCC', 'GGGG'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b', 'sample_c'],
name='id')))
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(self.muxed_sequences, metadata)
self.assert_demux_results(metadata.to_series(), obs_demuxed_art)
# obs_untrimmed should be empty, since everything matched
self.assert_untrimmed_results(b'', obs_untrimmed_art)
def test_none_matched(self):
metadata = CategoricalMetadataColumn(
pd.Series(['TTTT'], name='Barcode',
index=pd.Index(['sample_d'], name='id')))
with redirected_stdio(stderr=os.devnull):
with self.assertRaisesRegex(ValueError, 'demultiplexed'):
self.demux_single_fn(self.muxed_sequences, metadata)
def test_error_tolerance_filtering(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAG', 'CCCC'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b'], name='id')))
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(self.muxed_sequences, metadata)
# sample_a is dropped because of a substitution error (AAAA vs AAAG)
exp_samples_and_barcodes = pd.Series(['CCCC'], index=['sample_b'])
self.assert_demux_results(exp_samples_and_barcodes, obs_demuxed_art)
self.assert_untrimmed_results(b'@id1\nAAAAACGTACGT\n+\nzzzzzzzzzzzz\n'
b'@id3\nAAAAACGTACGT\n+\nzzzzzzzzzzzz\n'
b'@id6\nGGGGACGTACGT\n+\nzzzzzzzzzzzz\n',
obs_untrimmed_art)
def test_error_tolerance_high_enough_to_prevent_filtering(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAG', 'CCCC'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b'], name='id')))
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(self.muxed_sequences, metadata,
error_rate=0.25)
# This test should yield the same results as test_typical, above
self.assert_demux_results(metadata.to_series(), obs_demuxed_art)
self.assert_untrimmed_results(b'@id6\nGGGGACGTACGT\n+\nzzzzzzzzzzzz\n',
obs_untrimmed_art)
def test_extra_barcode_in_metadata(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAA', 'CCCC', 'GGGG', 'TTTT'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b', 'sample_c',
'sample_d'], name='id')))
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(self.muxed_sequences, metadata)
# TTTT/sample_d shouldn't be in the demuxed results, because there
# were no reads with that barcode present
exp_samples_and_barcodes = pd.Series(['AAAA', 'CCCC', 'GGGG'],
index=['sample_a', 'sample_b',
'sample_c'])
self.assert_demux_results(exp_samples_and_barcodes, obs_demuxed_art)
# obs_untrimmed should be empty, since everything matched
self.assert_untrimmed_results(b'', obs_untrimmed_art)
def test_variable_length_barcodes(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAAA', 'CCCCCC', 'GGGG'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b', 'sample_c'],
name='id')))
muxed_sequences_fp = self.get_data_path('variable_length.fastq.gz')
muxed_sequences = Artifact.import_data(
'MultiplexedSingleEndBarcodeInSequence', muxed_sequences_fp)
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(muxed_sequences, metadata)
# This test should yield the same results as test_typical, above, just
# with variable length barcodes
self.assert_demux_results(metadata.to_series(), obs_demuxed_art)
self.assert_untrimmed_results(b'', obs_untrimmed_art)
def test_batch_size(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAA', 'CCCC'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b'], name='id')))
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(self.muxed_sequences, metadata,
batch_size=1)
self.assert_demux_results(metadata.to_series(), obs_demuxed_art)
self.assert_untrimmed_results(b'@id6\nGGGGACGTACGT\n+\nzzzzzzzzzzzz\n',
obs_untrimmed_art)
def test_invalid_batch_size(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAA', 'CCCC'], name='Barcode',
index= | pd.Index(['sample_a', 'sample_b'], name='id') | pandas.Index |
import os
import shutil
#import re
import sys
import platform
import subprocess
import numpy as np
import json
import pickle
import pandas as pd
from pandas import Series
import xml.etree.ElementTree as ET
import glob
import argparse
try:
import lvdb
except:
import pdb as lvdb
print('using pdb instead of lvdb')
pass
def ensure_dir_exists (datadir):
if not os.path.exists(datadir):
os.makedirs(datadir)
if not os.path.exists(datadir):
themessage = 'Directory {} could not be created.'.format(datadir)
if (int(platform.python_version()[0]) > 2):
raise NotADirectoryError(themessage)
else:
# python 2 doesn't have the impressive exception vocabulary 3 does
# so just raising a generic exception with a useful description
raise BaseException(themessage)
def rsync_the_file (from_location, to_location):
# Assuming that the responses for how platform.system() responds to
# different OSes given here are correct (though not assuming case):
# https://stackoverflow.com/questions/1854/python-what-os-am-i-running-on
if platform.system().lower() is 'windows':
print('Windows detected. The rsync command that is about to be', \
'executed assumes a Linux or Mac OS; no guarantee that it', \
'will work with Windows. Please be ready to transfer files', \
'via alternate means if necessary.')
subprocess.call(['rsync', '-vaPhz', from_location, to_location])
def df_to_pickle(thedf, thefilename):
thedf.to_pickle(thefilename);
def df_to_csv(thedf, thefilename):
thedf.to_csv(thefilename, index_label='index');
def df_to_json(thedf, thefilename):
thedf.to_json(thefilename, orient='records', double_precision = 10, force_ascii = True);
def glob2df(datadir, linecount, jobnum_list):
print(datadir)
thepaths = glob.iglob(datadir + '/*/')
results_dirs_used = []
df_list = []
progress_counter = 1000;
counter = 0;
for dirname in sorted(thepaths):
dirstructure = dirname.split('/')
lastdir = dirstructure[-1]
if '_job_' not in lastdir:
# handle trailing slash if present
lastdir = dirstructure[-2];
if '_job_' not in lastdir:
# something's wrong; skip this case
continue;
if '_task_' not in lastdir:
# something's wrong; skip this case
continue;
if 'latest' in lastdir:
continue;
filename = dirname + 'summary.csv'
if not os.path.isfile(filename):
print('No summary file at ', filename);
# no summary file means no results, unless results saved using a
# different mechanism, which is out of scope of this script
continue;
missionname = dirname + 'mission.xml'
if not os.path.isfile(missionname):
print('No mission file at ', missionname);
continue;
split_on_task = lastdir.split('_task_')
tasknum = int(split_on_task[-1])
jobnum = int(split_on_task[0].split('_job_',1)[1])
if jobnum_list and jobnum not in jobnum_list:
# lvdb.set_trace()
# print('Job {} not in list of jobs; skipping'.format(jobnum))
continue;
counter += 1;
if counter > progress_counter:
print('j ', jobnum, ', t ', tasknum)
counter = 0;
# thisjob_df = pd.DataFrame(index=range(1))
thisjob_df = pd.read_csv(filename)
if thisjob_df.empty:
# no actual content in df; maybe only header rows
continue;
# Add column to df for job number
thisjob_df['job_num']=jobnum
# and task number
thisjob_df['task_num']=tasknum
# and results directory
thisjob_df['results_dir']=lastdir
# add how many rows there are in the df so plot scripts know what to
# expect
thisjob_df['num_rows']=len(thisjob_df.index)
df_to_append = pd.DataFrame()
thisjob_params_df = xml_param_df_cols(missionname);
num_lines = len(thisjob_df.index)
if linecount > 0:
if num_lines < linecount:
continue;
df_to_append = | pd.concat([thisjob_params_df]*num_lines, ignore_index=True) | pandas.concat |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from pandas.compat import range
import pandas as pd
import pandas.util.testing as tm
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons(object):
def test_df_boolean_comparison_error(self):
# GH#4576
# boolean comparisons with a tuple/list give unexpected results
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
# not shape compatible
with pytest.raises(ValueError):
df == (2, 2)
with pytest.raises(ValueError):
df == [2, 2]
def test_df_float_none_comparison(self):
df = pd.DataFrame(np.random.randn(8, 3), index=range(8),
columns=['A', 'B', 'C'])
with pytest.raises(TypeError):
df.__eq__(None)
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH#15077, non-empty DataFrame
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
result = getattr(df, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, pd.Series([2], ['bool']))
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH#15077 empty DataFrame
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, pd.Series([2], ['bool']))
@pytest.mark.parametrize('timestamps', [
[pd.Timestamp('2012-01-01 13:00:00+00:00')] * 2,
[pd.Timestamp('2012-01-01 13:00:00')] * 2])
def test_tz_aware_scalar_comparison(self, timestamps):
# Test for issue #15966
df = pd.DataFrame({'test': timestamps})
expected = pd.DataFrame({'test': [False, False]})
tm.assert_frame_equal(df == -1, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic(object):
def test_df_add_flex_filled_mixed_dtypes(self):
# GH#19611
dti = pd.date_range('2016-01-01', periods=3)
ser = pd.Series(['1 Day', 'NaT', '2 Days'], dtype='timedelta64[ns]')
df = pd.DataFrame({'A': dti, 'B': ser})
other = pd.DataFrame({'A': ser, 'B': ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{'A': pd.Series(['2016-01-02', '2016-01-03', '2016-01-05'],
dtype='datetime64[ns]'),
'B': ser * 2})
tm.assert_frame_equal(result, expected)
class TestFrameMulDiv(object):
"""Tests for DataFrame multiplication and division"""
# ------------------------------------------------------------------
# Mod By Zero
def test_df_mod_zero_df(self):
# GH#3590, modulo as ints
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
# this is technically wrong, as the integer portion is coerced to float
# ###
first = pd.Series([0, 0, 0, 0], dtype='float64')
second = pd.Series([np.nan, np.nan, np.nan, 0])
expected = pd.DataFrame({'first': first, 'second': second})
result = df % df
tm.assert_frame_equal(result, expected)
def test_df_mod_zero_array(self):
# GH#3590, modulo as ints
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
# this is technically wrong, as the integer portion is coerced to float
# ###
first = pd.Series([0, 0, 0, 0], dtype='float64')
second = pd.Series([np.nan, np.nan, np.nan, 0])
expected = pd.DataFrame({'first': first, 'second': second})
# numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = df.values % df.values
result2 = pd.DataFrame(arr, index=df.index,
columns=df.columns, dtype='float64')
result2.iloc[0:3, 1] = np.nan
tm.assert_frame_equal(result2, expected)
def test_df_mod_zero_int(self):
# GH#3590, modulo as ints
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = df % 0
expected = pd.DataFrame(np.nan, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
# numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = df.values.astype('float64') % 0
result2 = pd.DataFrame(arr, index=df.index, columns=df.columns)
tm.assert_frame_equal(result2, expected)
def test_df_mod_zero_series_does_not_commute(self):
# GH#3590, modulo as ints
# not commutative with series
df = pd.DataFrame(np.random.randn(10, 5))
ser = df[0]
res = ser % df
res2 = df % ser
assert not res.fillna(0).equals(res2.fillna(0))
# ------------------------------------------------------------------
# Division By Zero
def test_df_div_zero_df(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = df / df
first = pd.Series([1.0, 1.0, 1.0, 1.0])
second = pd.Series([np.nan, np.nan, np.nan, 1])
expected = pd.DataFrame({'first': first, 'second': second})
tm.assert_frame_equal(result, expected)
def test_df_div_zero_array(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
first = pd.Series([1.0, 1.0, 1.0, 1.0])
second = pd.Series([np.nan, np.nan, np.nan, 1])
expected = pd.DataFrame({'first': first, 'second': second})
with np.errstate(all='ignore'):
arr = df.values.astype('float') / df.values
result = pd.DataFrame(arr, index=df.index,
columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_df_div_zero_int(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = df / 0
expected = pd.DataFrame(np.inf, index=df.index, columns=df.columns)
expected.iloc[0:3, 1] = np.nan
tm.assert_frame_equal(result, expected)
# numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = df.values.astype('float64') / 0
result2 = pd.DataFrame(arr, index=df.index,
columns=df.columns)
tm.assert_frame_equal(result2, expected)
def test_df_div_zero_series_does_not_commute(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame(np.random.randn(10, 5))
ser = df[0]
res = ser / df
res2 = df / ser
assert not res.fillna(0).equals(res2.fillna(0))
class TestFrameArithmetic(object):
@pytest.mark.xfail(reason='GH#7996 datetime64 units not converted to nano',
strict=True)
def test_df_sub_datetime64_not_ns(self):
df = pd.DataFrame(pd.date_range('20130101', periods=3))
dt64 = np.datetime64('2013-01-01')
assert dt64.dtype == 'datetime64[D]'
res = df - dt64
expected = pd.DataFrame([pd.Timedelta(days=0), pd.Timedelta(days=1),
pd.Timedelta(days=2)])
tm.assert_frame_equal(res, expected)
@pytest.mark.parametrize('data', [
[1, 2, 3],
[1.1, 2.2, 3.3],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'), pd.NaT],
['x', 'y', 1]])
@pytest.mark.parametrize('dtype', [None, object])
def test_df_radd_str_invalid(self, dtype, data):
df = pd.DataFrame(data, dtype=dtype)
with pytest.raises(TypeError):
'foo_' + df
@pytest.mark.parametrize('dtype', [None, object])
def test_df_with_dtype_radd_int(self, dtype):
df = pd.DataFrame([1, 2, 3], dtype=dtype)
expected = pd.DataFrame([2, 3, 4], dtype=dtype)
result = 1 + df
tm.assert_frame_equal(result, expected)
result = df + 1
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_df_with_dtype_radd_nan(self, dtype):
df = pd.DataFrame([1, 2, 3], dtype=dtype)
expected = pd.DataFrame([np.nan, np.nan, np.nan], dtype=dtype)
result = np.nan + df
tm.assert_frame_equal(result, expected)
result = df + np.nan
tm.assert_frame_equal(result, expected)
def test_df_radd_str(self):
df = pd.DataFrame(['x', np.nan, 'x'])
tm.assert_frame_equal('a' + df, pd.DataFrame(['ax', np.nan, 'ax']))
tm.assert_frame_equal(df + 'a', | pd.DataFrame(['xa', np.nan, 'xa']) | pandas.DataFrame |
import pytest
from nerblackbox.modules.datasets.formatter.base_formatter import SENTENCES_ROWS
from nerblackbox.modules.datasets.formatter.sucx_formatter import SUCXFormatter
from pkg_resources import resource_filename
import pandas as pd
import os
from os.path import abspath, dirname, join
BASE_DIR = abspath(dirname(dirname(dirname(__file__))))
DATA_DIR = join(BASE_DIR, "data")
os.environ["DATA_DIR"] = DATA_DIR
class TestSucxFormatter:
formatter = SUCXFormatter("original_cased")
@pytest.mark.parametrize(
"sentences_rows",
[
(
[
[
["I", "O"],
["850-modellen", "O"],
["har", "O"],
["man", "O"],
["valt", "O"],
["en", "O"],
["tredje", "O"],
["variant", "O"],
[",", "O"],
["Delta-link", "B-product"],
[".", "O"],
],
[
["Audi", "B-product"],
["Coupé", "I-product"],
["Quattro", "I-product"],
["20V", "B-product"],
],
]
),
],
)
def test_format_data(self, sentences_rows: SENTENCES_ROWS):
self.formatter.dataset_path = resource_filename(
"nerblackbox", f"tests/test_data/original_data"
)
test_sentences_rows = self.formatter.format_data(shuffle=True, write_csv=False)
assert (
test_sentences_rows == sentences_rows
), f"ERROR! test_sentences_rows = {test_sentences_rows} != {sentences_rows}"
@pytest.mark.parametrize(
"df_train, df_val, df_test",
[
(
[
pd.DataFrame(
data=[
["O O", "Mening 1"],
["PER O", "Mening 2"],
["O PER", "Mening 3"],
["O PER", "Mening 4"],
]
),
| pd.DataFrame(data=[["O O", "Mening 5"], ["PER O", "Mening 6"]]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
This module contains the ReadSets class that is in charge
of reading the sets files, reshaping them to be used in
the build class, creating and reading the parameter files and
checking the errors in the definition of the sets and parameters
"""
import itertools as it
from openpyxl import load_workbook
import pandas as pd
from hypatia.error_log.Checks import (
check_nan,
check_index,
check_index_data,
check_table_name,
check_mapping_values,
check_mapping_ctgry,
check_sheet_name,
check_tech_category,
check_carrier_type,
check_years_mode_consistency,
)
from hypatia.error_log.Exceptions import WrongInputMode
import numpy as np
from hypatia.utility.constants import (
global_set_ids,
regional_set_ids,
technology_categories,
carrier_types,
)
from hypatia.utility.constants import take_trade_ids, take_ids, take_global_ids
MODES = ["Planning", "Operation"]
class ReadSets:
""" Class that reads the sets of the model, creates the parameter files with
default values and reads the filled parameter files
Attributes
------------
mode:
The mode of optimization including the operation and planning mode
path:
The path of the set files given by the user
glob_mapping : dict
A dictionary of the global set tables given by the user in the global.xlsx file
mapping : dict
A dictionary of the regional set tables given by the user in the regional
set files
connection_sheet_ids: dict
A nested dictionary that defines the sheet names of the parameter file of
the inter-regional links with their default values, indices and columns
global_sheet_ids : dict
A nested dictionary that defines the sheet names of the global parameter file
with their default values, indices and columns
regional_sheets_ids : dict
A nested dictionary that defines the sheet names of the regional parameter files
with their default values, indices and columns
trade_data : dict
A nested dictionary for storing the inter-regional link data
global_data : dict
A nested dictionary for storing the global data
data : dict
A nested dictionary for storing the regional data
"""
def __init__(self, path, mode="Planning"):
self.mode = mode
self.path = path
self._init_by_xlsx()
def _init_by_xlsx(self,):
"""
Reads and organizes the global and regional sets
"""
glob_mapping = {}
wb_glob = load_workbook(r"{}/global.xlsx".format(self.path))
sets_glob = wb_glob["Sets"]
set_glob_category = {key: value for key, value in sets_glob.tables.items()}
for entry, data_boundary in sets_glob.tables.items():
data_glob = sets_glob[data_boundary]
content = [[cell.value for cell in ent] for ent in data_glob]
header = content[0]
rest = content[1:]
df = pd.DataFrame(rest, columns=header)
glob_mapping[entry] = df
self.glob_mapping = glob_mapping
check_years_mode_consistency(
mode=self.mode, main_years=list(self.glob_mapping["Years"]["Year"])
)
for key, value in self.glob_mapping.items():
check_table_name(
file_name="global",
allowed_names=list(global_set_ids.keys()),
table_name=key,
)
check_index(value.columns, key, "global", pd.Index(global_set_ids[key]))
check_nan(key, value, "global")
if key == "Technologies":
check_tech_category(value, technology_categories, "global")
if key == "Carriers":
check_carrier_type(value, carrier_types, "global")
self.regions = list(self.glob_mapping["Regions"]["Region"])
self.main_years = list(self.glob_mapping["Years"]["Year"])
if "Timesteps" in self.glob_mapping.keys():
self.time_steps = list(self.glob_mapping["Timesteps"]["Timeslice"])
self.timeslice_fraction = self.glob_mapping["Timesteps"][
"Timeslice_fraction"
].values
else:
self.time_steps = ["Annual"]
self.timeslice_fraction = np.ones((1, 1))
# possible connections among the regions
if len(self.regions) > 1:
lines_obj = it.permutations(self.regions, r=2)
self.lines_list = []
for item in lines_obj:
if item[0] < item[1]:
self.lines_list.append("{}-{}".format(item[0], item[1]))
mapping = {}
for reg in self.regions:
wb = load_workbook(r"{}/{}.xlsx".format(self.path, reg))
sets = wb["Sets"]
self._setbase_reg = [
"Technologies",
"Carriers",
"Carrier_input",
"Carrier_output",
]
set_category = {key: value for key, value in sets.tables.items()}
reg_mapping = {}
for entry, data_boundary in sets.tables.items():
data = sets[data_boundary]
content = [[cell.value for cell in ent] for ent in data]
header = content[0]
rest = content[1:]
df = pd.DataFrame(rest, columns=header)
reg_mapping[entry] = df
mapping[reg] = reg_mapping
for key, value in mapping[reg].items():
check_table_name(
file_name=reg,
allowed_names=list(regional_set_ids.keys()),
table_name=key,
)
check_index(value.columns, key, reg, pd.Index(regional_set_ids[key]))
check_nan(key, value, reg)
if key == "Technologies":
check_tech_category(value, technology_categories, reg)
if key == "Carriers":
check_carrier_type(value, carrier_types, reg)
if key == "Carrier_input" or key == "Carrier_output":
check_mapping_values(
value,
key,
mapping[reg]["Technologies"],
"Technologies",
"Technology",
"Technology",
reg,
)
check_mapping_values(
mapping[reg]["Carrier_input"],
"Carrier_input",
mapping[reg]["Carriers"],
"Carriers",
"Carrier_in",
"Carrier",
reg,
)
check_mapping_values(
mapping[reg]["Carrier_output"],
"Carrier_output",
mapping[reg]["Carriers"],
"Carriers",
"Carrier_out",
"Carrier",
reg,
)
check_mapping_ctgry(
mapping[reg]["Carrier_input"],
"Carrier_input",
mapping[reg]["Technologies"],
"Supply",
reg,
)
check_mapping_ctgry(
mapping[reg]["Carrier_output"],
"Carrier_output",
mapping[reg]["Technologies"],
"Demand",
reg,
)
self.mapping = mapping
Technologies = {}
for reg in self.regions:
regional_tech = {}
for key in list(self.mapping[reg]["Technologies"]["Tech_category"]):
regional_tech[key] = list(
self.mapping[reg]["Technologies"].loc[
self.mapping[reg]["Technologies"]["Tech_category"] == key
]["Technology"]
)
Technologies[reg] = regional_tech
self.Technologies = Technologies
self._create_input_data()
def _create_input_data(self):
"""
Defines the sheets, indices and columns of the parameter files
"""
if len(self.regions) > 1:
# Create the columns of inter-regional links as a multi-index of the
# pairs of regions and the transmitted carriers
indexer = pd.MultiIndex.from_product(
[self.lines_list, self.glob_mapping["Carriers_glob"]["Carrier"]],
names=["Line", "Transmitted Carrier"],
)
self.connection_sheet_ids = {
"F_OM": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"V_OM": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Residual_capacity": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Capacity_factor_line": {
"value": 1,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Line_efficiency": {
"value": 1,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"AnnualProd_perunit_capacity": {
"value": 1,
"index": pd.Index(
["AnnualProd_Per_UnitCapacity"], name="Performance Parameter"
),
"columns": indexer,
},
}
self.global_sheet_ids = {
"Max_production_global": {
"value": 1e30,
"index": pd.Index(self.main_years, name="Years"),
"columns": self.glob_mapping["Technologies_glob"].loc[
(
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Demand"
)
& (
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Storage"
)
]["Technology"],
},
"Min_production_global": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": self.glob_mapping["Technologies_glob"].loc[
(
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Demand"
)
& (
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Storage"
)
]["Technology"],
},
"Glob_emission_cap_annual": {
"value": 1e30,
"index": pd.Index(self.main_years, name="Years"),
"columns": ["Global Emission Cap"],
},
}
if self.mode == "Planning":
self.connection_sheet_ids.update(
{
"INV": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Decom_cost": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Min_totalcap": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Max_totalcap": {
"value": 1e10,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Min_newcap": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Max_newcap": {
"value": 1e10,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Line_lifetime": {
"value": 1,
"index": pd.Index(
["Technical Life Time"], name="Performance Parameter"
),
"columns": indexer,
},
"Line_Economic_life": {
"value": 1,
"index": pd.Index(
["Economic Life time"], name="Performance Parameter"
),
"columns": indexer,
},
"Interest_rate": {
"value": 0.05,
"index": pd.Index(
["Interest Rate"], name="Performance Parameter"
),
"columns": indexer,
},
}
)
self.global_sheet_ids.update(
{
"Min_totalcap_global": {
"value": 0,
"index": | pd.Index(self.main_years, name="Years") | pandas.Index |
import pandas as pd
name_dict = {
'Name': ['a','b','c','d'],
'Score': [90,80,95,20]
}
df = pd.DataFrame(name_dict)
dfT = df.T
dfT.to_csv('file_name.csv', index=False)
print (df)
print (dfT)
summary_df = | pd.read_csv('summary.csv') | pandas.read_csv |
from itertools import product
import numpy as np
import pytest
from pandas.core.dtypes.common import is_interval_dtype
import pandas as pd
import pandas._testing as tm
class TestSeriesConvertDtypes:
# The answerdict has keys that have 4 tuples, corresponding to the arguments
# infer_objects, convert_string, convert_integer, convert_boolean
# This allows all 16 possible combinations to be tested. Since common
# combinations expect the same answer, this provides an easy way to list
# all the possibilities
@pytest.mark.parametrize(
"data, maindtype, answerdict",
[
(
[1, 2, 3],
np.dtype("int32"),
{
((True, False), (True, False), (True,), (True, False)): "Int32",
((True, False), (True, False), (False,), (True, False)): np.dtype(
"int32"
),
},
),
(
[1, 2, 3],
np.dtype("int64"),
{
((True, False), (True, False), (True,), (True, False)): "Int64",
((True, False), (True, False), (False,), (True, False)): np.dtype(
"int64"
),
},
),
(
["x", "y", "z"],
np.dtype("O"),
{
(
(True, False),
(True,),
(True, False),
(True, False),
): | pd.StringDtype() | pandas.StringDtype |
import numpy as np
import pandas as pd
from aif360.datasets import BinaryLabelDataset
from aif360.datasets.multiclass_label_dataset import MulticlassLabelDataset
from aif360.metrics import ClassificationMetric
def test_generalized_entropy_index():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 0],
[2, 1],
[2, 0],
[2, 1],
[2, 1]])
pred = data.copy()
pred[[3, 9], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
assert cm.generalized_entropy_index() == 0.2
pred = data.copy()
pred[:, -1] = np.array([0, 1, 1, 0, 0, 0, 0, 1, 1, 1])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
assert cm.generalized_entropy_index() == 0.3
def test_theil_index():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 0],
[2, 1],
[2, 0],
[2, 1],
[2, 1]])
pred = data.copy()
pred[[3, 9], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
assert cm.theil_index() == 4*np.log(2)/10
def test_between_all_groups():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 0],
[2, 1],
[2, 0],
[2, 1],
[2, 1]])
pred = data.copy()
pred[[3, 9], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
b = np.array([1, 1, 1.25, 1.25, 1.25, 1.25, 0.75, 0.75, 0.75, 0.75])
assert cm.between_all_groups_generalized_entropy_index() == 1/20*np.sum(b**2 - 1)
def test_between_group():
data = np.array([[0, 0, 1],
[0, 1, 0],
[1, 1, 0],
[1, 1, 1],
[1, 0, 0],
[1, 0, 0]])
pred = data.copy()
pred[[0, 3], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'feat2', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'feat2', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat', 'feat2'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat', 'feat2'])
cm = ClassificationMetric(bld, bld2, unprivileged_groups=[{'feat': 0}],
privileged_groups=[{'feat': 1}])
b = np.array([0.5, 0.5, 1.25, 1.25, 1.25, 1.25])
assert cm.between_group_generalized_entropy_index() == 1/12*np.sum(b**2 - 1)
def test_multiclass_confusion_matrix():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 2],
[2, 1],
[2, 0],
[2, 2],
[2, 1]])
pred = data.copy()
pred[3,1] = 0
pred[4,1] = 2
df = pd.DataFrame(data, columns=['feat', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
favorable_values = [0,1]
unfavorable_values = [2]
mcld = MulticlassLabelDataset(favorable_label = favorable_values, unfavorable_label = unfavorable_values , df = df , label_names=['label'],
protected_attribute_names=['feat'])
mcld2 = MulticlassLabelDataset(favorable_label = favorable_values, unfavorable_label = unfavorable_values , df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(mcld, mcld2, unprivileged_groups=[{'feat': 2}],
privileged_groups=[{'feat': 0},{'feat': 1}])
confusion_matrix = cm.binary_confusion_matrix()
actual_labels_df = df[['label']].values
actual_labels_df2 = df2[['label']].values
assert np.all(actual_labels_df == mcld.labels)
assert np.all(actual_labels_df2 == mcld2.labels)
assert confusion_matrix == {'TP': 7.0, 'FN': 1.0, 'TN': 2.0, 'FP': 0.0}
fnr = cm.false_negative_rate_difference()
assert fnr == -0.2
def test_generalized_binary_confusion_matrix():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 0],
[1, 2],
[0, 0],
[0, 0],
[1, 2]])
pred = np.array([[0, 1, 0.8],
[0, 0, 0.6],
[1, 0, 0.7],
[1, 1, 0.8],
[1, 2, 0.36],
[1, 0, 0.82],
[1, 1, 0.79],
[0, 2, 0.42],
[0, 1, 0.81],
[1, 2, 0.3]])
df = pd.DataFrame(data, columns=['feat', 'label'])
df2 = | pd.DataFrame(pred, columns=['feat', 'label', 'score']) | pandas.DataFrame |
import tkinter
from tkinter import *
from tkinter import Tk
from tkinter import ttk
from tkinter import Label
from tkinter import StringVar
from tkinter import filedialog
from tkinter import messagebox
from matplotlib import pyplot as plt
import numpy as np
import statistics as st
import gpxpy
import pandas as pd
import math
from datetime import datetime
import os
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
# map functionality
import mplleaflet
import subprocess
# Creating the class of the application
class Application(Tk):
# Defining the __init__ function
def __init__(self):
super().__init__()
self.geometry('750x2520')
self.grid()
self.distancesR1 = []
self.datesR1 = []
self.timesR1 = []
self.numR1 = 0
self.speedR1 = []
self.distancesR2 = []
self.datesR2 = []
self.timesR2 = []
self.numR2 = 0
self.speedR2 = []
self.rider1 = []
self.rider2 = []
self.map_df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
def unfold(df,s):
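# (comment added for clarity) expand a column of sequences into a DataFrame
# with one column per element position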
df=df[s].values
lst=[]
for i in df:
dic={}
for j in range(len(i)):
dic[j]=i[j]
lst.append(dic)
return | pd.DataFrame(lst) | pandas.DataFrame |
# Copyright 2019 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""metnet_seg_experiment_evaluator.py
Experiment evaluator for the metNet segmentation experiments.
"""
from src.utils.general_utils import read_hdf5, read_hdf5_multientry, write_hdf5
import argparse
import os
import numpy as np
from glob import glob
from medpy import metric as mpm
from sklearn import metrics as skm
import pandas as pd
from math import sqrt
import keras.backend.tensorflow_backend as K
from src.utils.data_generators import FCN2DDatasetGenerator
import time
import datetime
from keras.models import load_model
def relative_volume_difference(A, B):
'''Compute relative volume difference between two segmentation masks.
The voxel size gets canceled out in the division and is therefore not
a required input.
:param A: (binary numpy array) reference segmentation mask
:param B: (binary numpy array) predicted segmentation mask
:return: relative volume difference
'''
volume_A = int(np.sum(A))
volume_B = int(np.sum(B))
rvd = (volume_A - volume_B) / volume_A
return rvd
def jaccard_index(A, B):
'''Compute Jaccard index (IoU) between two segmentation masks.
:param A: (numpy array) reference segmentation mask
:param B: (numpy array) predicted segmentation mask
:return: Jaccard index
'''
both = np.logical_and(A, B)
either = np.logical_or(A, B)
ji = int(np.sum(both)) / int(np.sum(either))
return ji
def dice_similarity_coefficient(A, B):
'''Compute Dice similarity coefficient between two segmentation masks.
:param A: (numpy array) reference segmentation mask
:param B: (numpy array) predicted segmentation mask
:return: Dice similarity coefficient
'''
both = np.logical_and(A, B)
dsc = 2 * int(np.sum(both)) / (int(np.sum(A)) + int(np.sum(B)))
return dsc
def precision(A, B):
'''Compute precision between two segmentation masks.
:param A: (numpy array) reference segmentation mask
:param B: (numpy array) predicted segmentation mask
:return: precision
'''
tp = int(np.sum(np.logical_and(A, B)))
fp = int(np.sum(np.logical_and(B, np.logical_not(A))))
p = tp / (tp + fp)
return p
def recall(A, B):
'''Compute recall between two segmentation masks.
:param A: (numpy array) reference segmentation mask
:param B: (numpy array) predicted segmentation mask
:return: recall
'''
tp = int(np.sum(np.logical_and(A, B)))
fn = int(np.sum(np.logical_and(A, np.logical_not(B))))
r = tp / (tp + fn)
return r
def matthews_correlation_coefficient(A, B):
'''Compute Matthews correlation coefficient between two segmentation masks.
:param A: (numpy array) reference segmentation mask
:param B: (numpy array) predicted segmentation mask
:return: Matthews correlation coefficient
'''
tp = int(np.sum(np.logical_and(A, B)))
fp = int(np.sum(np.logical_and(B, np.logical_not(A))))
tn = int(np.sum(np.logical_and(np.logical_not(A), np.logical_not(B))))
fn = int(np.sum(np.logical_and(A, np.logical_not(B))))
mcc = (tp * tn - fp * fn) / (sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)))
return mcc
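# Minimal sanity-check sketch added for illustration (the helper name and the
# 2x2 toy masks are assumptions, not part of the original evaluator):
def _example_overlap_metrics():
    A = np.array([[1, 1], [0, 0]])  # reference mask
    B = np.array([[1, 0], [0, 0]])  # predicted mask
    # intersection 1, union 2 -> Jaccard 0.5; Dice 2*1/(2+1) ~= 0.667
    return (jaccard_index(A, B), dice_similarity_coefficient(A, B),
            precision(A, B), recall(A, B))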
def main(FLAGS):
# set GPU device to use
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
# get the models to evaluate
ckpts = glob(os.path.join(FLAGS.ckpt_dir, '*.h5'))
# get data files and sort them
image_files = os.listdir(FLAGS.test_X_dir)
anno_files = os.listdir(FLAGS.test_y_dir)
image_files.sort()
anno_files.sort()
for ckpt in ckpts:
K.clear_session()
# set model to load
ckpt_name = os.path.basename(ckpt)
# create a location to store evaluation metrics
metrics = np.zeros((len(image_files), FLAGS.classes, 8))
overall_accuracy = np.zeros((len(image_files),))
# create a file writer to store the metrics
excel_name = os.path.splitext(os.path.basename(ckpt))[0] + '.xlsx'
writer = pd.ExcelWriter(excel_name)
model = load_model(ckpt)
for i in range(len(image_files)):
# define path to the test data
test_path = os.path.join(FLAGS.test_X_dir, image_files[i])
generator = FCN2DDatasetGenerator(test_path,
batch_size=FLAGS.batch_size,
subset='test',
normalization=FLAGS.normalization,
categorical_labels=True,
num_classes=FLAGS.classes)
# check if the images and annotations are the correct files
print(image_files[i], anno_files[i])
preds = model.predict_generator(generator.generate(), steps=len(generator))
stamp = datetime.datetime.fromtimestamp(time.time()).strftime('date_%Y_%m_%d_time_%H_%M_%S')
write_hdf5('fcn_predictions_' + stamp + '.h5', preds)
pred_file = glob(os.path.join(FLAGS.predictions_temp_dir, '*.h5'))[0]
pt_name = image_files[i].split('.')[0]
new_name_raw = pt_name + ckpt_name
new_file_raw = os.path.join(FLAGS.predictions_final_dir, new_name_raw)
os.rename(pred_file, new_file_raw)
ref = read_hdf5_multientry(os.path.join(FLAGS.test_y_dir, anno_files[i]))
ref = np.squeeze(np.asarray(ref))
preds = read_hdf5(new_file_raw)
preds = np.argmax(preds, axis=-1)
overall_accuracy[i] = skm.accuracy_score(ref.flatten(), preds.flatten())
for j in range(FLAGS.classes):
organ_pred = (preds == j).astype(np.int64)
organ_ref = (ref == j).astype(np.int64)
if np.sum(organ_pred) == 0 or np.sum(organ_ref) == 0:
metrics[i, j, 0] = 0.
metrics[i, j, 1] = 0.
metrics[i, j, 2] = 1.
metrics[i, j, 3] = 0.
metrics[i, j, 4] = 0.
metrics[i, j, 5] = 0.
metrics[i, j, 6] = np.inf
metrics[i, j, 7] = np.inf
else:
metrics[i, j, 0] = jaccard_index(organ_ref, organ_pred)
metrics[i, j, 1] = dice_similarity_coefficient(organ_ref, organ_pred)
metrics[i, j, 2] = relative_volume_difference(organ_ref, organ_pred)
metrics[i, j, 3] = precision(organ_ref, organ_pred)
metrics[i, j, 4] = recall(organ_ref, organ_pred)
metrics[i, j, 5] = matthews_correlation_coefficient(organ_ref, organ_pred)
metrics[i, j, 6] = mpm.hd95(organ_pred, organ_ref)
metrics[i, j, 7] = mpm.assd(organ_pred, organ_ref)
print(overall_accuracy[i])
print(metrics[i])
for k in range(metrics.shape[-1]):
data = | pd.DataFrame(metrics[:, :, k], columns=['bg', 'met']) | pandas.DataFrame |
# link: https://github.com/liyaguang/DCRNN
import numpy as np
import pandas as pd
import json
import util
outputdir = 'output/PEMS_BAY'
util.ensure_dir(outputdir)
dataurl = 'input/PEMS-BAY/'
dataname = outputdir+'/PEMS_BAY'
dataset = | pd.read_csv(dataurl+'sensor_graph/graph_sensor_locations_bay.csv', header=None) | pandas.read_csv |
"""
Module: LMR_psms.py
Purpose: Module containing methods for various Proxy System Models (PSMs)
Adapted from LMR_proxy and LMR_calibrate using OOP by <NAME>
Originator: <NAME>, U. of Washington
Revisions:
- Use of new more efficient "get_distance" function to calculate the
distance between proxy sites and analysis grid points.
[R. Tardif, U. of Washington, February 2016]
- Added the "LinearPSM_TorP" class, allowing the use of
temperature-calibrated *OR* precipitation-calibrated linear PSMs.
For each proxy record to be assimilated, the selection of the PSM
(i.e. T vs P) is perfomed on the basis of the smallest MSE of
regression residuals.
[<NAME>, U. of Washington, April 2016]
- Added the "h_interpPSM" psm class for use of isotope-enabled GCM
data as prior: Ye values are taken as the prior isotope field either
at the nearest grid pt. or as the weighted-average of values at grid
points surrounding the assimilated isotope proxy site.
[ <NAME>, U. of Washington, June 2016 ]
- Added the "BilinearPSM" class for PSMs based on bivariate linear
regressions w/ temperature AND precipitation/PSDI as independent
variables.
[ R. Tardif, Univ. of Washington, June 2016 ]
- Added the capability of calibrating/using PSMs calibrated on the basis
of a proxy record seasonality metadata.
[ R. Tardif, Univ. of Washington, July 2016 ]
- Added the capability of objectively calibrating/using PSMs calibrated
on the basis objectively-derived seasonality.
[ <NAME>, Univ. of Washington, December 2016 ]
- Added the "BayesRegUK37PSM" class, the forward model used in
the assimilation of alkenone uk37 proxy data. Code based on
spline coefficients provided by <NAME> (U of Arizona).
[ <NAME>, Univ. of Washington, January 2017 ]
- Calibration of statistical PSMs now all referenced to anomalies w.r.t.
20th century.
[ <NAME>, Univ. of Washington, August 2017 ]
"""
import numpy as np
import logging
import os.path
import LMR_calibrate
from LMR_utils import (haversine, get_distance, smooth2D,
get_data_closest_gridpt, class_docs_fixer)
import pandas as pd
from scipy.stats import linregress
import statsmodels.formula.api as sm
from abc import ABCMeta, abstractmethod
from load_data import load_cpickle
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Needed in BayesRegUK37PSM class
#import matlab.engine # for old matlab implementation
from scipy.io import loadmat
import scipy.interpolate as interpolate
# Logging output utility, configuration controlled by driver
logger = logging.getLogger(__name__)
class BasePSM(metaclass=ABCMeta):
"""
Proxy system model.
Parameters
----------
config: LMR_config
Configuration module used for current LMR run.
proxy_obj: BaseProxyObject like
Proxy object that this PSM is being attached to
psm_kwargs: dict (unpacked)
Specfic arguments for the target PSM
"""
def __init__(self, config, proxy_obj, **psm_kwargs):
pass
@abstractmethod
def psm(self, prior_obj):
"""
Maps a given state to observations for the given proxy
Parameters
----------
prior_obj BasePriorObject
Prior to be mapped to observation space (Ye).
Returns
-------
Ye:
Equivalent observation from prior
"""
pass
@abstractmethod
def error(self):
"""
Error model for given PSM.
"""
pass
@staticmethod
@abstractmethod
def get_kwargs(config):
"""
Returns keyword arguments required for instantiation of given PSM.
Parameters
----------
config: LMR_config
Configuration module used for current LMR run.
Returns
-------
kwargs: dict
Keyword arguments for given PSM
"""
pass
@class_docs_fixer
class LinearPSM(BasePSM):
"""
PSM based on linear regression.
Attributes
----------
lat: float
Latitude of associated proxy site
lon: float
Longitude of associated proxy site
elev: float
Elevation/depth of proxy site
corr: float
Correlation of proxy record against calibration data
slope: float
Linear regression slope of proxy/calibration fit
intercept: float
Linear regression y-intercept of proxy/calibration fit
R: float
Mean-squared error of proxy/calibration fit
Parameters
----------
config: LMR_config
Configuration module used for current LMR run.
proxy_obj: BaseProxyObject like
Proxy object that this PSM is being attached to
psm_data: dict
Pre-calibrated PSM dictionary containing current associated
proxy site's calibration information
Raises
------
ValueError
If PSM is below critical correlation threshold.
"""
def __init__(self, config, proxy_obj, psm_data=None, calib_obj=None):
self.psm_key = 'linear'
proxy = proxy_obj.type
site = proxy_obj.id
r_crit = config.psm.linear.psm_r_crit
self.lat = proxy_obj.lat
self.lon = proxy_obj.lon
self.elev = proxy_obj.elev
self.datatag_calib = config.psm.linear.datatag_calib
self.avgPeriod = config.psm.linear.avgPeriod
# Very crude assignment of sensitivity
# TODO: more inclusive & flexible way of doing this
if self.datatag_calib in ['GPCC', 'DaiPDSI']:
self.sensitivity = 'moisture'
else:
self.sensitivity = 'temperature'
try:
# Try using pre-calibrated psm_data
if psm_data is None:
psm_data = self._load_psm_data(config)
psm_site_data = psm_data[(proxy, site)]
self.corr = psm_site_data['PSMcorrel']
self.slope = psm_site_data['PSMslope']
self.intercept = psm_site_data['PSMintercept']
self.R = psm_site_data['PSMmse']
# check if seasonality defined in the psm data
# if it is, return as an attribute
if 'Seasonality' in list(psm_site_data.keys()):
self.seasonality = psm_site_data['Seasonality']
except KeyError as e:
raise ValueError('Proxy in database but not found in pre-calibration file... '
'Skipping: {}'.format(proxy_obj.id))
except IOError as e:
# No precalibration found, have to do it for this proxy
print('No pre-calibration file found for'
' {} ({}) ... calibrating ...'.format(proxy_obj.id,
proxy_obj.type))
# check if calibration object already exists
if calib_obj is None:
#print 'calibration object does not exist ...creating it...'
# TODO: Fix call and Calib Module
datag_calib = config.psm.linear.datatag_calib
calib_obj= LMR_calibrate.calibration_assignment(datag_calib)
calib_obj.datadir_calib = config.psm.linear.datadir_calib
calib_obj.read_calibration()
self.calibrate(calib_obj, proxy_obj)
# Raise exception if critical correlation value not met
if abs(self.corr) < r_crit:
raise ValueError(('Proxy model correlation ({:.2f}) does not meet '
'critical threshold ({:.2f}).'
).format(self.corr, r_crit))
# TODO: Ideally prior state info and coordinates should all be in single obj
def psm(self, Xb, X_state_info, X_coords):
"""
Maps a given state to observations for the given proxy
Parameters
----------
Xb: ndarray
State vector to be mapped into observation space (stateDim x ensDim)
X_state_info: dict
Information pertaining to variables in the state vector
X_coords: ndarray
Coordinates for the state vector (stateDim x 2)
Returns
-------
Ye:
Equivalent observation from prior
"""
# ----------------------
# Calculate the Ye's ...
# ----------------------
# Associate state variable with PSM calibration dataset
# TODO: possible calibration sources hard coded for now... should define associations at config level
if self.datatag_calib in ['GISTEMP', 'MLOST', 'NOAAGlobalTemp', 'HadCRUT', 'BerkeleyEarth']:
# temperature
state_var = 'tas_sfc_Amon'
elif self.datatag_calib in ['GPCC','DaiPDSI']:
# moisture
if self.datatag_calib == 'GPCC':
state_var = 'pr_sfc_Amon'
elif self.datatag_calib == 'DaiPDSI':
state_var = 'scpdsi_sfc_Amon'
else:
raise KeyError('Unrecognized moisture calibration source.'
' State variable not identified for Ye calculation.')
else:
raise KeyError('Unrecognized calibration-state variable association.'
' State variable not identified for Ye calculation.')
if state_var not in X_state_info.keys():
raise KeyError('Needed variable not in state vector for Ye'
' calculation.')
# TODO: end index should already be +1, more pythonic
tas_startidx, tas_endidx = X_state_info[state_var]['pos']
ind_lon = X_state_info[state_var]['spacecoords'].index('lon')
ind_lat = X_state_info[state_var]['spacecoords'].index('lat')
X_lons = X_coords[tas_startidx:(tas_endidx+1), ind_lon]
X_lats = X_coords[tas_startidx:(tas_endidx+1), ind_lat]
tas_data = Xb[tas_startidx:(tas_endidx+1), :]
gridpoint_data = self.get_close_grid_point_data(tas_data.T,
X_lons,
X_lats)
Ye = self.basic_psm(gridpoint_data)
return Ye
def basic_psm(self, data):
"""
A PSM that doesn't need to do the state unpacking steps...
Parameters
----------
data: ndarray
Data to be used in the psm calculation of estimated observations
(Ye)
Returns
-------
Ye: ndarray
Estimated observations from the proxy system model
"""
return self.slope * data + self.intercept
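# Illustrative note (comment added here; the numbers below are invented): a
# linear PSM with slope 2.0 and intercept 0.5 maps a calibration-variable
# anomaly of 1.2 to an estimated observation Ye = 2.0 * 1.2 + 0.5 = 2.9.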
def get_close_grid_point_data(self, data, lon, lat):
"""
Extracts data along the sampling dimension that is closest to the
grid point (lat/lon) of the current PSM object.
Parameters
----------
data: ndarray
Gridded data matching dimensions of (sample, lat, lon) or
(sample, lat*lon).
lon: ndarray
Longitudes pertaining to the input data. Can have shape as a single
vector (lat), a grid (lat, lon), or a flattened grid (lat*lon).
lat: ndarray
Latitudes pertaining to the input data. Can have shape as a single
vector (lon), a grid (lat, lon), or a flattened grid (lat*lon).
Returns
-------
tmp_dat: ndarray
Grid point data closes to the lat/lon of the current PSM object
"""
lonshp = lon.shape
latshp = lat.shape
# If not equal we got a single vector?
if lonshp != latshp:
lon, lat = np.meshgrid(lon, lat)
if len(lon.shape) > 1:
lon = lon.ravel()
lat = lat.ravel()
# Calculate distance
dist = haversine(self.lon, self.lat, lon, lat)
if len(dist) in data.shape:
loc_idx = dist.argmin()
tmp_dat = data[..., loc_idx]
else:
# TODO: This is not general lat/lon being swapped, OK for now...
min_dist_lat_idx, \
min_dist_lon_idx = np.unravel_index(dist.argmin(), data.shape[-2:])
tmp_dat = data[..., min_dist_lat_idx, min_dist_lon_idx]
return tmp_dat
# Define the error model for this proxy
@staticmethod
def error():
return 0.1
# TODO: Simplify a lot of the actions in the calibration
def calibrate(self, C, proxy, diag_output=False, diag_output_figs=False):
"""
Calibrate given proxy record against observation data and set relevant
PSM attributes.
Parameters
----------
C: calibration_master like
Calibration object containing data, time, lat, lon info
proxy: BaseProxyObject like
Proxy object to fit to the calibration data
diag_output, diag_output_figs: bool, optional
Diagnostic output flags for calibration method
"""
calib_spatial_avg = False
Npts = 9 # nb of neighboring pts used in smoothing
#print 'Calibrating: ', '{:25}'.format(proxy.id), '{:35}'.format(proxy.type)
# --------------------------------------------
# Use linear model (regression) as default PSM
# --------------------------------------------
nbmaxnan = 0
# Look for indices of calibration grid point closest in space (in 2D)
# to proxy site
dist = get_distance(proxy.lon, proxy.lat, C.lon, C.lat)
# indices of nearest grid pt.
jind, kind = np.unravel_index(dist.argmin(), dist.shape)
if calib_spatial_avg:
C2Dsmooth = np.zeros(
[C.time.shape[0], C.lat.shape[0], C.lon.shape[0]])
for m in range(C.time.shape[0]):
C2Dsmooth[m, :, :] = smooth2D(C.temp_anomaly[m, :, :], n=Npts)
calvals = C2Dsmooth[:, jind, kind]
else:
calvals = C.temp_anomaly[:, jind, kind]
# -------------------------------------------------------
# Calculate averages of calibration data over appropriate
# intervals (annual or according to proxy seasonality)
# -------------------------------------------------------
if self.avgPeriod == 'annual':
# Simply use annual averages
avgMonths = [1,2,3,4,5,6,7,8,9,10,11,12]
elif 'season' in self.avgPeriod:
# Consider the seasonality of the proxy record
avgMonths = proxy.seasonality
else:
print('ERROR: Unrecognized value for avgPeriod! Exiting!')
exit(1)
nbmonths = len(avgMonths)
cyears = np.asarray(list(set([C.time[k].year for k in range(len(C.time))]))) # 'set' is used to get unique values
nbcyears = len(cyears)
reg_x = np.zeros(shape=[nbcyears])
reg_x[:] = np.nan # initialize with nan's
for i in range(nbcyears):
# monthly data from current year
indsyr = [j for j,v in enumerate(C.time) if v.year == cyears[i] and v.month in avgMonths]
# check if data from previous year is to be included
indsyrm1 = []
if any(m < 0 for m in avgMonths):
year_before = [abs(m) for m in avgMonths if m < 0]
indsyrm1 = [j for j,v in enumerate(C.time) if v.year == cyears[i]-1. and v.month in year_before]
# check if data from following year is to be included
indsyrp1 = []
if any(m > 12 for m in avgMonths):
year_follow = [m-12 for m in avgMonths if m > 12]
indsyrp1 = [j for j,v in enumerate(C.time) if v.year == cyears[i]+1. and v.month in year_follow]
inds = indsyrm1 + indsyr + indsyrp1
if len(inds) == nbmonths: # all months are in the data
tmp = np.nanmean(calvals[inds],axis=0)
nancount = np.isnan(calvals[inds]).sum(axis=0)
if nancount > nbmaxnan: tmp = np.nan
else:
tmp = np.nan
reg_x[i] = tmp
# ------------------------
# Set-up linear regression
# ------------------------
# Use pandas DataFrame to store proxy & calibration data side-by-side
header = ['variable', 'y']
# Fill-in proxy data
df = | pd.DataFrame({'time':proxy.time, 'y': proxy.values}) | pandas.DataFrame |
"""
In this file, we implement the functionality that anonymizes trace variants based on
the DAFSA automaton, using differential privacy
"""
import pandas as pd
import numpy as np
import random as r
import time
import math
from scipy.stats import laplace
#import swifter
def build_DAFSA_bit_vector(data):
#calculate the bit vector dataframe from the trace- and state-annotated event log
#getting unique dafsa edges and trace variant
# data=data.groupby(['prev_state', 'concept:name','state','trace_variant']).size().reset_index().rename(columns={0: 'count'})
# data.drop('count',axis=1, inplace=True)
bit_vector_df=data.groupby(['prev_state', 'concept:name','state','trace_variant'])['case:concept:name'].count().unstack().reset_index()
# bit_vector_noise = data.groupby(['prev_state', 'concept:name', 'state'])[
# 'case:concept:name'].count() # .unstack().reset_index()
#indexed by transition
# bit_vector_df=data.groupby(['prev_state', 'concept:name','state'])['trace_variant'].unique().apply(list)
#indexed by trace_variant
# bit_vector_trace_variant = data.groupby(['trace_variant'])[['prev_state', 'concept:name', 'state']].apply(list)
# del(data)
# bit_vector_df[:]=True
# bit_vector_df=bit_vector_df.reset_index()
# bit_vector_df = bit_vector_df.to_frame()
# fix memory error
# bit_vector_df=bit_vector_df.unstack()
# print('1')
# bit_vector_df=bit_vector_df.reset_index()
# print('2')
# bit_vector_df.fillna(False,inplace=True)
# print('3')
# res=data.groupby(['prev_state', 'concept:name','state','trace_variant'])['case:concept:name'].transform('any')#.unstack().reset_index()
# bit_vector_df= pd.pivot_table(data=data, values= 'case:concept:name', index=['prev_state', 'concept:name','state'],columns=['trace_variant'], aggfunc='count', fill_value=0).reset_index()
bit_vector_df['added_noise']= [0]* bit_vector_df.shape[0]
# bit_vector_df.drop('case:concept:name', axis=1, inplace=True)
return bit_vector_df
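# Clarifying note (comment added here, not part of the original source): the
# frame built above has one row per DAFSA transition (prev_state, activity,
# state), one column per trace variant holding how many recorded events cross
# that transition, and an 'added_noise' counter initialised to zero.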
def build_DAFSA_bit_vector_compacted(data,eps):
#calculate the bit vector dataframe from the trace- and state-annotated event log
#indexed by transition
#not unique as we perform weighted sampling
bit_vector_df=data.groupby(['prev_state', 'concept:name','state'])['trace_variant'].apply(list)
#indexed by trace_variant
bit_vector_trace_variant = data.groupby(['trace_variant'])[['prev_state', 'concept:name', 'state']].apply(lambda x: x.values.tolist())
# del(data)
# bit_vector_df[:]=True
# bit_vector_df=bit_vector_df.reset_index()
bit_vector_df = bit_vector_df.to_frame()
# fix memory error
bit_vector_df['added_noise']= [0]* bit_vector_df.shape[0]
noise=[get_noise(eps) for x in range(0,bit_vector_df.shape[0])]
bit_vector_df['noise']=noise
# bit_vector_df.drop('case:concept:name', axis=1, inplace=True)
# print("*********** yay ***************&&")
return bit_vector_df ,bit_vector_trace_variant
def get_noise(eps):
sens=1
noise = laplace.rvs(loc=0, scale=sens / eps, size=1)[0]
noise = int(math.ceil(abs(noise)))
return noise
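# Hedged illustration (comment added here): the sensitivity is fixed to 1, so
# the Laplace scale is 1/eps; e.g. eps = 0.5 gives scale 2, and taking the
# ceiling of the absolute draw yields a positive integer count of traces to add.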
def reversed_normalization(a):
# where 0 has the largest weight.
m = max(a)
a = m - a
#if all edges need the same noise
# if need_noise.added_noise.max()==need_noise.added_noise.min():
# #make the weight for the one that is common between most traces
# s=need_noise.iloc[:,3:-1].sum(axis=1)
# a=s/s.sum()
if sum(a)==0:
#if all the items are zeros
a=(a+1)/a.shape[0]
else:
a=a/sum(a)
return a
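# Minimal sketch added for illustration (the helper name and the toy values are
# assumptions, not part of the original module): edges that have received the
# least noise so far end up with the largest sampling weight.
def _example_reversed_normalization():
    a = pd.Series([0, 1, 3])  # added_noise counters of three edges
    # m = 3, weights = (3 - a) / 5 -> [0.6, 0.4, 0.0]
    return reversed_normalization(a)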
def pick_random_edge_trace(bit_vector_df,noise):
#picks a random edge, then picks a random trace variant of that edge. It adds the noise
#to the column added noise
# need_noise = bit_vector_df.loc[bit_vector_df.added_noise < noise, :].dropna()
added_noise=bit_vector_df.added_noise
need_noise=added_noise[added_noise<noise]
#performing weighted random sampling
# perform reverse weight
# make the weight of the edge that is part of a lot of trace variants to be larger
edge_sampling_weights=reversed_normalization(need_noise)
picked_edge_index =need_noise.sample(weights=edge_sampling_weights).index[0]
# pick random trace variant
# traces=picked_edge.drop(['prev_state','concept:name','state','added_noise'],axis=1)
traces=bit_vector_df.iloc[picked_edge_index,3:-1]
traces=traces.T.reset_index() #transpose the traces
traces.columns=['trace_variant','trace_count']
# traces.trace_count=traces.trace_count.astype(int)
"""*** Compare here"""
trace_sampling_weights=traces.trace_count/traces.trace_count.sum()
#picking traces as the noise size
# picked_trace= traces.sample(n=noise, weights=trace_sampling_weights, replace=True)
picked_trace = traces.sample(n=noise, weights=trace_sampling_weights, replace=True)
# picked_trace=picked_trace.trace_variant.iloc[0]
# picked_trace = picked_trace.trace_variant
# update the noise of all edges of that trace
# bit_vector_df.added_noise[bit_vector_df[picked_trace]>0]=bit_vector_df.added_noise[bit_vector_df[picked_trace]>0]+1
for trace_index in range(0,noise):
trace= picked_trace.trace_variant.iloc[trace_index]
bit_vector_df.added_noise[bit_vector_df[trace] > 0] = bit_vector_df.added_noise[
bit_vector_df[trace] > 0] + 1
return bit_vector_df, picked_trace
def pick_random_edge_trace_compacted(bit_vector_df, bit_vector_trace_variant):
#picks a random edge, then picks a random trace variant of that edge. It adds the noise
#to the column added noise
func_start=time.time()
# need_noise = bit_vector_df.loc[bit_vector_df.added_noise < noise, :].dropna()
added_noise=bit_vector_df.added_noise
need_noise=added_noise[added_noise<bit_vector_df.noise]
#performing weighted random sampling
# perform reverse weight
# make the weight of the edge that is part of a lot of trace variants to be larger
start=time.time()
edge_sampling_weights=reversed_normalization(need_noise)
end=time.time()
# print("reversed_normalization : %s" %(end-start))
picked_edge_index =need_noise.sample(weights=edge_sampling_weights).index[0]
# picked_edge_index = need_noise.sample().index[0]
# pick random trace variant
# traces=picked_edge.drop(['prev_state','concept:name','state','added_noise'],axis=1)
start=time.time()
traces=pd.Series(bit_vector_df.loc[picked_edge_index,'trace_variant'])
traces=traces.value_counts().to_frame().reset_index()
traces.columns=['trace_variant','trace_count']
end = time.time()
# print("counting : %s" % (end - start))
# traces.trace_count=traces.trace_count.astype(int)
"""*** Compare here"""
trace_sampling_weights=traces.trace_count/traces.trace_count.sum()
# trace_sampling_weights = traces/ traces.sum()
#picking traces as the noise size
# picked_trace= traces.sample(n=noise, weights=trace_sampling_weights, replace=True)
temp=bit_vector_df.loc[picked_edge_index,'noise']
picked_trace = traces.sample(n=bit_vector_df.loc[picked_edge_index,'noise'], weights=trace_sampling_weights, replace=True)
# picked_trace = traces.sample(n=noise, weights=trace_sampling_weights, replace=True)
# picked_trace=picked_trace.trace_variant.iloc[0]
# picked_trace = picked_trace.trace_variant
# update the noise of all edges of that trace
# bit_vector_df.added_noise[bit_vector_df[picked_trace]>0]=bit_vector_df.added_noise[bit_vector_df[picked_trace]>0]+1
# for trace_index in range(0,noise):
# trace= picked_trace.trace_variant.iloc[trace_index]
# bit_vector_df.added_noise[bit_vector_df[trace] > 0] = bit_vector_df.added_noise[
# bit_vector_df[trace] > 0] + 1
start=time.time()
for trace_index in picked_trace.trace_variant:
trace_edges= bit_vector_trace_variant.loc[trace_index]
bit_vector_df.added_noise.loc[trace_edges] = bit_vector_df.added_noise.loc[trace_edges] + 1
end = time.time()
# print("trace index loop : %s" % (end - start))
func_end=time.time()
# print("pick_random_edge_trace_compacted time : %s"%(func_end-func_start))
return bit_vector_df, picked_trace
def sampling(row,duplicated_traces):
trace_variant= row.trace_variant.iloc[0]
sample_size=duplicated_traces[trace_variant]
row=row.sample(n=sample_size, replace=True)
return row
def execute_oversampling(data,duplicated_traces):
#duplicating the original case id to know which case is original and which is a copy.
# that is needed to scale the epsilon of the duplicated cases.
data['original_case:concept:name']=data['case:concept:name']
#count per trace variant
duplicated_traces=pd.Series(duplicated_traces).value_counts()
all_traces=pd.Series(data.trace_variant.unique())
non_duplicated=(set(list(data.trace_variant.unique())) - set(list(duplicated_traces.index)))
# non_duplicated=all_traces[~all_traces.isin(list(duplicated_traces.index))]
# non_duplicated[:]=0
non_duplicated= pd.Series([0]*len(non_duplicated),index=non_duplicated)
duplicated_traces=duplicated_traces.append(non_duplicated) #all the sampling ratios should exist
# duplicated traces
#sampling from event log based on the count of each trace variant
duplicated_cases=data[['trace_variant','case:concept:name']].reset_index(drop=True)
duplicated_cases=duplicated_cases.groupby(['case:concept:name','trace_variant']).size().reset_index()
start=time.time()
# duplicated_cases=duplicated_cases.apply(lambda x:x.sample(n=duplicated_traces[x.trace_variant]), axis=1)#.reset_index(drop=True)
# duplicated_cases = duplicated_cases.swifter.apply(sampling,duplicated_traces=duplicated_traces, axis=1) # .reset_index(drop=True)
duplicated_cases = duplicated_cases.groupby(['trace_variant']).apply(sampling, duplicated_traces=duplicated_traces) # .reset_index(drop=True)
duplicated_cases=duplicated_cases.drop(['trace_variant'],axis=1)
# fix the problem when the same case is duplicated more than once
# take out the duplicated case id
cases_more_than_once = duplicated_cases.groupby(['case:concept:name'])['case:concept:name'].count()
end=time.time()
print("sampling time: %s" %(end-start))
# all the cases only once
duplicated_cases=duplicated_cases['case:concept:name'].unique()
duplicated_cases=pd.DataFrame(duplicated_cases,columns=['case:concept:name'])
data = duplicate_cases(data, duplicated_cases)
cases_more_than_once = cases_more_than_once-1 # already duplicated once
cases_more_than_once=cases_more_than_once[cases_more_than_once>0]
# loop for the duplicated case ids and every time add only one duplication
start=time.time()
while len(cases_more_than_once>0):
duplicated_cases=cases_more_than_once.to_frame()
duplicated_cases.columns = ['cnt']
duplicated_cases=duplicated_cases.reset_index()
duplicated_cases.drop(['cnt'],axis=1, inplace=True)
data = duplicate_cases(data, duplicated_cases)
cases_more_than_once = cases_more_than_once-1 # duplicated once
cases_more_than_once = cases_more_than_once[cases_more_than_once > 0]
end = time.time()
print("loop of duplication: %s" % (end - start))
return data
def duplicate_cases(data, duplicated_cases):
#this function duplicates each case only once and appends the copies to the event log
duplicated_cases = duplicated_cases.rename(columns={'case:concept:name': 'duplicated_case_ids'})
duplicated_cases = duplicated_cases.merge(data, how='left', left_on='duplicated_case_ids',
right_on='case:concept:name').drop('duplicated_case_ids', axis=1)
# replace the case id in the sample
case_ids = duplicated_cases['case:concept:name'].unique()
randomlist = r.sample(range(data.shape[0]+1, data.shape[0]+1+len(case_ids) * 2), len(case_ids))
mapping = | pd.Series(randomlist, index=case_ids) | pandas.Series |
from beer.ingest.current_csv_inventory import CurrentCSVInventory
import io
import pandas as pd
header = '"name","size","category","quantity","retail","case_retail","case_pack","timestamp"'
bells = '"BELLS BEST BROWN","6/12 OZ BTL","CRAFT","4.00","11.8500","40.9900","4","2018-11-21 17:20:25.438956"'
bells_dup = '"BELLS BEST BROWN","6/12 OZ BTL","CRAFT","32.00","11.8500","40.9900","4","2018-11-21 17:20:25.438956"'
bud = '"BUDWEISER","12/12 OZ BTL","DOMESTIC","32.00","11.4900","21.4900","2","2018-11-21 17:20:25.438956"'
bells2 = '"BELLS BEST BROWN","6/12 OZ BTL","CRAFT","4.00","11.8500","40.9900","4","2018-11-21 17:40:25.438956"'
bud2 = '"BUDWEISER","12/12 OZ BTL","DOMESTIC","32.00","11.4900","21.4900","2","2018-11-21 17:40:25.438956"'
def test_first_call_returns_parameter():
columns = ['name', 'size', 'category', 'quantity', 'retail', 'case_retail', 'case_pack', 'timestamp']
empty_df = | pd.DataFrame(columns=columns) | pandas.DataFrame |
from __future__ import annotations
from typing import Sequence
from functools import partial
import pytest
import platform
from anndata import AnnData
import scanpy as sc
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from squidpy import pl
from squidpy.gr import spatial_neighbors
from tests.conftest import PlotTester, PlotTesterMeta
from squidpy.pl._spatial_utils import _get_library_id
from squidpy._constants._pkg_constants import Key
sc.set_figure_params(dpi=40, color_map="viridis")
# WARNING:
# 1. all classes must both subclass PlotTester and use metaclass=PlotTesterMeta
# 2. tests which produce a plot must be prefixed with `test_plot_`
# 3. if the tolerance needs to be change, don't prefix the function with `test_plot_`, but with something else
# the comp. function can be accessed as `self.compare(<your_filename>, tolerance=<your_tolerance>)`
# ".png" is appended to <your_filename>, no need to set it
class TestSpatialStatic(PlotTester, metaclass=PlotTesterMeta):
def test_tol_plot_spatial_scatter_image(self, adata_hne: AnnData):
pl.spatial_scatter(adata_hne, na_color="lightgrey")
self.compare("SpatialStatic_spatial_scatter_image", tolerance=60)
def test_plot_spatial_scatter_noimage(self, adata_hne: AnnData):
pl.spatial_scatter(adata_hne, shape=None, na_color="lightgrey")
def test_plot_spatial_scatter_group_outline(self, adata_hne: AnnData):
pl.spatial_scatter(adata_hne, shape="circle", color="cluster", groups="Cortex_1", outline=True)
def test_plot_spatial_scatter_title_single(self, adata_hne_concat: AnnData):
pl.spatial_scatter(
adata_hne_concat,
shape="hex",
library_key="library_id",
library_id=["V2_Adult_Mouse_Brain"],
color=["Sox17", "cluster"],
title="Visium test",
)
def test_plot_spatial_scatter_crop_graph(self, adata_hne_concat: AnnData):
pl.spatial_scatter(
adata_hne_concat,
shape="square",
library_key="library_id",
size=[0.3, 0.3],
color=["Sox17", "cluster"],
connectivity_key="spatial_connectivities",
edges_width=5,
title=None,
outline=True,
library_first=False,
outline_width=(0.05, 0.05),
crop_coord=[(0, 0, 300, 300), (0, 0, 300, 300)],
scalebar_dx=2.0,
scalebar_kwargs={"scale_loc": "bottom", "location": "lower right"},
)
def test_plot_spatial_scatter_crop_noorigin(self, adata_hne_concat: AnnData):
pl.spatial_scatter(
adata_hne_concat,
shape="circle",
library_key="library_id",
color=["Sox17", "cluster"],
outline_width=(0.05, 0.05),
crop_coord=[(300, 300, 5000, 5000), (3000, 3000, 5000, 5000)],
scalebar_dx=2.0,
scalebar_kwargs={"scale_loc": "bottom", "location": "lower right"},
)
def test_plot_spatial_scatter_group_multi(self, adata_hne: AnnData):
spatial_neighbors(adata_hne)
pl.spatial_scatter(
adata_hne,
shape="circle",
color=["Sox9", "cluster", "leiden"],
groups=["Cortex_1", "Cortex_3", "3"],
crop_coord=[(0, 0, 500, 500)],
connectivity_key="spatial_connectivities",
)
def test_plot_spatial_scatter_group(self, adata_hne_concat: AnnData):
pl.spatial_scatter(
adata_hne_concat,
cmap="inferno",
shape="hex",
library_key="library_id",
library_id=["V1_Adult_Mouse_Brain", "V2_Adult_Mouse_Brain"],
size=[1, 1.25],
color=["Sox17", "cluster"],
edges_width=5,
title=None,
outline=True,
outline_width=(0.05, 0.05),
scalebar_dx=2.0,
scalebar_kwargs={"scale_loc": "bottom", "location": "lower right"},
)
def test_plot_spatial_scatter_nospatial(self, adata_hne_concat: AnnData):
adata = adata_hne_concat.copy()
spatial_neighbors(adata)
adata.uns.pop("spatial")
pl.spatial_scatter(
adata_hne_concat,
shape=None,
library_key="library_id",
library_id=["V1_Adult_Mouse_Brain", "V2_Adult_Mouse_Brain"],
connectivity_key="spatial_connectivities",
edges_width=3,
size=[1.0, 50],
color="cluster",
)
def test_plot_spatial_scatter_axfig(self, adata_hne: AnnData):
fig, ax = plt.subplots(1, 2, figsize=(3, 3), dpi=40)
pl.spatial_scatter(
adata_hne,
shape="square",
color=["Sox17", "cluster"],
fig=fig,
ax=ax,
)
@pytest.mark.skipif(platform.system() == "Darwin", reason="Fails on macOS 3.8 CI")
def test_plot_spatial_scatter_novisium(self, adata_mibitof: AnnData):
spatial_neighbors(adata_mibitof, coord_type="generic", radius=50)
pl.spatial_scatter(
adata_mibitof,
library_key="library_id",
library_id=["point8"],
na_color="lightgrey",
connectivity_key="spatial_connectivities",
edges_width=0.5,
)
def test_plot_spatial_segment(self, adata_mibitof: AnnData):
pl.spatial_segment(
adata_mibitof,
seg_cell_id="cell_id",
library_key="library_id",
na_color="lightgrey",
)
def test_plot_spatial_segment_group(self, adata_mibitof: AnnData):
pl.spatial_segment(
adata_mibitof,
color=["Cluster"],
groups=["Fibroblast", "Endothelial"],
library_key="library_id",
seg_cell_id="cell_id",
img=False,
seg=True,
figsize=(5, 5),
legend_na=False,
scalebar_dx=2.0,
scalebar_kwargs={"scale_loc": "bottom", "location": "lower right"},
)
def test_plot_spatial_segment_crop(self, adata_mibitof: AnnData):
pl.spatial_segment(
adata_mibitof,
color=["Cluster", "cell_size"],
groups=["Fibroblast", "Endothelial"],
library_key="library_id",
seg_cell_id="cell_id",
img=True,
seg=True,
seg_outline=True,
seg_contourpx=15,
figsize=(5, 5),
cmap="magma",
vmin=500,
crop_coord=[(100, 100, 500, 500), (100, 100, 500, 500), (100, 100, 500, 500)],
img_alpha=0.5,
)
def test_plot_spatial_scatter_categorical_alpha(self, adata_hne: AnnData):
pl.spatial_scatter(adata_hne, shape="circle", color="cluster", alpha=0)
class TestSpatialStaticUtils:
@staticmethod
def _create_anndata(shape: str | None, library_id: str | Sequence[str] | None, library_key: str | None):
n_obs = len(library_id) * 2 if isinstance(library_id, list) else 2
X = np.empty((n_obs, 3))
if not isinstance(library_id, list) and library_id is not None:
library_id = [library_id]
if library_id is not None:
obs = | pd.DataFrame(library_id * 2, columns=[library_key]) | pandas.DataFrame |
from io import StringIO
from copy import deepcopy
import numpy as np
import pandas as pd
import re
from glypnirO_GUI.get_uniprot import UniprotParser
from sequal.sequence import Sequence
from sequal.resources import glycan_block_dict
sequence_column_name = "Peptide\n< ProteinMetrics Confidential >"
glycans_column_name = "Glycans\nNHFAGNa"
starting_position_column_name = "Starting\nposition"
modifications_column_name = "Modification Type(s)"
observed_mz = "Calc.\nmass (M+H)"
protein_column_name = "Protein Name"
rt = "Scan Time"
selected_aa = {"N", "S", "T"}
regex_glycan_number_pattern = "\d+"
glycan_number_regex = re.compile(regex_glycan_number_pattern)
regex_pattern = "\.[\[\]\w\.\+\-]*\."
sequence_regex = re.compile(regex_pattern)
uniprot_regex = re.compile("(?P<accession>[OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2})(?P<isoform>-\d)?")
glycan_regex = re.compile("(\w+)\((\d+)\)")
def filter_U_only(df):
unique_glycan = df["Glycans"].unique()
if len(unique_glycan) > 1 or True not in np.isin(unique_glycan, "U"):
# print(unique_glycan)
return True
return False
def filter_with_U(df):
unique_glycan = df["Glycans"].unique()
if len(unique_glycan) > 1 \
and \
True in np.isin(unique_glycan, "U"):
return True
return False
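# Return the signed mass of the first modification on an amino acid, or 0 if unmodified.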
def get_mod_value(amino_acid):
if amino_acid.mods:
if amino_acid.mods[0].value.startswith("+"):
return float(amino_acid.mods[0].value[1:])
else:
return -float(amino_acid.mods[0].value[1:])
else:
return 0
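# Parse a FASTA file into a {header: sequence} dict; when `selected` is given,
# only headers matching selected_prefix + header are loaded.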
def load_fasta(fasta_file_path, selected=None, selected_prefix=""):
    with open(fasta_file_path, "rt") as fasta_file:
        result = {}
        current_seq = None
        for line in fasta_file:
            line = line.strip()
            if line.startswith(">"):
                if selected and selected_prefix + line[1:] not in selected:
                    # skip proteins that were not requested so their sequence
                    # lines are not appended to the previous entry
                    current_seq = None
                    continue
                current_seq = line[1:]
                result[current_seq] = ""
            elif current_seq is not None:
                result[current_seq] += line
        return result
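# Wrapper around the site- or peptide-level quantification table produced by
# GlypnirOComponent.analyze; provides proportion normalisation (calculate_proportion)
# and wide summaries (to_summary).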
class Result:
def __init__(self, df):
self.df = df
self.empty = df.empty
def calculate_proportion(self, occupancy=True):
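        """
        Normalise Value so that entries within each site (or peptide/site pair)
        sum to 1; when occupancy is False, unoccupied ("U") rows are dropped first.
        """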
df = self.df.copy()
#print(df)
if not occupancy:
df = df[df["Glycans"] != "U"]
if "Peptides" in df.columns:
gr = [# "Isoform",
"Peptides", "Position"]
else:
gr = [# "Isoform",
"Position"]
for _, g in df.groupby(gr):
total = g["Value"].sum()
for i, r in g.iterrows():
df.at[i, "Value"] = r["Value"] / total
return df
def to_summary(self, df=None, name="", trust_byonic=False, occupancy=True):
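        """
        Reindex the result table by site and glycan (trust_byonic) or by
        peptide, glycan and position, renaming the Value column to the sample name.
        """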
if df is None:
df = self.df
if not occupancy:
df = df[df["Glycans"] != "U"]
if trust_byonic:
temp = df.set_index([# "Isoform",
"Position", "Glycans"])
else:
temp = df.set_index([# "Isoform",
"Peptides", "Glycans", "Position"])
temp.rename(columns={"Value": name}, inplace=True)
return temp
class GlypnirOComponent:
def __init__(self, filename, area_filename, replicate_id, condition_id, protein_name, minimum_score=0, trust_byonic=False, legacy=False):
        if isinstance(filename, pd.DataFrame):
            data = filename.copy()
        else:
            data = pd.read_excel(filename, sheet_name="Spectra")
        if isinstance(area_filename, pd.DataFrame):
            file_with_area = area_filename
        elif area_filename.endswith("xlsx"):
            file_with_area = pd.read_excel(area_filename)
        else:
            file_with_area = pd.read_csv(area_filename, sep="\t")
data["Scan number"] = pd.to_numeric(data["Scan #"].str.extract("scan=(\d+)", expand=False))
data = pd.merge(data, file_with_area, left_on="Scan number", right_on="First Scan")
self.protein_name = protein_name
self.data = data.sort_values(by=['Area'], ascending=False)
self.replicate_id = replicate_id
self.condition_id = condition_id
self.data = data[data["Area"].notnull()]
self.data = self.data[(self.data["Score"] >= minimum_score) &
(self.data[protein_column_name].str.contains(protein_name))
# (data["Protein Name"] == ">"+protein_name) &
]
self.data = self.data[~self.data[protein_column_name].str.contains(">Reverse")]
if len(self.data.index) > 0:
self.empty = False
else:
self.empty = True
self.row_to_glycans = {}
self.glycan_to_row = {}
self.trust_byonic = trust_byonic
self.legacy = legacy
self.sequon_glycosites = set()
self.glycosylated_seq = set()
def calculate_glycan(self, glycan):
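        """
        Sum the residue masses of a glycan composition string such as
        "HexNAc(2)Hex(5)" using the masses defined in glycan_block_dict.
        """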
current_mass = 0
current_string = ""
for i in glycan:
current_string += i
if i == ")":
s = glycan_regex.search(current_string)
if s:
name = s.group(1)
amount = s.group(2)
current_mass += glycan_block_dict[name]*int(amount)
current_string = ""
return current_mass
def process(self):
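        """
        Annotate each PSM with its stripped sequence and peptide boundaries and,
        when trust_byonic is set, map each glycan mass to the modified residue it
        was localised to (recorded in the numbered "*_position" columns and in
        "glycoprofile").
        """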
# entries_number = len(self.data.index)
# if analysis == "N-glycan":
# expand_window = 2
# self.data["total_number_of_asn"] = pd.Series([0]*entries_number, index=self.data.index, dtype=int)
# self.data["total_number_of_n-linked_sequon"] = pd.Series([0]*entries_number, index=self.data.index, dtype=int)
# self.data["total_number_of_hexnac"] = pd.Series([0]*entries_number, index=self.data.index, dtype=int)
# self.data["total_number_of_deamidation"] = pd.Series([0]*entries_number, index=self.data.index, dtype=int)
# self.data["total_number_of_modded_asn"] = pd.Series([0]*entries_number, index=self.data.index, dtype=int)
# self.data["total_number_of_unmodded_asn"] = pd.Series([0] * entries_number, index=self.data.index, dtype=int)
# elif analysis == "O-glycan":
# self.data["total_number_of_hex"] = pd.Series([0]*entries_number, index=self.data.index, dtype=int)
# self.data["total_number_of_modded_ser_thr"] = pd.Series([0]*entries_number, index=self.data.index, dtype=int)
# self.data["total_number_of_unmodded_ser_or_thr"] = pd.Series([0]*entries_number, index=self.data.index, dtype=int)
# self.data["o_glycosylation_status"] = pd.Series([False]*entries_number, index=self.data.index, dtype=bool)
for i, r in self.data.iterrows():
glycan_dict = {}
search = sequence_regex.search(r[sequence_column_name])
seq = Sequence(search.group(0))
stripped_seq = seq.to_stripped_string()
# modifications = {}
# if pd.notnull(r[modifications_column_name]):
#
# for mod in r[modifications_column_name].split(","):
# number = 1
# if "*" in mod:
# m = mod.split("*")
# minimod = Sequence(m[0].strip())
# number = int(m[1].strip())
#
# else:
# minimod = Sequence(mod.strip())
# for mo in minimod[0].mods:
# if mo.value not in modifications:
# modifications[mo.value] = {}
# modifications[mo.value][minimod[0].value] = {"mod": deepcopy(mo),
# "number": number}
# #if minimod[0].mods[0].value not in modifications:
# # modifications[minimod[0].mods[0].value] = {}
# #modifications[minimod[0].mods[0].value][minimod[0].value] = {"mod": deepcopy(minimod[0].mods[0]),
# # "number": number}
#
# if minimod[0].value == "N":
# if analysis == "N-glycan":
# for mo in minimod[0].mods:
# if mo.value == 1:
# #if minimod[0].mods[0].value == 1:
# self.data.at[i, "total_number_of_deamidation"] += number
# self.data.at[i, "total_number_of_modded_asn"] += number
# elif minimod[0].value in "ST":
# if analysis == "O-glycan":
# for mo in minimod[0].mods:
# self.data.at[i, "total_number_of_modded_ser_thr"] += number
glycans = []
if pd.notnull(r[glycans_column_name]):
glycans = r[glycans_column_name].split(",")
if search:
self.data.at[i, "stripped_seq"] = stripped_seq.rstrip(".").lstrip(".")
origin_seq = r[starting_position_column_name] - 1
glycan_reordered = []
self.data.at[i, "origin_start"] = origin_seq
self.data.at[i, "Ending Position"] = r[starting_position_column_name] + len(self.data.at[i, "stripped_seq"])
self.data.at[i, "position_to_glycan"] = ""
if self.trust_byonic:
n_site_status = {}
p_n = r[protein_column_name].lstrip(">")
# print(self.protein_name, p_n)
# motifs = [match for match in seq.find_with_regex(motif, ignore=seq.gaps())]
# if self.analysis == "N-glycan":
# if len(fasta_library[p_n]) >= origin_seq + expand_window:
# if expand_window:
# expanded_window = Sequence(fasta_library[p_n][origin_seq: origin_seq + len(self.data.at[i, "stripped_seq"]) + expand_window])
# expanded_window_motifs = [match for match in expanded_window.find_with_regex(motif, ignore=expanded_window.gaps())]
# origin_map = [i.start + origin_seq for i in expanded_window_motifs]
# if len(expanded_window_motifs) > len(motifs):
# self.data.at[i, "expanded_motif"] = str(expanded_window[expanded_window_motifs[-1]])
# self.data.at[i, "expanded_aa"] = str(expanded_window[-expand_window:])
#
# else:
# origin_map = [i.start + origin_seq for i in motifs]
# else:
# origin_map = [i.start + origin_seq for i in motifs]
#
# if analysis == "N-glycan":
# self.data.at[i, "total_number_of_asn"] = seq.count("N", 0, len(seq))
# if expand_window:
# self.data.at[i, "total_number_of_n-linked_sequon"] = len(expanded_window_motifs)
# else:
# self.data.at[i, "total_number_of_n-linked_sequon"] = len(motifs)
# self.data.at[i, "total_number_of_unmodded_asn"] = self.data.at[i, "total_number_of_asn"] - self.data.at[i, "total_number_of_modded_asn"]
# elif analysis == "O-glycan":
# self.data.at[i, "total_number_of_ser_thr"] = seq.count("S", 0, len(seq)) + seq.count("T", 0, len(seq))
# self.data.at[i, "total_number_of_unmodded_ser_or_thr"] = self.data.at[i, "total_number_of_modded_ser_thr"] - self.data.at[i, "total_number_of_modded_ser_thr"]
# current_glycan = 0
max_glycans = len(glycans)
glycosylation_count = 1
if max_glycans:
self.row_to_glycans[i] = np.sort(glycans)
for g in glycans:
data_gly = self.calculate_glycan(g)
glycan_dict[str(round(data_gly, 3))] = g
self.glycan_to_row[g] = i
glycosylated_site = []
for aa in range(1, len(seq) - 1):
if seq[aa].mods:
mod_value = float(seq[aa].mods[0].value)
round_mod_value = round(mod_value)
# str_mod_value = seq[aa].mods[0].value[0] + str(round_mod_value)
#if str_mod_value in modifications:
# if seq[aa].value in "ST" and analysis == "O-glycan":
# if round_mod_value == 80:
# continue
# if seq[aa].value in modifications[str_mod_value]:
# if seq[aa].value == "N" and round_mod_value == 1:
# seq[aa].extra = "Deamidated"
# continue
# if modifications[str_mod_value][seq[aa].value]['number'] > 0:
# modifications[str_mod_value][seq[aa].value]['number'] -= 1
# seq[aa].mods[0].mass = mod_value
round_3 = round(mod_value, 3)
if str(round_3) in glycan_dict:
seq[aa].extra = "Glycosylated"
pos = int(r[starting_position_column_name]) + aa - 2
self.sequon_glycosites.add(pos + 1)
position = "{}_position".format(str(glycosylation_count))
self.data.at[i, position] = seq[aa].value + str(pos + 1)
glycosylated_site.append(self.data.at[i, position] + "_" + str(round_mod_value))
glycosylation_count += 1
glycan_reordered.append(glycan_dict[str(round_3)])
if glycan_reordered:
self.data.at[i, "position_to_glycan"] = ",".join(glycan_reordered)
self.data.at[i, "glycoprofile"] = ";".join(glycosylated_site)
# if seq[aa].value == "N":
# if analysis == "N-glycan":
# if self.trust_byonic:
# if not in origin_map:
#
# # position = "{}_position".format(str(glycosylation_count))
# # self.data.at[i, position] = seq[aa].value + str(
# # r[starting_position_column_name]+aa)
# # self.data.at[i, position + "_match"] = "H"
# # glycosylation_count += 1
# self.data.at[i, "total_number_of_hexnac"] += 1
# elif seq[aa].value in "ST":
# if analysis == "O-glycan":
# self.data.at[i, "total_number_of_hex"] += 1
# if mod_value in modifications:
# if seq[aa].value in "ST" and analysis == "O-glycan":
# if round_mod_value == 80:
# continue
#
# if seq[aa].value in modifications[mod_value]:
# if seq[aa].value == "N" and round_mod_value == 1:
# seq[aa].extra = "Deamidated"
# continue
# if modifications[mod_value][seq[aa].value]['number'] > 0:
# modifications[mod_value][seq[aa].value]['number'] -= 1
# seq[aa].mods[0].mass = float(seq[aa].mods[0].value)
#
# if max_glycans and current_glycan != max_glycans:
#
# seq[aa].mods[0].value = glycans[current_glycan]
# seq[aa].extra = "Glycosylated"
#
# if seq[aa].value == "N":
# if analysis == "N-glycan":
# if "hexnac" in glycans[current_glycan].lower():
# self.data.at[i, "total_number_of_hexnac"] += 1
#
# elif seq[aa].value in "ST":
# if analysis == "O-glycan":
# self.data.at[i, "total_number_of_hex"] += 1
#
# current_glycan += 1
#if current_glycan == max_glycans:
#break
# for n in origin_map:
# position = "{}_position".format(str(glycosylation_count))
# self.data.at[i, position] = seq[n-origin_seq+1].value + str(
# n + 1)
#
# if seq[n-origin_seq+1].extra == "Glycosylated":
# self.data.at[i, position + "_match"] = "H"
# elif seq[n-origin_seq+1].extra == "Deamidated":
# self.data.at[i, position + "_match"] = "D"
# else:
# self.data.at[i, position + "_match"] = "U"
#
# if analysis == "N-glycan":
# if self.legacy:
# if self.data.at[i, "total_number_of_n-linked_sequon"] != self.data.at[i, "total_number_of_hexnac"]:
# if seq[n-origin_seq+1].extra == "Deamidated":
# if self.data.at[i, "total_number_of_hexnac"] > 0:
# self.data.at[i, position + "_match"] = "D/H"
# if self.data.at[i, "total_number_of_unmodded_asn"] > 0:
# self.data.at[i, position + "_match"] = "D/H/U"
# else:
# self.data.at[i, position + "_match"] = "D"
# else:
# if self.data.at[i, "total_number_of_hexnac"] > 0:
# if self.data.at[i, "total_number_of_deamidation"] == 0:
# self.data.at[i, position + "_match"] = "H"
# else:
# self.data.at[i, position + "_match"] ="D/H"
# if self.data.at[i, "total_number_of_unmodded_asn"] > 0:
# self.data.at[i, position + "_match"] = "D/H/U"
# if not seq[n-origin_seq+1].extra:
# if self.data.at[i, "total_number_of_hexnac"] > 0 and self.data.at[i, "total_number_of_deamidation"]> 0:
# self.data.at[i, position + "_match"] = "D/H"
# if self.data.at[i, "total_number_of_unmodded_asn"] > 0:
# self.data.at[i, position + "_match"] = "D/H/U"
# elif self.data.at[i, "total_number_of_hexnac"] > 0:
# self.data.at[i, position + "_match"] = "H"
# if self.data.at[i, "total_number_of_unmodded_asn"] > 0:
# self.data.at[i, position + "_match"] = "D/H/U"
# else:
# self.data.at[i, position + "_match"] = "U"
# glycosylation_count += 1
else:
if pd.notnull(r[glycans_column_name]):
glycans = r[glycans_column_name].split(",")
glycans.sort()
self.data.at[i, glycans_column_name] = ",".join(glycans)
self.data.at[i, "glycosylation_status"] = True
self.glycosylated_seq.add(self.data.at[i, "stripped_seq"])
def analyze(self, max_sites=0, combine_d_u=True, splitting_sites=False):
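        """
        For each unique peptide form, keep the highest-area PSM and aggregate its
        area per glycan, keyed by glycosylation site (trust_byonic) or by peptide
        and position; the aggregated table is returned wrapped in a Result.
        """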
result = []
temp = self.data.sort_values(["Area", "Score"], ascending=False)
temp[glycans_column_name] = temp[glycans_column_name].fillna("None")
out = []
if self.trust_byonic:
seq_glycosites = list(self.sequon_glycosites)
seq_glycosites.sort()
# print(seq_glycosites)
# if self.analysis == "N-glycan":
# if max_sites == 0:
# temp = temp[(0 < temp["total_number_of_n-linked_sequon"])]
# else:
# temp = temp[(0 < temp["total_number_of_n-linked_sequon"]) & (temp["total_number_of_n-linked_sequon"]<= max_sites) ]
for i, g in temp.groupby(["stripped_seq", "z", "glycoprofile", observed_mz]):
seq_within = []
unique_row = g.loc[g["Area"].idxmax()]
#
# glycan = 0
# first_site = ""
if seq_glycosites:
for n in seq_glycosites:
if unique_row[starting_position_column_name] <= n < unique_row["Ending Position"]:
# print(unique_row["stripped_seq"], n, unique_row[starting_position_column_name])
seq_within.append(
unique_row["stripped_seq"][n-unique_row[starting_position_column_name]]+str(n))
# print(unique_row)
# if self.legacy:
# for c in range(len(unique_row.index)):
# if unique_row.index[c].endswith("_position"):
#
# if pd.notnull(unique_row[unique_row.index[c]]):
# if not first_site:
# first_site = unique_row[unique_row.index[c]]
# if unique_row[unique_row.index[c]] not in result:
# result[unique_row[unique_row.index[c]]] = {}
#
# if "U" in unique_row[unique_row.index[c+1]]:
# if "U" not in result[unique_row[unique_row.index[c]]]:
# result[unique_row[unique_row.index[c]]]["U"] = 0
# result[unique_row[unique_row.index[c]]]["U"] += unique_row["Area"]
# elif "D" in unique_row[unique_row.index[c+1]]:
# if combine_d_u:
# if "U" not in result[unique_row[unique_row.index[c]]]:
# result[unique_row[unique_row.index[c]]]["U"] = 0
# result[unique_row[unique_row.index[c]]]["U"] += unique_row["Area"]
# else:
# if "D" not in result[unique_row[unique_row.index[c]]]:
# result[unique_row[unique_row.index[c]]]["D"] = 0
# result[unique_row[unique_row.index[c]]]["D"] += unique_row["Area"]
# else:
# if splitting_sites or unique_row["total_number_of_hexnac"] == 1:
#
# if self.row_to_glycans[unique_row.name][glycan] not in result[unique_row[unique_row.index[c]]]:
# result[unique_row[unique_row.index[c]]][self.row_to_glycans[unique_row.name][glycan]] = 0
# result[unique_row[unique_row.index[c]]][
# self.row_to_glycans[unique_row.name][glycan]] += unique_row["Area"]
# glycan += 1
#
# else:
# if unique_row["total_number_of_hexnac"] > 1 and not splitting_sites:
# temporary_glycan = ";".join(self.row_to_glycans[unique_row.name][glycan])
#
# if temporary_glycan not in result[unique_row[unique_row.index[c]]]:
# result[unique_row[unique_row.index[c]]][temporary_glycan] = unique_row["Area"]
# break
# else:
glycosylation_count = 0
glycans = unique_row["position_to_glycan"].split(",")
for c in range(len(unique_row.index)):
if unique_row.index[c].endswith("_position"):
if pd.notnull(unique_row[unique_row.index[c]]):
pos = unique_row[unique_row.index[c]]
result.append({"Position": pos, "Glycans": glycans[glycosylation_count], "Value": unique_row["Area"]})
ind = seq_within.index(pos)
seq_within.pop(ind)
glycosylation_count += 1
if seq_within:
for s in seq_within:
result.append({"Position": s, "Glycans": "U", "Value": unique_row["Area"]})
# if N_combo:
#
# N_combo.sort()
# sequons = ";".join(N_combo)
#
# # working_isoform = unique_row["isoform"]
# # if working_isoform not in result:
# # # if working_isoform != 1.0 and 1.0 in result:
# # # if sequons in result[working_isoform][1.0]:
# # # if unique_row[glycans_column_name] in result[working_isoform][1.0][sequons] or "U" in result[working_isoform][1.0][sequons]:
# # # working_isoform = 1.0
# # # else:
# # result[working_isoform] = {}
# if sequons not in result[working_isoform]:
# result[working_isoform][sequons] = {}
# #if pd.notnull(unique_row[glycans_column_name]):
# if unique_row[glycans_column_name] != "None":
# if unique_row[glycans_column_name] not in result[working_isoform][sequons]:
# result[working_isoform][sequons][unique_row[glycans_column_name]] = 0
# result[working_isoform][sequons][unique_row[glycans_column_name]] += unique_row["Area"]
# else:
# if "U" not in result[working_isoform][sequons]:
# result[working_isoform][sequons]["U"] = 0
# result[working_isoform][sequons]["U"] += unique_row["Area"]
# #print(result)
if result:
result = pd.DataFrame(result)
group = result.groupby(["Position", "Glycans"])
out = group.agg(np.sum).reset_index()
else:
out = pd.DataFrame([], columns=["Position", "Glycans", "Values"])
# for k in result:
# for k2 in result[k]:
# for k3 in result[k][k2]:
# out.append({"Isoform": k, "Position": k2, "Glycans": k3, "Value": result[k][k2][k3]})
else:
# result_total = {}
# if max_sites != 0:
# temp = temp[temp['total_number_of_hex'] <= max_sites]
for i, g in temp.groupby(["stripped_seq", "z", glycans_column_name, starting_position_column_name, observed_mz]):
unique_row = g.loc[g["Area"].idxmax()]
if unique_row[glycans_column_name] != "None":
result.append({"Peptides": i[0], "Glycans": i[2], "Value": unique_row["Area"], "Position": i[3]})
else:
result.append({"Peptides": i[0], "Glycans": "U", "Value": unique_row["Area"], "Position": i[3]})
result = pd.DataFrame(result)
group = result.groupby(["Peptides", "Position", "Glycans"])
out = group.agg(np.sum).reset_index()
# working_isoform = unique_row["isoform"]
# if working_isoform not in result:
# # if working_isoform != 1.0 and 1.0 in result:
# # if unique_row["stripped_seq"] in result[working_isoform][1.0]:
# # #if i[3] in result[working_isoform][1.0][unique_row["stripped_seq"]]:
# # # if unique_row[glycans_column_name] in result[working_isoform][1.0][unique_row["stripped_seq"]][i[3]] or "U" in \
# # # result[working_isoform][1.0][unique_row["stripped_seq"]][i[3]]:
# # working_isoform = 1.0
# # else:
# result[working_isoform] = {}
#
# if unique_row["stripped_seq"] not in result[working_isoform]:
# result[working_isoform][unique_row["stripped_seq"]] = {}
# # result_total[unique_row["isoform"]][unique_row["stripped_seq"]] = 0
# if i[3] not in result[working_isoform][unique_row["stripped_seq"]]:
# result[working_isoform][unique_row["stripped_seq"]][i[3]] = {}
# if i[2] == "None":
# if "U" not in result[working_isoform][unique_row["stripped_seq"]][i[3]]:
# result[working_isoform][unique_row["stripped_seq"]][i[3]]["U"] = 0
# result[working_isoform][unique_row["stripped_seq"]][i[3]]["U"] += unique_row["Area"]
#
# else:
# # if splitting_sites:
# # for gly in self.row_to_glycans[unique_row.name]:
# # if gly not in result[working_isoform][unique_row["stripped_seq"]][i[3]]:
# # result[working_isoform][unique_row["stripped_seq"]][i[3]][gly] = 0
# # result[working_isoform][unique_row["stripped_seq"]][i[3]][gly] += unique_row["Area"]
# # else:
# if unique_row[glycans_column_name] not in result[working_isoform][unique_row["stripped_seq"]][i[3]]:
# result[working_isoform][unique_row["stripped_seq"]][i[3]][unique_row[glycans_column_name]] = 0
# result[working_isoform][unique_row["stripped_seq"]][i[3]][unique_row[glycans_column_name]] += unique_row["Area"]
#
# for k in result:
# for k2 in result[k]:
# for k3 in result[k][k2]:
# for k4 in result[k][k2][k3]:
# out.append({"Isoform": k, "Peptides": k2, "Glycans": k4, "Value": result[k][k2][k3][k4], "Position": k3})
return Result(out)
class GlypnirO:
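    """
    Coordinates loading of paired search-output/area files (one pair per condition
    and replicate) into GlypnirOComponent objects for protein-level glycosylation
    analysis.
    """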
def __init__(self, trust_byonic=False, get_uniprot=False):
self.trust_byonic = trust_byonic
self.components = None
self.uniprot_parsed_data = pd.DataFrame([])
self.get_uniprot = get_uniprot
def add_component(self, filename, area_filename, replicate_id, sample_id):
component = GlypnirOComponent(filename, area_filename, replicate_id, sample_id)
def add_batch_component(self, component_list, minimum_score, protein=None, combine_uniprot_isoform=True, legacy=False):
self.load_dataframe(component_list)
protein_list = []
if protein is not None:
self.components["Protein"] = pd.Series([protein]*len(self.components.index), index=self.components.index)
for i, r in self.components.iterrows():
comp = GlypnirOComponent(r["filename"], r["area_filename"], r["replicate_id"], condition_id=r["condition_id"], protein_name=protein, minimum_score=minimum_score, trust_byonic=self.trust_byonic, legacy=legacy)
self.components.at[i, "component"] = comp
print("{} - {}, {} peptides has been successfully loaded".format(r["condition_id"], r["replicate_id"], str(len(comp.data.index))))
else:
components = []
for i, r in self.components.iterrows():
data = pd.read_excel(r["filename"], sheet_name="Spectra")
protein_id_column = protein_column_name
if combine_uniprot_isoform:
protein_id_column = "master_id"
for i2, r2 in data.iterrows():
search = uniprot_regex.search(r2[protein_column_name])
if not r2[protein_column_name].startswith(">Reverse") and not r2[protein_column_name].endswith("(Common contaminant protein)"):
if search:
data.at[i2, "master_id"] = search.groupdict(default="")["accession"]
if not self.get_uniprot:
protein_list.append([search.groupdict(default="")["accession"], r2[protein_column_name]])
if search.groupdict(default="")["isoform"] != "":
data.at[i2, "isoform"] = int(search.groupdict(default="")["isoform"][1:])
else:
data.at[i2, "isoform"] = 1
else:
data.at[i2, "master_id"] = r2[protein_column_name]
data.at[i2, "isoform"] = 1
else:
data.at[i2, "master_id"] = r2[protein_column_name]
data.at[i2, "isoform"] = 1
if r["area_filename"].endswith("xlsx"):
file_with_area = pd.read_excel(r["area_filename"])
else:
file_with_area = pd.read_csv(r["area_filename"], sep="\t")
for index, g in data.groupby([protein_id_column]):
u = index
if not u.startswith(">Reverse") and not u.endswith("(Common contaminant protein)"):
comp = GlypnirOComponent(g, file_with_area, r["replicate_id"],
condition_id=r["condition_id"], protein_name=u,
minimum_score=minimum_score, trust_byonic=self.trust_byonic, legacy=legacy)
if not comp.empty:
components.append({"filename": r["filename"], "area_filename": r["area_filename"], "condition_id": r["condition_id"], "replicate_id": r["replicate_id"], "Protein": u, "component": comp})
yield i, r
print(
"{} - {} peptides has been successfully loaded".format(r["condition_id"],
r["replicate_id"]))
self.components = pd.DataFrame(components, columns=list(self.components.columns) + ["component", "Protein"])
if not self.get_uniprot:
protein_df = | pd.DataFrame(protein_list, columns=["Entry", "Protein names"]) | pandas.DataFrame |
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserWarning
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import Categorical, DataFrame, Index, MultiIndex, Series, Timestamp, concat
import pandas._testing as tm
@pytest.mark.parametrize("dtype", [str, object])
@pytest.mark.parametrize("check_orig", [True, False])
def test_dtype_all_columns(all_parsers, dtype, check_orig):
# see gh-3795, gh-6607
parser = all_parsers
df = DataFrame(
np.random.rand(5, 2).round(4),
columns=list("AB"),
index=["1A", "1B", "1C", "1D", "1E"],
)
with tm.ensure_clean("__passing_str_as_dtype__.csv") as path:
df.to_csv(path)
result = parser.read_csv(path, dtype=dtype, index_col=0)
if check_orig:
expected = df.copy()
result = result.astype(float)
else:
expected = df.astype(str)
tm.assert_frame_equal(result, expected)
def test_dtype_all_columns_empty(all_parsers):
# see gh-12048
parser = all_parsers
result = parser.read_csv(StringIO("A,B"), dtype=str)
expected = DataFrame({"A": [], "B": []}, index=[], dtype=str)
tm.assert_frame_equal(result, expected)
def test_dtype_per_column(all_parsers):
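    # dtype may be specified per column, keyed either by name or by position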
parser = all_parsers
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
expected = DataFrame(
[[1, "2.5"], [2, "3.5"], [3, "4.5"], [4, "5.5"]], columns=["one", "two"]
)
expected["one"] = expected["one"].astype(np.float64)
expected["two"] = expected["two"].astype(object)
result = parser.read_csv(StringIO(data), dtype={"one": np.float64, 1: str})
tm.assert_frame_equal(result, expected)
def test_invalid_dtype_per_column(all_parsers):
parser = all_parsers
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
with pytest.raises(TypeError, match="data type [\"']foo[\"'] not understood"):
parser.read_csv(StringIO(data), dtype={"one": "foo", 1: "int"})
@pytest.mark.parametrize(
"dtype",
[
"category",
CategoricalDtype(),
{"a": "category", "b": "category", "c": CategoricalDtype()},
],
)
def test_categorical_dtype(all_parsers, dtype):
# see gh-10153
parser = all_parsers
data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
expected = DataFrame(
{
"a": Categorical(["1", "1", "2"]),
"b": Categorical(["a", "a", "b"]),
"c": Categorical(["3.4", "3.4", "4.5"]),
}
)
actual = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize("dtype", [{"b": "category"}, {1: "category"}])
def test_categorical_dtype_single(all_parsers, dtype):
# see gh-10153
parser = all_parsers
data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
expected = DataFrame(
{"a": [1, 1, 2], "b": Categorical(["a", "a", "b"]), "c": [3.4, 3.4, 4.5]}
)
actual = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_unsorted(all_parsers):
# see gh-10153
parser = all_parsers
data = """a,b,c
1,b,3.4
1,b,3.4
2,a,4.5"""
expected = DataFrame(
{
"a": Categorical(["1", "1", "2"]),
"b": Categorical(["b", "b", "a"]),
"c": Categorical(["3.4", "3.4", "4.5"]),
}
)
actual = parser.read_csv(StringIO(data), dtype="category")
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_missing(all_parsers):
# see gh-10153
parser = all_parsers
data = """a,b,c
1,b,3.4
1,nan,3.4
2,a,4.5"""
expected = DataFrame(
{
"a": Categorical(["1", "1", "2"]),
"b": Categorical(["b", np.nan, "a"]),
"c": Categorical(["3.4", "3.4", "4.5"]),
}
)
actual = parser.read_csv(StringIO(data), dtype="category")
tm.assert_frame_equal(actual, expected)
@pytest.mark.slow
def test_categorical_dtype_high_cardinality_numeric(all_parsers):
# see gh-18186
parser = all_parsers
data = np.sort([str(i) for i in range(524289)])
expected = DataFrame({"a": Categorical(data, ordered=True)})
actual = parser.read_csv(StringIO("a\n" + "\n".join(data)), dtype="category")
actual["a"] = actual["a"].cat.reorder_categories(
np.sort(actual.a.cat.categories), ordered=True
)
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_latin1(all_parsers, csv_dir_path):
# see gh-10153
pth = os.path.join(csv_dir_path, "unicode_series.csv")
parser = all_parsers
encoding = "latin-1"
expected = parser.read_csv(pth, header=None, encoding=encoding)
expected[1] = Categorical(expected[1])
actual = parser.read_csv(pth, header=None, encoding=encoding, dtype={1: "category"})
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_utf16(all_parsers, csv_dir_path):
# see gh-10153
pth = os.path.join(csv_dir_path, "utf16_ex.txt")
parser = all_parsers
encoding = "utf-16"
sep = "\t"
expected = parser.read_csv(pth, sep=sep, encoding=encoding)
expected = expected.apply(Categorical)
actual = parser.read_csv(pth, sep=sep, encoding=encoding, dtype="category")
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_chunksize_infer_categories(all_parsers):
# see gh-10153
parser = all_parsers
data = """a,b
1,a
1,b
1,b
2,c"""
expecteds = [
DataFrame({"a": [1, 1], "b": Categorical(["a", "b"])}),
DataFrame({"a": [1, 2], "b": Categorical(["b", "c"])}, index=[2, 3]),
]
actuals = parser.read_csv(StringIO(data), dtype={"b": "category"}, chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_chunksize_explicit_categories(all_parsers):
# see gh-10153
parser = all_parsers
data = """a,b
1,a
1,b
1,b
2,c"""
cats = ["a", "b", "c"]
expecteds = [
DataFrame({"a": [1, 1], "b": Categorical(["a", "b"], categories=cats)}),
DataFrame(
{"a": [1, 2], "b": Categorical(["b", "c"], categories=cats)}, index=[2, 3]
),
]
dtype = CategoricalDtype(cats)
actuals = parser.read_csv(StringIO(data), dtype={"b": dtype}, chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize(
"categories",
[["a", "b", "c"], ["a", "c", "b"], ["a", "b", "c", "d"], ["c", "b", "a"]],
)
def test_categorical_category_dtype(all_parsers, categories, ordered):
parser = all_parsers
data = """a,b
1,a
1,b
1,b
2,c"""
expected = DataFrame(
{
"a": [1, 1, 1, 2],
"b": Categorical(
["a", "b", "b", "c"], categories=categories, ordered=ordered
),
}
)
dtype = {"b": CategoricalDtype(categories=categories, ordered=ordered)}
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_category_dtype_unsorted(all_parsers):
parser = all_parsers
data = """a,b
1,a
1,b
1,b
2,c"""
dtype = CategoricalDtype(["c", "b", "a"])
expected = DataFrame(
{
"a": [1, 1, 1, 2],
"b": Categorical(["a", "b", "b", "c"], categories=["c", "b", "a"]),
}
)
result = parser.read_csv(StringIO(data), dtype={"b": dtype})
tm.assert_frame_equal(result, expected)
def test_categorical_coerces_numeric(all_parsers):
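    # parsed strings are coerced to the dtype of the supplied numeric categories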
parser = all_parsers
dtype = {"b": CategoricalDtype([1, 2, 3])}
data = "b\n1\n1\n2\n3"
expected = DataFrame({"b": Categorical([1, 1, 2, 3])})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_coerces_datetime(all_parsers):
parser = all_parsers
dti = pd.DatetimeIndex(["2017-01-01", "2018-01-01", "2019-01-01"], freq=None)
dtype = {"b": CategoricalDtype(dti)}
data = "b\n2017-01-01\n2018-01-01\n2019-01-01"
expected = DataFrame({"b": Categorical(dtype["b"].categories)})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_coerces_timestamp(all_parsers):
parser = all_parsers
dtype = {"b": CategoricalDtype([Timestamp("2014")])}
data = "b\n2014-01-01\n2014-01-01T00:00:00"
expected = DataFrame({"b": Categorical([Timestamp("2014")] * 2)})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_coerces_timedelta(all_parsers):
parser = all_parsers
dtype = {"b": CategoricalDtype(pd.to_timedelta(["1H", "2H", "3H"]))}
data = "b\n1H\n2H\n3H"
expected = DataFrame({"b": Categorical(dtype["b"].categories)})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
"b\nTrue\nFalse\nNA\nFalse",
"b\ntrue\nfalse\nNA\nfalse",
"b\nTRUE\nFALSE\nNA\nFALSE",
"b\nTrue\nFalse\nNA\nFALSE",
],
)
def test_categorical_dtype_coerces_boolean(all_parsers, data):
# see gh-20498
parser = all_parsers
dtype = {"b": CategoricalDtype([False, True])}
expected = DataFrame({"b": Categorical([True, False, None, False])})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_unexpected_categories(all_parsers):
parser = all_parsers
dtype = {"b": CategoricalDtype(["a", "b", "d", "e"])}
data = "b\nd\na\nc\nd" # Unexpected c
expected = DataFrame({"b": Categorical(list("dacd"), dtype=dtype["b"])})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_empty_pass_dtype(all_parsers):
parser = all_parsers
data = "one,two"
result = parser.read_csv(StringIO(data), dtype={"one": "u1"})
expected = DataFrame(
{"one": np.empty(0, dtype="u1"), "two": np.empty(0, dtype=object)},
index=Index([], dtype=object),
)
tm.assert_frame_equal(result, expected)
def test_empty_with_index_pass_dtype(all_parsers):
parser = all_parsers
data = "one,two"
result = parser.read_csv(
StringIO(data), index_col=["one"], dtype={"one": "u1", 1: "f"}
)
expected = DataFrame(
{"two": np.empty(0, dtype="f")}, index=Index([], dtype="u1", name="one")
)
tm.assert_frame_equal(result, expected)
def test_empty_with_multi_index_pass_dtype(all_parsers):
parser = all_parsers
data = "one,two,three"
result = parser.read_csv(
StringIO(data), index_col=["one", "two"], dtype={"one": "u1", 1: "f8"}
)
exp_idx = MultiIndex.from_arrays(
[np.empty(0, dtype="u1"), np.empty(0, dtype=np.float64)], names=["one", "two"]
)
expected = DataFrame({"three": np.empty(0, dtype=object)}, index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_empty_with_mangled_column_pass_dtype_by_names(all_parsers):
parser = all_parsers
data = "one,one"
result = parser.read_csv(StringIO(data), dtype={"one": "u1", "one.1": "f"})
expected = DataFrame(
{"one": np.empty(0, dtype="u1"), "one.1": np.empty(0, dtype="f")},
index=Index([], dtype=object),
)
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
from __future__ import absolute_import
from __future__ import print_function
import os
import pandas as pd
import numpy as np
import sys
import shutil
from sklearn.preprocessing import MinMaxScaler
def dataframe_from_csv(path, header=0, index_col=False):
return pd.read_csv(path, header=header, index_col=index_col)
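# Physiological variables (vitals, labs, GCS components) extracted for each ICU stay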
var_to_consider = [
"glucose",
"Invasive BP Diastolic",
"Invasive BP Systolic",
"O2 Saturation",
"Respiratory Rate",
"Motor",
"Eyes",
"MAP (mmHg)",
"Heart Rate",
"GCS Total",
"Verbal",
"pH",
"FiO2",
"Temperature (C)",
]
# Filter on useful column for this benchmark
def filter_patients_on_columns_model(patients):
columns = [
"patientunitstayid",
"gender",
"age",
"ethnicity",
"apacheadmissiondx",
"admissionheight",
"hospitaladmitoffset",
"admissionweight",
"hospitaldischargestatus",
"unitdischargeoffset",
"unitdischargestatus",
]
return patients[columns]
# Select unique patient id
def cohort_stay_id(patients):
cohort = patients.patientunitstayid.unique()
return cohort
# Convert gender from F/M to numbers
g_map = {"Female": 1, "Male": 2, "": 0, "NaN": 0, "Unknown": 0, "Other": 0}
def transform_gender(gender_series):
global g_map
return {
"gender": gender_series.fillna("").apply(
lambda s: g_map[s] if s in g_map else g_map[""]
)
}
# Convert ethnicity to numbers
e_map = {
"Asian": 1,
"African American": 2,
"Caucasian": 3,
"Hispanic": 4,
"Native American": 5,
"NaN": 0,
"": 0,
}
def transform_ethnicity(ethnicity_series):
global e_map
return {
"ethnicity": ethnicity_series.fillna("").apply(
lambda s: e_map[s] if s in e_map else e_map[""]
)
}
# Convert hospital/unit discharge status into numbers
h_s_map = {"Expired": 1, "Alive": 0, "": 2, "NaN": 2}
def transform_hospital_discharge_status(status_series):
global h_s_map
return {
"hospitaldischargestatus": status_series.fillna("").apply(
lambda s: h_s_map[s] if s in h_s_map else h_s_map[""]
)
}
def transform_unit_discharge_status(status_series):
global h_s_map
return {
"unitdischargestatus": status_series.fillna("").apply(
lambda s: h_s_map[s] if s in h_s_map else h_s_map[""]
)
}
# Convert diagnosis into numbers
def transform_dx_into_id(df):
    df.apacheadmissiondx.fillna("nodx", inplace=True)
    dx_type = df.apacheadmissiondx.unique()
    dict_dx_val, dict_dx_key = pd.factorize(dx_type)
    dictionary = dict(zip(dict_dx_key, dict_dx_val))
    df["apacheadmissiondx"] = df["apacheadmissiondx"].map(dictionary)
    return df
## Extract the root data
# Extract data from patient table
def read_patients_table(eicu_path, output_path):
pats = dataframe_from_csv(os.path.join(eicu_path, "patient.csv"), index_col=False)
pats = filter_patients_on_age(pats, min_age=18, max_age=89)
pats = filter_one_unit_stay(pats)
pats = filter_patients_on_columns(pats)
pats.update(transform_gender(pats.gender))
pats.update(transform_ethnicity(pats.ethnicity))
pats.update(transform_hospital_discharge_status(pats.hospitaldischargestatus))
pats.update(transform_unit_discharge_status(pats.unitdischargestatus))
pats = transform_dx_into_id(pats)
pats.to_csv(os.path.join(output_path, "all_stays.csv"), index=False)
pats = filter_patients_on_columns_model(pats)
return pats
# filter on adult patients
def filter_patients_on_age(patient, min_age=18, max_age=89):
    patient.loc[patient["age"] == "> 89", "age"] = 90
    patient[["age"]] = patient[["age"]].fillna(-1)
    patient[["age"]] = patient[["age"]].astype(int)
    patient = patient.loc[(patient.age >= min_age) & (patient.age <= max_age)]
    return patient
# filter those having just one stay in unit
def filter_one_unit_stay(patients):
cohort_count = patients.groupby(by="uniquepid").count()
index_cohort = cohort_count[cohort_count["patientunitstayid"] == 1].index
patients = patients[patients["uniquepid"].isin(index_cohort)]
return patients
# Filter on useful columns from patient table
def filter_patients_on_columns(patients):
columns = [
"patientunitstayid",
"gender",
"age",
"ethnicity",
"apacheadmissiondx",
"hospitaladmityear",
"hospitaldischargeyear",
"hospitaldischargeoffset",
"admissionheight",
"hospitaladmitoffset",
"admissionweight",
"hospitaldischargestatus",
"unitdischargeoffset",
"unitdischargestatus",
]
return patients[columns]
# Write the selected cohort data from the patient table into pats.csv for each patient
def break_up_stays_by_unit_stay(pats, output_path, stayid=None, verbose=1):
unit_stays = pats.patientunitstayid.unique() if stayid is None else stayid
nb_unit_stays = unit_stays.shape[0]
for i, stay_id in enumerate(unit_stays):
if verbose:
sys.stdout.write("\rStayID {0} of {1}...".format(i + 1, nb_unit_stays))
dn = os.path.join(output_path, str(stay_id))
try:
os.makedirs(dn)
except:
pass
        pats.loc[pats.patientunitstayid == stay_id].sort_values(
            by="hospitaladmitoffset"
        ).to_csv(os.path.join(dn, "pats.csv"), index=False)
if verbose:
sys.stdout.write("DONE!\n")
## Here we deal with lab table
# Select the useful columns from lab table
def filter_lab_on_columns(lab):
columns = ["patientunitstayid", "labresultoffset", "labname", "labresult"]
return lab[columns]
# Rename the columns in order to have a unified name
def rename_lab_columns(lab):
lab.rename(
index=str,
columns={
"labresultoffset": "itemoffset",
"labname": "itemname",
"labresult": "itemvalue",
},
inplace=True,
)
return lab
# Select the lab measurement from lab table
def item_name_selected_from_lab(lab, items):
lab = lab[lab["itemname"].isin(items)]
return lab
# Check if the lab measurement is valid
def check(x):
try:
x = float(str(x).strip())
except:
x = np.nan
return x
def check_itemvalue(df):
df["itemvalue"] = df["itemvalue"].apply(lambda x: check(x))
df["itemvalue"] = df["itemvalue"].astype(float)
return df
# extract the lab items for each patient
def read_lab_table(eicu_path):
lab = dataframe_from_csv(os.path.join(eicu_path, "lab.csv"), index_col=False)
items = ["bedside glucose", "glucose", "pH", "FiO2"]
lab = filter_lab_on_columns(lab)
lab = rename_lab_columns(lab)
lab = item_name_selected_from_lab(lab, items)
lab.loc[
lab["itemname"] == "bedside glucose", "itemname"
] = "glucose" # unify bedside glucose and glucose
lab = check_itemvalue(lab)
return lab
# Write the available lab items of a patient into lab.csv
def break_up_lab_by_unit_stay(lab, output_path, stayid=None, verbose=1):
unit_stays = lab.patientunitstayid.unique() if stayid is None else stayid
nb_unit_stays = unit_stays.shape[0]
for i, stay_id in enumerate(unit_stays):
if verbose:
sys.stdout.write("\rStayID {0} of {1}...".format(i + 1, nb_unit_stays))
dn = os.path.join(output_path, str(stay_id))
try:
os.makedirs(dn)
except:
pass
        lab.loc[lab.patientunitstayid == stay_id].sort_values(by="itemoffset").to_csv(
            os.path.join(dn, "lab.csv"), index=False
        )
if verbose:
sys.stdout.write("DONE!\n")
# Filter the useful columns from nc table
def filter_nc_on_columns(nc):
columns = [
"patientunitstayid",
"nursingchartoffset",
"nursingchartcelltypevallabel",
"nursingchartcelltypevalname",
"nursingchartvalue",
]
return nc[columns]
# Unify the column names in order to be used later
def rename_nc_columns(nc):
nc.rename(
index=str,
columns={
"nursingchartoffset": "itemoffset",
"nursingchartcelltypevalname": "itemname",
"nursingchartcelltypevallabel": "itemlabel",
"nursingchartvalue": "itemvalue",
},
inplace=True,
)
return nc
# Select the items using name and label
def item_name_selected_from_nc(nc, label, name):
nc = nc[(nc.itemname.isin(name)) | (nc.itemlabel.isin(label))]
return nc
# Convert fahrenheit to celsius
def conv_far_cel(nc):
nc["itemvalue"] = nc["itemvalue"].astype(float)
nc.loc[nc["itemname"] == "Temperature (F)", "itemvalue"] = (
nc["itemvalue"] - 32
) * (5 / 9)
return nc
# Unify the different names into one for each measurement
def replace_itemname_value(nc):
nc.loc[nc["itemname"] == "Value", "itemname"] = nc.itemlabel
nc.loc[
nc["itemname"] == "Non-Invasive BP Systolic", "itemname"
] = "Invasive BP Systolic"
nc.loc[
nc["itemname"] == "Non-Invasive BP Diastolic", "itemname"
] = "Invasive BP Diastolic"
nc.loc[nc["itemname"] == "Temperature (F)", "itemname"] = "Temperature (C)"
nc.loc[nc["itemlabel"] == "Arterial Line MAP (mmHg)", "itemname"] = "MAP (mmHg)"
return nc
# Select the nurseCharting items and save it into nc
def read_nc_table(eicu_path):
# import pdb;pdb.set_trace()
nc = dataframe_from_csv(
os.path.join(eicu_path, "nurseCharting.csv"), index_col=False
)
nc = filter_nc_on_columns(nc)
nc = rename_nc_columns(nc)
typevallabel = [
"Glasgow coma score",
"Heart Rate",
"O2 Saturation",
"Respiratory Rate",
"MAP (mmHg)",
"Arterial Line MAP (mmHg)",
]
typevalname = [
"Non-Invasive BP Systolic",
"Invasive BP Systolic",
"Non-Invasive BP Diastolic",
"Invasive BP Diastolic",
"Temperature (C)",
"Temperature (F)",
]
nc = item_name_selected_from_nc(nc, typevallabel, typevalname)
nc = check_itemvalue(nc)
nc = conv_far_cel(nc)
replace_itemname_value(nc)
del nc["itemlabel"]
return nc
# Write the nc values of each patient into a nc.csv file
def break_up_stays_by_unit_stay_nc(nursecharting, output_path, stayid=None, verbose=1):
unit_stays = nursecharting.patientunitstayid.unique() if stayid is None else stayid
nb_unit_stays = unit_stays.shape[0]
for i, stay_id in enumerate(unit_stays):
if verbose:
sys.stdout.write("\rStayID {0} of {1}...".format(i + 1, nb_unit_stays))
dn = os.path.join(output_path, str(stay_id))
try:
os.makedirs(dn)
except:
pass
        nursecharting.loc[nursecharting.patientunitstayid == stay_id].sort_values(
            by="itemoffset"
        ).to_csv(os.path.join(dn, "nc.csv"), index=False)
if verbose:
sys.stdout.write("DONE!\n")
# Write the time-series data into one csv for each patient
def extract_time_series_from_subject(t_path):
print("Convert to time series ...")
print(
"This will take some hours, as the imputation and binning and converting time series are done here ..."
)
filter_15_200 = 0
for i, stay_dir in enumerate(os.listdir(t_path)):
# import pdb;pdb.set_trace()
dn = os.path.join(t_path, stay_dir)
try:
stay_id = int(stay_dir)
if not os.path.isdir(dn):
raise Exception
except:
continue
try:
pat = dataframe_from_csv(os.path.join(t_path, stay_dir, "pats.csv"))
lab = dataframe_from_csv(os.path.join(t_path, stay_dir, "lab.csv"))
nc = dataframe_from_csv(os.path.join(t_path, stay_dir, "nc.csv"))
nclab = pd.concat([nc, lab]).sort_values(by=["itemoffset"])
timeepisode = convert_events_to_timeseries(nclab, variables=var_to_consider)
nclabpat = | pd.merge(timeepisode, pat, on="patientunitstayid") | pandas.merge |
"""Main module."""
from avroconvert import logger
import csv
from fastavro import reader
from io import BytesIO
from itertools import chain
from json import dump
from os.path import join, exists, dirname
from pandas import DataFrame
from pathlib import Path
from pyarrow import Table
from pyarrow.parquet import write_table
class AvroConvert:
'''
A class used to read avro files and convert them to csv,
parquet and json format
:param outfolder: output folder to write the output files
to
:type outfolder: str
:param header: Extracts header from the file if it is set to True
:type header: bool
:param dst_format: Specifies the format to convert the avro data to
:type dst_format: str
'''
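    # Illustrative use (the file path and byte source below are hypothetical):
    #   converter = AvroConvert(outfolder="out", dst_format="csv")
    #   converter.convert_avro("bucket/part-0.avro", raw_bytes_read_from_source)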
def __init__(self, outfolder: str, dst_format: str = 'parquet', header: bool = True):
"""
:param header: Extracts header from the file if it is set to True
:type header: bool
:param dst_format: Specifies the format to convert the avro data to
:type dst_format: str
:param data: Contains raw data in the form of bytes as read from
filesystem, google cloud storage or S3. Multiple
files are read sequentially and their respective data
is appended to this list which is passed as the
variable `data`
:type data: list
"""
self.header = header
self.dst_format = dst_format.lower()
# self.data = data
self.outfolder = outfolder
self._check_output_folder(outfolder)
def convert_avro(self, filename: str, data: bytes) -> str:
'''
Reads byte data, converts it to avro format and writes
the data to the local filesystem to the output format
specified.
:param filename: Name of the input file (with it's source path).
The output file will be saved by the same name,
within the same folder hierarchy as it was in
the source file system. The extension will be
changed as per the given output format
:type filename: str
:param data: Contains raw data in the form of bytes as read from
filesystem, google cloud storage or S3. Multiple
files are read sequentially and their respective data
is appended to this list which is passed as the
variable `data`
:type data: bytes
:returns: File name with path of the output file
:rtype: str
'''
if not bool(data):
return None
try:
logger.info('Converting bytes to avro')
logger.info(f'File {filename} in progress')
outfile = join(self.outfolder, self._change_file_extn(filename))
avrodata = [r for r in reader(BytesIO(data))]
logger.info(
                f'Total {len(avrodata)} records found in file {filename}')
writer_function = getattr(self, f'_to_{self.dst_format}')
writer_function(data=avrodata, outfile=outfile)
logger.info(f'[COMPLETED] File {outfile} complete')
return f'File {outfile} complete'
except Exception as e:
            # log the input name: outfile may be unbound if the failure happened early
            logger.exception(f'[FAILED] File {filename} failed')
raise e
def _to_csv(self, data, outfile: str) -> str:
'''
Write the avro data to a csv file
:param data: Avro formatted data
:type data: avro data
:param outfile: Output filepath. The avro data which is
converted to csv, will be stored at this location.
If a non-existent folder name is given,
the folder will be created and the csv file will
be written there.
Example: ./data/1970-01-01/FILE.csv
:type outfile: str
:returns: path of the output csv file
:rtype: str
'''
        logger.info(f'Output folder check {outfile}')
        self._check_output_folder(outfile)
        # use a context manager so the file handle is closed once writing finishes
        with open(outfile, "w+", newline="") as csv_file:
            writer = csv.writer(csv_file)
            for row in data:
                if self.header:
                    # write the column names once, taken from the first record
                    writer.writerow(row.keys())
                    self.header = False
                writer.writerow(row.values())
        return outfile
def _to_parquet(self, data, outfile: str) -> str:
'''
Write the avro data to a parquet file
:param data: Avro formatted data
:type data: avro data
:param outfile: Output filepath. The avro data which is converted to
parquet, will be stored at this location. If a non-existent
folder name is given, the folder will be created and the
parquet file will be written there.
Example: ./data/1970-01-01/FILE.parquet
:type outfile: str
:returns: path of the output parquet file
:rtype: str
'''
self._check_output_folder(outfile)
# TODO: support for partitioned storage
# table = Table.from_pandas(
# DataFrame(list(chain.from_iterable(self.data))))
logger.info(f'Writing {outfile} to parquet format')
try:
table = Table.from_pandas(
DataFrame(data))
write_table(table, outfile, flavor='spark')
return outfile
except Exception as e:
raise e
def _to_json(self, data, outfile: str) -> str:
'''
Write the avro data to a json file
:param data: Avro formatted data
:type data: avro data
:param outfile: Output filepath. The avro data which is converted to
json, will be stored at this location. If a non-existent
folder name is given, the folder will be created and the
json file will be written there.
Example: ./data/1970-01-01/FILE.json
:type outfile: str
:returns: path of the output json file
:rtype: str
'''
self._check_output_folder(outfile)
df = | DataFrame(data) | pandas.DataFrame |
from __future__ import division
from builtins import str
from builtins import range
from builtins import object
__copyright__ = "Copyright 2015-2016 Contributing Entities"
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import os
import sys
import numpy as np
import pandas as pd
from .Error import NotImplementedError, UnexpectedError
from .Logger import FastTripsLogger
from .Passenger import Passenger
from .Route import Route
from .TAZ import TAZ
from .Trip import Trip
from .Util import Util
#: Default user class: just one class called "all"
def generic_user_class(row_series):
return "all"
class PathSet(object):
"""
Represents a path set for a passenger from an origin :py:class:`TAZ` to a destination :py:class:`TAZ`
through a set of stops.
"""
#: Paths output file
PATHS_OUTPUT_FILE = 'ft_output_passengerPaths.txt'
#: Path times output file
PATH_TIMES_OUTPUT_FILE = 'ft_output_passengerTimes.txt'
#: Configured functions, indexed by name
CONFIGURED_FUNCTIONS = { 'generic_user_class':generic_user_class }
#: Path configuration: Name of the function that defines user class
USER_CLASS_FUNCTION = None
#: File with weights file. Space delimited table.
WEIGHTS_FILE = 'pathweight_ft.txt'
#: Path weights
WEIGHTS_DF = None
#: Read weights file as fixed-width format. If false, standard CSV format is read.
WEIGHTS_FIXED_WIDTH = False
#: Configuration: Minimum transfer penalty. Safeguard against having no transfer penalty
#: which can result in terrible paths with excessive transfers.
MIN_TRANSFER_PENALTY = None
#: Configuration: Overlap scale parameter.
OVERLAP_SCALE_PARAMETER = None
#: Configuration: Overlap variable. Can be "None", "count", "distance", "time".
OVERLAP_VARIABLE = None
#: Overlap variable option: None. Don't use overlap pathsize correction.
OVERLAP_NONE = "None"
#: Overlap variable option: count. Use leg count overlap pathsize correction.
OVERLAP_COUNT = "count"
#: Overlap variable option: distance. Use leg distance overlap pathsize correction.
OVERLAP_DISTANCE = "distance"
#: Overlap variable option: time. Use leg time overlap pathsize correction.
OVERLAP_TIME = "time"
#: Valid values for OVERLAP_VARAIBLE
OVERLAP_VARIABLE_OPTIONS = [OVERLAP_NONE,
OVERLAP_COUNT,
OVERLAP_DISTANCE,
OVERLAP_TIME]
#: Overlap chunk size. How many person's trips to process at a time in overlap calculations
#: in python simulation
OVERLAP_CHUNK_SIZE = None
#: Overlap option: Split transit leg into component parts? e.g. split A-E
#: into A-B-C-D-E for overlap calculations?
OVERLAP_SPLIT_TRANSIT = None
LEARN_ROUTES = False
LEARN_ROUTES_RATE = 0.05
SUCCESS_FLAG_COLUMN = 'success_flag'
BUMP_FLAG_COLUMN = 'bump_flag'
#: Allow departures and arrivals before / after preferred time
ARRIVE_LATE_ALLOWED_MIN = datetime.timedelta(minutes = 0)
DEPART_EARLY_ALLOWED_MIN = datetime.timedelta(minutes = 0)
CONSTANT_GROWTH_MODEL = 'constant'
EXP_GROWTH_MODEL = 'exponential'
LOGARITHMIC_GROWTH_MODEL = 'logarithmic'
LOGISTIC_GROWTH_MODEL = 'logistic'
PENALTY_GROWTH_MODELS = [
CONSTANT_GROWTH_MODEL,
EXP_GROWTH_MODEL,
LOGARITHMIC_GROWTH_MODEL,
LOGISTIC_GROWTH_MODEL,
]
#: Weights column: User Class
WEIGHTS_COLUMN_USER_CLASS = "user_class"
#: Weights column: Purpose
WEIGHTS_COLUMN_PURPOSE = "purpose"
#: Weights column: Demand Mode Type
WEIGHTS_COLUMN_DEMAND_MODE_TYPE = "demand_mode_type"
#: Weights column: Demand Mode Type
WEIGHTS_COLUMN_DEMAND_MODE = "demand_mode"
#: Weights column: Supply Mode
WEIGHTS_COLUMN_SUPPLY_MODE = "supply_mode"
#: Weights column: Weight Name
WEIGHTS_COLUMN_WEIGHT_NAME = "weight_name"
#: Weights column: Weight Value
WEIGHTS_COLUMN_WEIGHT_VALUE = "weight_value"
#: Weights column: Growth Type
WEIGHTS_GROWTH_TYPE = "growth_type"
#: Weights column: Log Base for logarithmic growth function
WEIGHTS_GROWTH_LOG_BASE = "log_base"
#: Weights column: Max value for logistic growth function
WEIGHTS_GROWTH_LOGISTIC_MAX = "logistic_max"
#: Weights column: Midpoint value for logistic growth function
WEIGHTS_GROWTH_LOGISTIC_MID = "logistic_mid"
WEIGHT_NAME_DEPART_EARLY_MIN = "depart_early_min"
WEIGHT_NAME_ARRIVE_LATE_MIN = "arrive_late_min"
WEIGHT_NAME_DEPART_LATE_MIN = 'depart_late_min'
WEIGHT_NAME_ARRIVE_EARLY_MIN = 'arrive_early_min'
WEIGHT_NAME_VALID_NAMES = [
WEIGHT_NAME_DEPART_EARLY_MIN,
WEIGHT_NAME_DEPART_LATE_MIN,
WEIGHT_NAME_ARRIVE_EARLY_MIN,
WEIGHT_NAME_ARRIVE_LATE_MIN,
]
# ========== Added by fasttrips =======================================================
#: Weights column: Supply Mode number
WEIGHTS_COLUMN_SUPPLY_MODE_NUM = "supply_mode_num"
#: File with weights for c++
OUTPUT_WEIGHTS_FILE = "ft_intermediate_weights.txt"
DIR_OUTBOUND = 1 #: Trips outbound from home have preferred arrival times
DIR_INBOUND = 2 #: Trips inbound to home have preferred departure times
PATH_KEY_COST = "pf_cost" #: path cost according to pathfinder
PATH_KEY_FARE = "pf_fare" #: path fare according to pathfinder
PATH_KEY_PROBABILITY = "pf_probability" #: path probability according to pathfinder
PATH_KEY_INIT_COST = "pf_initcost" #: initial cost (in pathfinding, before path was finalized)
PATH_KEY_INIT_FARE = "pf_initfare" #: initial fare (in pathfinding, before path was finalized)
PATH_KEY_STATES = "states"
STATE_IDX_LABEL = 0 #: :py:class:`datetime.timedelta` instance
STATE_IDX_DEPARR = 1 #: :py:class:`datetime.datetime` instance. Departure if outbound/backwards, arrival if inbound/forwards.
STATE_IDX_DEPARRMODE = 2 #: mode id
STATE_IDX_TRIP = 3 #: trip id
STATE_IDX_SUCCPRED = 4 #: stop identifier or TAZ identifier
STATE_IDX_SEQ = 5 #: sequence (for trip)
STATE_IDX_SEQ_SUCCPRED = 6 #: sequence for successor/predecessor
STATE_IDX_LINKTIME = 7 #: :py:class:`datetime.timedelta` instance
STATE_IDX_LINKFARE = 8 #: fare cost, float
STATE_IDX_LINKCOST = 9 #: link generalized cost, float for hyperpath/stochastic,
STATE_IDX_LINKDIST = 10 #: link distance, float
STATE_IDX_COST = 11 #: cost float, for hyperpath/stochastic assignment
STATE_IDX_ARRDEP = 12 #: :py:class:`datetime.datetime` instance. Arrival if outbound/backwards, departure if inbound/forwards.
# these are also the demand_mode_type values
STATE_MODE_ACCESS = "access"
STATE_MODE_EGRESS = "egress"
STATE_MODE_TRANSFER = "transfer"
# new
STATE_MODE_TRIP = "transit" # onboard
BUMP_EXPERIENCED_COST = 999999
HUGE_COST = 9999
def __init__(self, trip_list_dict):
"""
Constructor from dictionary mapping attribute to value.
"""
self.__dict__.update(trip_list_dict)
#: Direction is one of :py:attr:`PathSet.DIR_OUTBOUND` or :py:attr:`PathSet.DIR_INBOUND`
#: Preferred time is a datetime.time object
if trip_list_dict[Passenger.TRIP_LIST_COLUMN_TIME_TARGET] == "arrival":
self.direction = PathSet.DIR_OUTBOUND
self.pref_time = trip_list_dict[Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME]
self.pref_time_min = trip_list_dict[Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME_MIN]
elif trip_list_dict[Passenger.TRIP_LIST_COLUMN_TIME_TARGET] == "departure":
self.direction = PathSet.DIR_INBOUND
self.pref_time = trip_list_dict[Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME]
self.pref_time_min = trip_list_dict[Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME_MIN]
else:
raise Exception("Don't understand trip_list %s: %s" % (Passenger.TRIP_LIST_COLUMN_TIME_TARGET, str(trip_list_dict)))
#: Dict of path-num -> { cost:, probability:, states: [List of (stop_id, stop_state)]}
self.pathdict = {}
def goes_somewhere(self):
"""
Does this path go somewhere? Does the destination differ from the origin?
"""
return (self.__dict__[Passenger.TRIP_LIST_COLUMN_ORIGIN_TAZ_ID] != self.__dict__[Passenger.TRIP_LIST_COLUMN_DESTINATION_TAZ_ID])
def path_found(self):
"""
Was a a transit path found from the origin to the destination with the constraints?
"""
return len(self.pathdict) > 0
def num_paths(self):
"""
Number of paths in the PathSet
"""
return len(self.pathdict)
def reset(self):
"""
Delete my states, something went wrong and it won't work out.
"""
self.pathdict = {}
@staticmethod
def set_user_class(trip_list_df, new_colname):
"""
Adds a column called user_class by applying the configured user class function.
"""
trip_list_df[new_colname] = trip_list_df.apply(PathSet.CONFIGURED_FUNCTIONS[PathSet.USER_CLASS_FUNCTION], axis=1)
@staticmethod
def verify_weight_config(modes_df, output_dir, routes, capacity_constraint, trip_list_df):
"""
Verify that we have complete weight configurations for the user classes and modes in the given DataFrame.
Trips with invalid weight configurations will be dropped from the trip list and warned about.
The parameter modes_df is a dataframe with the user_class, purpose, demand_mode_type and demand_mode combinations
found in the demand file.
If *capacity_constraint* is true, make sure there's an at_capacity weight on the transit supply mode links
to enforce it.
Returns updated trip_list_df.
"""
(verify, error_str) = PathSet.verify_weights(PathSet.WEIGHTS_DF)
# Join - make sure that all demand combinations (user class, purpose, demand mode type and demand mode) are configured
weight_check = pd.merge(left=modes_df,
right=PathSet.WEIGHTS_DF,
on=[PathSet.WEIGHTS_COLUMN_USER_CLASS,
PathSet.WEIGHTS_COLUMN_PURPOSE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE],
how='left')
FastTripsLogger.debug("demand_modes x weights: \n%s" % weight_check.to_string())
FastTripsLogger.debug("trip_list_df head=\n%s" % str(trip_list_df.head()))
# If something is missing, warn and remove those trips
null_supply_mode_weights = weight_check.loc[pd.isnull(weight_check[PathSet.WEIGHTS_COLUMN_SUPPLY_MODE])]
if len(null_supply_mode_weights) > 0:
# warn
FastTripsLogger.warn("The following user_class, demand_mode_type, demand_mode combinations exist in the demand file but are missing from the weight configuration:")
FastTripsLogger.warn("\n%s" % null_supply_mode_weights.to_string())
# remove those trips -- need to do it one demand mode type at a time
null_supply_mode_weights = null_supply_mode_weights[[PathSet.WEIGHTS_COLUMN_USER_CLASS,
PathSet.WEIGHTS_COLUMN_PURPOSE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE]]
null_supply_mode_weights["to_remove"] = 1
for demand_mode_type in [PathSet.STATE_MODE_ACCESS, PathSet.STATE_MODE_EGRESS, PathSet.STATE_MODE_TRIP]:
remove_trips = null_supply_mode_weights.loc[null_supply_mode_weights[PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE]==demand_mode_type].copy()
if len(remove_trips) == 0: continue
remove_trips.rename(columns={PathSet.WEIGHTS_COLUMN_DEMAND_MODE:"%s_mode" % demand_mode_type}, inplace=True)
remove_trips.drop([PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE], axis=1, inplace=True)
FastTripsLogger.debug("Removing for \n%s" % remove_trips)
trip_list_df = pd.merge(left = trip_list_df,
right = remove_trips,
how = "left")
FastTripsLogger.debug("Removing\n%s" % trip_list_df.loc[pd.notnull(trip_list_df["to_remove"])])
# keep only those not flagged to_remove
trip_list_df = trip_list_df.loc[pd.isnull(trip_list_df["to_remove"])]
trip_list_df.drop(["to_remove"], axis=1, inplace=True)
# demand_mode_type and demand_modes implicit to all travel : xfer walk, xfer wait, initial wait
user_classes = modes_df[[PathSet.WEIGHTS_COLUMN_USER_CLASS, PathSet.WEIGHTS_COLUMN_PURPOSE]].drop_duplicates().reset_index()
implicit_df = pd.DataFrame({ PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE:[ 'transfer'],
PathSet.WEIGHTS_COLUMN_DEMAND_MODE :[ 'transfer'],
PathSet.WEIGHTS_COLUMN_SUPPLY_MODE :[ 'transfer'] })
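# Give both frames a constant 'key' column and merge on it: this is a cross join, pairing
# every (user_class, purpose) with the implicit transfer row defined above.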
user_classes['key'] = 1
implicit_df['key'] = 1
implicit_df = pd.merge(left=user_classes, right=implicit_df, on='key')
implicit_df.drop(['index','key'], axis=1, inplace=True)
# FastTripsLogger.debug("implicit_df: \n%s" % implicit_df)
weight_check = pd.merge(left=implicit_df, right=PathSet.WEIGHTS_DF,
on=[PathSet.WEIGHTS_COLUMN_USER_CLASS,
PathSet.WEIGHTS_COLUMN_PURPOSE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE,
PathSet.WEIGHTS_COLUMN_SUPPLY_MODE],
how='left')
FastTripsLogger.debug("implicit demand_modes x weights: \n%s" % weight_check.to_string())
if pd.isnull(weight_check[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME]).sum() > 0:
error_str += "\nThe following user_class, purpose, demand_mode_type, demand_mode, supply_mode combinations exist in the demand file but are missing from the weight configuration:\n"
error_str += weight_check.loc[pd.isnull(weight_check[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME])].to_string()
error_str += "\n\n"
# transfer penalty check
tp_index = pd.DataFrame({ PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE:['transfer'],
PathSet.WEIGHTS_COLUMN_DEMAND_MODE :['transfer'],
PathSet.WEIGHTS_COLUMN_SUPPLY_MODE :['transfer'],
PathSet.WEIGHTS_COLUMN_WEIGHT_NAME :['transfer_penalty']})
uc_purp_index = PathSet.WEIGHTS_DF[[PathSet.WEIGHTS_COLUMN_USER_CLASS, PathSet.WEIGHTS_COLUMN_PURPOSE]].drop_duplicates()
FastTripsLogger.debug("uc_purp_index: \n%s" % uc_purp_index)
# these are all the transfer penalties we have
transfer_penalties = pd.merge(left=tp_index, right=PathSet.WEIGHTS_DF, how='left')
FastTripsLogger.debug("transfer_penalties: \n%s" % transfer_penalties)
transfer_penalty_check = pd.merge(left=uc_purp_index, right=transfer_penalties, how='left')
FastTripsLogger.debug("transfer_penalty_check: \n%s" % transfer_penalty_check)
# missing transfer penalty
if pd.isnull(transfer_penalty_check[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME]).sum() > 0:
error_str += "\nThe following user class x purpose are missing a transfer penalty:\n"
error_str += transfer_penalty_check.loc[pd.isnull(transfer_penalty_check[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME])].to_string()
error_str += "\n\n"
bad_pen = transfer_penalty_check.loc[transfer_penalty_check[PathSet.WEIGHTS_COLUMN_WEIGHT_VALUE] < PathSet.MIN_TRANSFER_PENALTY]
if len(bad_pen) > 0:
error_str += "\nThe following user class x purpose path weights have invalid (too small) transfer penalties. MIN=(%f)\n" % PathSet.MIN_TRANSFER_PENALTY
error_str += bad_pen.to_string()
error_str += "\nConfigure smaller min_transfer_penalty AT YOUR OWN RISK since this will make path generation slow/unreliable.\n\n"
# If *capacity_constraint* is true, make sure there's an at_capacity weight on the transit supply mode links
# to enforce it.
if capacity_constraint:
# see if it's here already -- we don't know how to handle that...
at_capacity = PathSet.WEIGHTS_DF.loc[ PathSet.WEIGHTS_DF[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == "at_capacity" ]
if len(at_capacity) > 0:
error_str += "\nFound at_capacity path weights explicitly set when about to set these for hard capacity constraints.\n"
error_str += at_capacity.to_string()
error_str += "\n\n"
else:
# set it for all user_class x transit x demand_mode x supply_mode
transit_weights_df = PathSet.WEIGHTS_DF.loc[PathSet.WEIGHTS_DF[PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE] == PathSet.STATE_MODE_TRIP,
[PathSet.WEIGHTS_COLUMN_USER_CLASS,
PathSet.WEIGHTS_COLUMN_PURPOSE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE,
PathSet.WEIGHTS_COLUMN_SUPPLY_MODE]].copy()
transit_weights_df.drop_duplicates(inplace=True)
transit_weights_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME ] = "at_capacity"
transit_weights_df[PathSet.WEIGHTS_COLUMN_WEIGHT_VALUE] = PathSet.HUGE_COST
transit_weights_df[PathSet.WEIGHTS_GROWTH_TYPE] = PathSet.CONSTANT_GROWTH_MODEL
FastTripsLogger.debug("Adding capacity-constraint weights:\n%s" % transit_weights_df.to_string())
PathSet.WEIGHTS_DF = pd.concat([PathSet.WEIGHTS_DF, transit_weights_df], axis=0)
PathSet.WEIGHTS_DF.sort_values(by=[PathSet.WEIGHTS_COLUMN_USER_CLASS,
PathSet.WEIGHTS_COLUMN_PURPOSE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE,
PathSet.WEIGHTS_COLUMN_SUPPLY_MODE,
PathSet.WEIGHTS_COLUMN_WEIGHT_NAME], inplace=True)
if len(error_str) > 0:
FastTripsLogger.fatal(error_str)
sys.exit(2)
# add mode numbers to weights DF for relevant rows
PathSet.WEIGHTS_DF = routes.add_numeric_mode_id(PathSet.WEIGHTS_DF,
id_colname=PathSet.WEIGHTS_COLUMN_SUPPLY_MODE,
numeric_newcolname=PathSet.WEIGHTS_COLUMN_SUPPLY_MODE_NUM,
warn=True) # don't fail if some supply modes are configured but not used, they may be for future runs
FastTripsLogger.debug("PathSet weights: \n%s" % PathSet.WEIGHTS_DF)
export_columns = [PathSet.WEIGHTS_COLUMN_USER_CLASS,
PathSet.WEIGHTS_COLUMN_PURPOSE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE,
PathSet.WEIGHTS_COLUMN_SUPPLY_MODE_NUM,
PathSet.WEIGHTS_COLUMN_WEIGHT_NAME,
PathSet.WEIGHTS_COLUMN_WEIGHT_VALUE,
PathSet.WEIGHTS_GROWTH_TYPE,
PathSet.WEIGHTS_GROWTH_LOG_BASE,
PathSet.WEIGHTS_GROWTH_LOGISTIC_MAX,
PathSet.WEIGHTS_GROWTH_LOGISTIC_MID]
PathSet.WEIGHTS_DF.reindex(columns=export_columns).to_csv(os.path.join(output_dir,PathSet.OUTPUT_WEIGHTS_FILE),
columns=export_columns,
sep=" ", index=False)
# add placeholder weights (ivt weight) for fares - one for each user_class, purpose, transit demand mode
# these will be updated based on the person's value of time in calculate_cost()
fare_weights = PathSet.WEIGHTS_DF.loc[ (PathSet.WEIGHTS_DF[PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE]==PathSet.STATE_MODE_TRIP) &
(PathSet.WEIGHTS_DF[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME ]== "in_vehicle_time_min")]
fare_weights = fare_weights[[PathSet.WEIGHTS_COLUMN_USER_CLASS,
PathSet.WEIGHTS_COLUMN_PURPOSE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE,
PathSet.WEIGHTS_COLUMN_SUPPLY_MODE,
PathSet.WEIGHTS_COLUMN_SUPPLY_MODE_NUM,
PathSet.WEIGHTS_COLUMN_WEIGHT_NAME,
PathSet.WEIGHTS_COLUMN_WEIGHT_VALUE]].copy().drop_duplicates()
fare_weights[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME ] = "fare" # SIM_COL_PAX_FARE
PathSet.WEIGHTS_DF = pd.concat([PathSet.WEIGHTS_DF, fare_weights])
FastTripsLogger.debug("PathSet.WEIGHTS_DF with fare weights: \n%s" % PathSet.WEIGHTS_DF)
return trip_list_df
@staticmethod
def verify_weights(weights):
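"""
Verify the path weights table: all required columns must be present, and the growth-model
qualifier columns must be consistent with each row's growth type (constant/exponential rows
take no qualifiers, logarithmic rows need a log base, logistic rows need max and mid values).
Returns (is_valid, error_str); error_str is empty when the configuration is valid.
"""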
# First, verify required columns are found
error_str = ""
weight_cols = list(weights.columns.values)
FastTripsLogger.debug("verify_weight_config:\n%s" % weights.to_string())
if (PathSet.WEIGHTS_COLUMN_USER_CLASS not in weight_cols):
error_str+='{} not in weight_cols\n'.format(PathSet.WEIGHTS_COLUMN_USER_CLASS)
if (PathSet.WEIGHTS_COLUMN_PURPOSE not in weight_cols):
error_str+='{} not in weight_cols\n'.format(PathSet.WEIGHTS_COLUMN_PURPOSE)
if (PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE not in weight_cols):
error_str+='{} not in weight_cols\n'.format(PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE)
if (PathSet.WEIGHTS_COLUMN_DEMAND_MODE not in weight_cols):
error_str+='{} not in weight_cols\n'.format(PathSet.WEIGHTS_COLUMN_DEMAND_MODE)
if (PathSet.WEIGHTS_COLUMN_SUPPLY_MODE not in weight_cols):
error_str+='{} not in weight_cols\n'.format(PathSet.WEIGHTS_COLUMN_SUPPLY_MODE)
if (PathSet.WEIGHTS_COLUMN_WEIGHT_NAME not in weight_cols):
error_str+='{} not in weight_cols\n'.format(PathSet.WEIGHTS_COLUMN_WEIGHT_NAME)
if (PathSet.WEIGHTS_COLUMN_WEIGHT_VALUE not in weight_cols):
error_str+='{} not in weight_cols\n'.format(PathSet.WEIGHTS_COLUMN_WEIGHT_VALUE)
if (PathSet.WEIGHTS_GROWTH_TYPE not in weight_cols):
error_str+='{} not in weight_cols\n'.format(PathSet.WEIGHTS_GROWTH_TYPE)
constant_exp_slice = weights.loc[
weights[PathSet.WEIGHTS_GROWTH_TYPE].isin(
[PathSet.CONSTANT_GROWTH_MODEL, PathSet.EXP_GROWTH_MODEL]),
]
logarithmic_slice = weights.loc[
weights[PathSet.WEIGHTS_GROWTH_TYPE] == PathSet.LOGARITHMIC_GROWTH_MODEL,
]
logistic_slice = weights.loc[
weights[PathSet.WEIGHTS_GROWTH_TYPE] == PathSet.LOGISTIC_GROWTH_MODEL,
]
# Verify that no extraneous values are set for constant and exponential functions
if not pd.isnull(constant_exp_slice.reindex([
PathSet.WEIGHTS_GROWTH_LOG_BASE,
PathSet.WEIGHTS_GROWTH_LOGISTIC_MAX,
PathSet.WEIGHTS_GROWTH_LOGISTIC_MID,
], axis='columns')).values.all():
error_str += 'Linear or Exponential qualifier includes unnecessary modifier(s)\n'
if not pd.isnull(logarithmic_slice.reindex([
PathSet.WEIGHTS_GROWTH_LOGISTIC_MAX,
PathSet.WEIGHTS_GROWTH_LOGISTIC_MID,
], axis='columns')).values.all():
error_str += 'Logarithmic qualifier includes unnecessary modifier(s)\n'
if not pd.isnull(logistic_slice.reindex([
PathSet.WEIGHTS_GROWTH_LOG_BASE,
], axis='columns')).values.all():
error_str += 'Logistic qualifier includes log_base modifier\n'
if not pd.notnull(logarithmic_slice.reindex([
PathSet.WEIGHTS_GROWTH_LOG_BASE,
],
axis='columns')).values.all():
error_str += 'Logarithmic qualifier missing necessary log_base modifier\n'
if not pd.notnull(logistic_slice.reindex([
PathSet.WEIGHTS_GROWTH_LOGISTIC_MAX,
PathSet.WEIGHTS_GROWTH_LOGISTIC_MID,
], axis='columns')).values.all():
error_str += 'Logistic qualifier missing necessary modifiers\n'
if error_str:
error_str = '\n-------Errors: pathweight_ft.txt---------------\n' + error_str
return (not error_str), error_str
def __str__(self):
"""
Readable string version of the path.
Note: If inbound trip, then the states are in reverse order (egress to access)
"""
ret_str = "Dict vars:\n"
for k,v in self.__dict__.items():
ret_str += "%30s => %-30s %s\n" % (str(k), str(v), str(type(v)))
# ret_str += PathSet.states_to_str(self.states, self.direction)
return ret_str
@staticmethod
def write_paths(passengers_df, output_dir):
"""
Write the assigned paths to the given output file.
:param passengers_df: Passenger paths assignment results
:type passengers_df: :py:class:`pandas.DataFrame` instance
:param output_dir: Output directory
:type output_dir: string
"""
# get trip information -- board stops, board trips and alight stops
passenger_trips = passengers_df.loc[passengers_df[Passenger.PF_COL_LINK_MODE]==PathSet.STATE_MODE_TRIP].copy()
ptrip_group = passenger_trips.groupby([Passenger.PERSONS_COLUMN_PERSON_ID, Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID])
# these are Series
board_stops_str = ptrip_group.A_id.apply(lambda x:','.join(x))
board_trips_str = ptrip_group.trip_id.apply(lambda x:','.join(x))
alight_stops_str= ptrip_group.B_id.apply(lambda x:','.join(x))
board_stops_str.name = 'board_stop_str'
board_trips_str.name = 'board_trip_str'
alight_stops_str.name = 'alight_stop_str'
# get walking times
walk_links = passengers_df.loc[(passengers_df[Passenger.PF_COL_LINK_MODE]==PathSet.STATE_MODE_ACCESS )| \
(passengers_df[Passenger.PF_COL_LINK_MODE]==PathSet.STATE_MODE_TRANSFER)| \
(passengers_df[Passenger.PF_COL_LINK_MODE]==PathSet.STATE_MODE_EGRESS )].copy()
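# pf_linktime is a timedelta64; dividing by np.timedelta64(1,'m') converts it to float minutes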
walk_links['linktime_str'] = walk_links.pf_linktime.apply(lambda x: "%.2f" % (x/np.timedelta64(1,'m')))
walklink_group = walk_links[['person_id','trip_list_id_num','linktime_str']].groupby(['person_id','trip_list_id_num'])
walktimes_str = walklink_group.linktime_str.apply(lambda x:','.join(x))
# aggregate to one line per person_id, trip_list_id
print_passengers_df = passengers_df[['person_id','trip_list_id_num','pathmode','A_id','B_id',Passenger.PF_COL_PAX_A_TIME]].groupby(['person_id','trip_list_id_num']).agg(
{'pathmode' :'first', # path mode
'A_id' :'first', # origin
'B_id' :'last', # destination
Passenger.PF_COL_PAX_A_TIME :'first' # start time
})
# put them all together
print_passengers_df = pd.concat([print_passengers_df,
board_stops_str,
board_trips_str,
alight_stops_str,
walktimes_str], axis=1)
print_passengers_df.reset_index(inplace=True)
print_passengers_df.sort_values(by=['trip_list_id_num'], inplace=True)
print_passengers_df.rename(columns=
{'pathmode' :'mode',
'A_id' :'originTaz',
'B_id' :'destinationTaz',
Passenger.PF_COL_PAX_A_TIME :'startTime_time',
'board_stop_str' :'boardingStops',
'board_trip_str' :'boardingTrips',
'alight_stop_str' :'alightingStops',
'linktime_str' :'walkingTimes'}, inplace=True)
print_passengers_df['startTime'] = print_passengers_df['startTime_time'].apply(Util.datetime64_formatter)
print_passengers_df = print_passengers_df[['trip_list_id_num','person_id','mode','originTaz','destinationTaz','startTime',
'boardingStops','boardingTrips','alightingStops','walkingTimes']]
print_passengers_df.to_csv(os.path.join(output_dir, PathSet.PATHS_OUTPUT_FILE), sep="\t", index=False)
# passengerId mode originTaz destinationTaz startTime boardingStops boardingTrips alightingStops walkingTimes
@staticmethod
def write_path_times(passengers_df, output_dir):
"""
Write the assigned path times to the given output file.
:param passengers_df: Passenger path links
:type passengers_df: :py:class:`pandas.DataFrame` instance
:param output_dir: Output directory
:type output_dir: string
"""
passenger_trips = passengers_df.loc[passengers_df[Passenger.PF_COL_LINK_MODE]==PathSet.STATE_MODE_TRIP].copy()
###### TODO: this is really catering to output format; an alternative might be more appropriate
from .Assignment import Assignment
passenger_trips.loc[:, 'board_time_str'] = passenger_trips[Assignment.SIM_COL_PAX_BOARD_TIME ].apply(Util.datetime64_formatter)
passenger_trips.loc[:,'arrival_time_str'] = passenger_trips[Passenger.PF_COL_PAX_A_TIME].apply(Util.datetime64_formatter)
passenger_trips.loc[:, 'alight_time_str'] = passenger_trips[Assignment.SIM_COL_PAX_ALIGHT_TIME].apply(Util.datetime64_formatter)
# Aggregate (by joining) across each passenger + path
ptrip_group = passenger_trips.groupby([Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID])
# these are Series
board_time_str = ptrip_group['board_time_str' ].apply(lambda x:','.join(x))
arrival_time_str = ptrip_group['arrival_time_str'].apply(lambda x:','.join(x))
alight_time_str = ptrip_group['alight_time_str' ].apply(lambda x:','.join(x))
# Aggregate other fields across each passenger + path
pax_exp_df = passengers_df.groupby([Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID]).agg(
{# 'pathmode' :'first', # path mode
'A_id' :'first', # origin
'B_id' :'last', # destination
Passenger.PF_COL_PAX_A_TIME :'first', # start time
Passenger.PF_COL_PAX_B_TIME :'last', # end time
# TODO: cost needs to be updated for updated dwell & travel time
# 'cost' :'first', # total travel cost is calculated for the whole path
})
# Put them together and return
assert(len(pax_exp_df) == len(board_time_str))
pax_exp_df = pd.concat([pax_exp_df,
board_time_str,
arrival_time_str,
alight_time_str], axis=1)
# print pax_exp_df.to_string(formatters={'A_time':Assignment.datetime64_min_formatter,
# 'B_time':Assignment.datetime64_min_formatter}
# reset columns
print_pax_exp_df = pax_exp_df.reset_index()
print_pax_exp_df.sort_values(by=[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID], inplace=True)
print_pax_exp_df['A_time_str'] = print_pax_exp_df[Passenger.PF_COL_PAX_A_TIME].apply(Util.datetime64_formatter)
print_pax_exp_df['B_time_str'] = print_pax_exp_df[Passenger.PF_COL_PAX_B_TIME].apply(Util.datetime64_formatter)
# rename columns
print_pax_exp_df.rename(columns=
{#'pathmode' :'mode',
'A_id' :'originTaz',
'B_id' :'destinationTaz',
'A_time_str' :'startTime',
'B_time_str' :'endTime',
'arrival_time_str' :'arrivalTimes',
'board_time_str' :'boardingTimes',
'alight_time_str' :'alightingTimes',
# TODO: cost needs to be updated for updated dwell & travel time
# 'cost' :'travelCost',
}, inplace=True)
# reorder
print_pax_exp_df = print_pax_exp_df[[
Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
#'mode',
'originTaz',
'destinationTaz',
'startTime',
'endTime',
'arrivalTimes',
'boardingTimes',
'alightingTimes',
# 'travelCost',
]]
times_out = open(os.path.join(output_dir, PathSet.PATH_TIMES_OUTPUT_FILE), 'w')
print_pax_exp_df.to_csv(times_out,
sep="\t", float_format="%.2f", index=False)
@staticmethod
def split_transit_links(pathset_links_df, veh_trips_df, stops):
"""
Splits the transit links to their component links and returns.
So if a transit trip goes from stop A to D but passes stop B and C in between, the
row A->D will now be replaced by rows A->B, B->C, and C->D.
Adds "split_first" bool - True on the first veh link only
Note that this does *not* renumber the linknum field.
"""
from .Assignment import Assignment
if len(Assignment.TRACE_IDS) > 0:
FastTripsLogger.debug("split_transit_links: pathset_links_df (%d) trace\n%s" % (len(pathset_links_df),
pathset_links_df.loc[pathset_links_df[Passenger.TRIP_LIST_COLUMN_TRACE]==True].to_string()))
FastTripsLogger.debug("split_transit_links: pathset_links_df columns\n%s" % str(pathset_links_df.dtypes))
veh_links_df = Trip.linkify_vehicle_trips(veh_trips_df, stops)
veh_links_df["linkmode"] = "transit"
FastTripsLogger.debug("split_transit_links: veh_links_df\n%s" % veh_links_df.head(20).to_string())
# join the pathset links with the vehicle links
drop_cols = []
merge_cols = [Passenger.PF_COL_LINK_MODE,
Route.ROUTES_COLUMN_MODE,
Trip.TRIPS_COLUMN_ROUTE_ID,
Trip.TRIPS_COLUMN_TRIP_ID]
if Trip.TRIPS_COLUMN_TRIP_ID_NUM in pathset_links_df.columns.values:
merge_cols.append(Trip.TRIPS_COLUMN_TRIP_ID_NUM)
if Route.ROUTES_COLUMN_MODE_NUM in pathset_links_df.columns.values:
merge_cols.append(Route.ROUTES_COLUMN_MODE_NUM)
path2 = pd.merge(left =pathset_links_df,
right =veh_links_df,
on =merge_cols,
how ="left",
suffixes=["","_veh"])
path2["split_first"] = False
# delete anything irrelevant -- so keep non-transit links, and transit links WITH valid sequences
path2 = path2.loc[ (path2[Passenger.PF_COL_LINK_MODE]!=Route.MODE_TYPE_TRANSIT) |
( (path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT) &
(path2["A_seq_veh"]>=path2["A_seq"]) &
(path2["B_seq_veh"]<=path2["B_seq"]) ) ]
# These are the new columns -- incorporate them
path2.loc[ (path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT)&(path2["A_seq_veh"]==path2["A_seq"]), "split_first"] = True
# A_arrival_time datetime64[ns] => A time for intermediate links
path2.loc[ (path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT)&(path2["A_id"]!=path2["A_id_veh"]), Assignment.SIM_COL_PAX_A_TIME ] = path2["A_arrival_time"]
# no waittime, boardtime, missed_xfer except on first link
path2.loc[ (path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT)&(path2["A_id"]!=path2["A_id_veh"]), Assignment.SIM_COL_PAX_WAIT_TIME ] = None
path2.loc[ (path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT)&(path2["A_id"]!=path2["A_id_veh"]), Assignment.SIM_COL_PAX_BOARD_TIME ] = None
path2.loc[ (path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT)&(path2["A_id"]!=path2["A_id_veh"]), Assignment.SIM_COL_PAX_MISSED_XFER] = 0
# no alighttime except on last link
path2.loc[ (path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT)&(path2["B_id"]!=path2["B_id_veh"]), Assignment.SIM_COL_PAX_ALIGHT_TIME] = None
# route_id_num float64 => ignore
# A_id_veh object => A_id
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT, "A_id" ] = path2["A_id_veh"]
# A_id_num_veh float64 => A_id_num
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT, "A_id_num" ] = path2["A_id_num_veh"]
# A_seq_veh float64 => A_seq
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT, "A_seq" ] = path2["A_seq_veh"]
if "A_lat_veh" in path2.columns.values:
# A_lat_veh float64 => A_lat
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT, "A_lat" ] = path2["A_lat_veh"]
# A_lon_veh float64 => A_lon
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT, "A_lon" ] = path2["A_lon_veh"]
# drop these later
drop_cols.extend(["A_lat_veh","A_lon_veh"])
# B_id_veh object => B_id
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT, "B_id" ] = path2["B_id_veh"]
# B_id_num_veh float64 => B_id_num
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT, "B_id_num" ] = path2["B_id_num_veh"]
# B_seq_veh float64 => B_seq
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT, "B_seq" ] = path2["B_seq_veh"]
# B_arrival_time datetime64[ns] => new_B_time
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT, "new_B_time" ] = path2["B_arrival_time"]
# B_departure_time datetime64[ns] => ignore
if "B_lat_veh" in path2.columns.values:
# B_lat_veh float64 => B_lat
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT, "B_lat" ] = path2["B_lat_veh"]
# B_lon_veh float64 => B_lon
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT, "B_lon" ] = path2["B_lon_veh"]
# drop these later
drop_cols.extend(["B_lat_veh","B_lon_veh"])
# update the link time
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT,Assignment.SIM_COL_PAX_LINK_TIME] = path2[Assignment.SIM_COL_PAX_B_TIME] - path2[Assignment.SIM_COL_PAX_A_TIME]
# update transit distance
Util.calculate_distance_miles(path2, "A_lat","A_lon","B_lat","B_lon", "transit_distance")
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT,Assignment.SIM_COL_PAX_DISTANCE ] = path2["transit_distance"]
# revert these back to ints
path2[["A_id_num","B_id_num","A_seq","B_seq"]] = path2[["A_id_num","B_id_num","A_seq","B_seq"]].astype(int)
# we're done with the fields - drop them
drop_cols.extend(["transit_distance", "route_id_num",
"A_id_veh","A_id_num_veh","A_seq_veh","A_arrival_time","A_departure_time",
"B_id_veh","B_id_num_veh","B_seq_veh","B_arrival_time","B_departure_time"])
path2.drop(drop_cols, axis=1, inplace=True)
# renumber linknum? Let's not bother
# trace
if len(Assignment.TRACE_IDS) > 0:
FastTripsLogger.debug("split_transit_links: path2 (%d) trace\n%s" % (len(path2),
path2.loc[path2[Passenger.TRIP_LIST_COLUMN_TRACE]==True].to_string()))
FastTripsLogger.debug("split_transit_links: path2 columns\n%s" % str(path2.dtypes))
return path2
@staticmethod
def calculate_cost(STOCH_DISPERSION, pathset_paths_df, pathset_links_df, veh_trips_df,
trip_list_df, routes, tazs, transfers, stops=None, reset_bump_iter=False, is_skimming=False):
"""
This is equivalent to the C++ Path::calculateCost() method. Would it be faster to do it in C++?
It would require us to package up the networks and paths and send back and forth. :p
I think if we can do it using vectorized pandas operations, it should be fast, but we can compare/test.
It's also messier to have this in two places. Maybe we should delete it from the C++; the overlap calcs are only in here right now.
Returns pathset_paths_df with additional columns, Assignment.SIM_COL_PAX_FARE, Assignment.SIM_COL_PAX_COST, Assignment.SIM_COL_PAX_PROBABILITY, Assignment.SIM_COL_PAX_LOGSUM
And pathset_links_df with additional columns, Assignment.SIM_COL_PAX_FARE, Assignment.SIM_COL_PAX_FARE_PERIOD, Assignment.SIM_COL_PAX_COST and Assignment.SIM_COL_PAX_DISTANCE
"""
from .Assignment import Assignment
# if these are here already, remove them since we'll recalculate them
if Assignment.SIM_COL_PAX_COST in list(pathset_paths_df.columns.values):
pathset_paths_df.drop([Assignment.SIM_COL_PAX_COST,
Assignment.SIM_COL_PAX_LNPS,
Assignment.SIM_COL_PAX_PROBABILITY,
Assignment.SIM_COL_PAX_LOGSUM], axis=1, inplace=True)
pathset_links_df.drop([Assignment.SIM_COL_PAX_COST,
Assignment.SIM_COL_PAX_DISTANCE], axis=1, inplace=True)
# leaving this in for writing to CSV for debugging but I could take it out
pathset_paths_df.drop(["logsum_component"], axis=1, inplace=True)
if len(Assignment.TRACE_IDS) > 0:
FastTripsLogger.debug("calculate_cost: pathset_links_df trace\n%s" % str(pathset_links_df.loc[pathset_links_df[Passenger.TRIP_LIST_COLUMN_TRACE]==True]))
FastTripsLogger.debug("calculate_cost: trip_list_df trace\n%s" % str(trip_list_df.loc[trip_list_df[Passenger.TRIP_LIST_COLUMN_TRACE]==True]))
# Add fares -- need stop zones first if they're not there.
# We only need to do this once per pathset.
# todo -- could remove non-transit links for this?
FastTripsLogger.debug("calculate_cost columns:\n%s" % str(list(pathset_links_df.columns.values)))
if "A_zone_id" not in list(pathset_links_df.columns.values):
assert(stops is not None)
pathset_links_df = stops.add_stop_zone_id(pathset_links_df, "A_id", "A_zone_id")
pathset_links_df = stops.add_stop_zone_id(pathset_links_df, "B_id", "B_zone_id")
# This needs to be done fresh each time since simulation might change the board times and therefore the fare periods
pathset_links_df = routes.add_fares(pathset_links_df, is_skimming)
# base this on pathfinding distance
pathset_links_df[Assignment.SIM_COL_PAX_DISTANCE] = pathset_links_df[Passenger.PF_COL_LINK_DIST]
pathset_links_to_use = pathset_links_df
if PathSet.OVERLAP_SPLIT_TRANSIT:
pathset_links_to_use = PathSet.split_transit_links(pathset_links_df, veh_trips_df, stops)
else:
pathset_links_to_use["split_first"] = True # all transit links are first
# First, we need user class, purpose, demand modes, and value of time
pathset_links_cost_df = pd.merge(left =pathset_links_to_use,
right=trip_list_df[
#Passenger.TRIP_LIST_COLUMN_PERSON_ID,
#Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
Passenger.get_id_columns(is_skimming) +
[
Passenger.TRIP_LIST_COLUMN_USER_CLASS,
Passenger.TRIP_LIST_COLUMN_PURPOSE,
Passenger.TRIP_LIST_COLUMN_VOT,
Passenger.TRIP_LIST_COLUMN_ACCESS_MODE,
Passenger.TRIP_LIST_COLUMN_EGRESS_MODE,
Passenger.TRIP_LIST_COLUMN_TRANSIT_MODE,
]],
how ="left",
on =Passenger.get_id_columns(is_skimming))
#[Passenger.PERSONS_COLUMN_PERSON_ID, Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID])
# linkmode = demand_mode_type. Set demand_mode for the links
pathset_links_cost_df[PathSet.WEIGHTS_COLUMN_DEMAND_MODE] = None
pathset_links_cost_df.loc[ pathset_links_cost_df[Passenger.PF_COL_LINK_MODE]== PathSet.STATE_MODE_ACCESS , PathSet.WEIGHTS_COLUMN_DEMAND_MODE] = pathset_links_cost_df[Passenger.TRIP_LIST_COLUMN_ACCESS_MODE ]
pathset_links_cost_df.loc[ pathset_links_cost_df[Passenger.PF_COL_LINK_MODE]== PathSet.STATE_MODE_EGRESS , PathSet.WEIGHTS_COLUMN_DEMAND_MODE] = pathset_links_cost_df[Passenger.TRIP_LIST_COLUMN_EGRESS_MODE ]
pathset_links_cost_df.loc[ pathset_links_cost_df[Passenger.PF_COL_LINK_MODE]== PathSet.STATE_MODE_TRIP , PathSet.WEIGHTS_COLUMN_DEMAND_MODE] = pathset_links_cost_df[Passenger.TRIP_LIST_COLUMN_TRANSIT_MODE]
pathset_links_cost_df.loc[ pathset_links_cost_df[Passenger.PF_COL_LINK_MODE]== PathSet.STATE_MODE_TRANSFER, PathSet.WEIGHTS_COLUMN_DEMAND_MODE] = "transfer"
# Verify that it's set for every link
missing_demand_mode = pd.isnull(pathset_links_cost_df[PathSet.WEIGHTS_COLUMN_DEMAND_MODE]).sum()
assert(missing_demand_mode == 0)
# drop the individual mode columns, we have what we need
pathset_links_cost_df.drop([Passenger.TRIP_LIST_COLUMN_ACCESS_MODE,
Passenger.TRIP_LIST_COLUMN_EGRESS_MODE,
Passenger.TRIP_LIST_COLUMN_TRANSIT_MODE], axis=1, inplace=True)
# if bump_iter doesn't exist or if it needs to be reset
if reset_bump_iter or Assignment.SIM_COL_PAX_BUMP_ITER not in pathset_links_cost_df:
pathset_links_cost_df[Assignment.SIM_COL_PAX_BUMP_ITER] = -1
if len(Assignment.TRACE_IDS) > 0:
FastTripsLogger.debug("calculate_cost: pathset_links_cost_df trace\n%s" % str(pathset_links_cost_df.loc[pathset_links_cost_df[Passenger.TRIP_LIST_COLUMN_TRACE]==True]))
# Inner join with the weights - now each weight has a row
cost_df = pd.merge(left =pathset_links_cost_df,
right =PathSet.WEIGHTS_DF,
left_on =[Passenger.TRIP_LIST_COLUMN_USER_CLASS,
Passenger.TRIP_LIST_COLUMN_PURPOSE,
Passenger.PF_COL_LINK_MODE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE,
Passenger.TRIP_LIST_COLUMN_MODE],
right_on=[Passenger.TRIP_LIST_COLUMN_USER_CLASS,
Passenger.TRIP_LIST_COLUMN_PURPOSE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE,
PathSet.WEIGHTS_COLUMN_SUPPLY_MODE],
how ="inner")
# update the fare weight placeholder (ivt pathweight - utils per min) based on value of time (currency per hour)
# since generalized cost is in utils, (ivt utils/min)x(60 min/1 hour)x(hour/vot currency) is the weight (utils/currency)
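# e.g. (illustrative) an in_vehicle_time weight of 1.0 utils/min with a value of time of
# 15 currency/hour gives a fare weight of 1.0 * (60/15) = 4.0 utils per currency unit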
cost_df.loc[ cost_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME]==Assignment.SIM_COL_PAX_FARE, "weight_value" ] *= (60.0/cost_df[Passenger.TRIP_LIST_COLUMN_VOT])
if (len(Assignment.TRACE_IDS) > 0) and not is_skimming:
FastTripsLogger.debug("calculate_cost: cost_df\n%s" % str(cost_df.loc[cost_df[Passenger.TRIP_LIST_COLUMN_TRACE]==True].sort_values([
Passenger.TRIP_LIST_COLUMN_TRIP_LIST_ID_NUM,
Passenger.PF_COL_PATH_NUM,Passenger.PF_COL_LINK_NUM]).head(20)))
# NOW we split it into 3 lists -- access/egress, transit, and transfer
# This is because they will each be joined to tables specific to those kinds of mode categories, and so we don't want all the transit nulls on the other tables, etc.
cost_columns = list(cost_df.columns.values)
cost_df["var_value"] = np.nan # This means unset
cost_accegr_df = cost_df.loc[(cost_df[Passenger.PF_COL_LINK_MODE]==PathSet.STATE_MODE_ACCESS )|(cost_df[Passenger.PF_COL_LINK_MODE]==PathSet.STATE_MODE_EGRESS)]
cost_trip_df = cost_df.loc[(cost_df[Passenger.PF_COL_LINK_MODE]==PathSet.STATE_MODE_TRIP )]
cost_transfer_df = cost_df.loc[(cost_df[Passenger.PF_COL_LINK_MODE]==PathSet.STATE_MODE_TRANSFER)]
del cost_df
##################### First, handle Access/Egress link costs
for accegr_type in ["walk","bike","drive"]:
# make copies; we don't want to mess with originals
if accegr_type == "walk":
link_df = tazs.walk_df.copy()
mode_list = TAZ.WALK_MODE_NUMS
elif accegr_type == "bike":
mode_list = TAZ.BIKE_MODE_NUMS
# not supported yet
continue
else:
link_df = tazs.drive_df.copy()
mode_list = TAZ.DRIVE_MODE_NUMS
FastTripsLogger.debug("Access/egress link_df %s\n%s" % (accegr_type, link_df.head().to_string()))
if len(link_df) == 0:
continue
# format these with A & B instead of TAZ and Stop
link_df.reset_index(inplace=True)
link_df["A_id_num"] = -1
link_df["B_id_num"] = -1
link_df.loc[link_df[TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM].isin(TAZ.ACCESS_MODE_NUMS), "A_id_num"] = link_df[TAZ.WALK_ACCESS_COLUMN_TAZ_NUM ]
link_df.loc[link_df[TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM].isin(TAZ.ACCESS_MODE_NUMS), "B_id_num"] = link_df[TAZ.WALK_ACCESS_COLUMN_STOP_NUM]
link_df.loc[link_df[TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM].isin(TAZ.EGRESS_MODE_NUMS), "A_id_num"] = link_df[TAZ.WALK_ACCESS_COLUMN_STOP_NUM]
link_df.loc[link_df[TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM].isin(TAZ.EGRESS_MODE_NUMS), "B_id_num"] = link_df[TAZ.WALK_ACCESS_COLUMN_TAZ_NUM ]
link_df.drop([TAZ.WALK_ACCESS_COLUMN_TAZ_NUM, TAZ.WALK_ACCESS_COLUMN_STOP_NUM], axis=1, inplace=True)
assert(len(link_df.loc[link_df["A_id_num"] < 0]) == 0)
FastTripsLogger.debug("%s link_df =\n%s" % (accegr_type, link_df.head().to_string()))
# Merge access/egress with walk|bike|drive access/egress information
cost_accegr_df = pd.merge(left = cost_accegr_df,
right = link_df,
on = ["A_id_num",
PathSet.WEIGHTS_COLUMN_SUPPLY_MODE_NUM,
"B_id_num"],
how = "left")
# rename new columns so it's clear it's for walk|bike|drive
for colname in list(link_df.select_dtypes(include=['float64','int64']).columns.values):
# don't worry about join columns
if colname in ["A_id_num", PathSet.WEIGHTS_COLUMN_SUPPLY_MODE_NUM, "B_id_num"]: continue
# rename the rest
new_colname = "%s %s" % (colname, accegr_type)
cost_accegr_df.rename(columns={colname:new_colname}, inplace=True)
# use it, if relevant
cost_accegr_df.loc[ (cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == colname)&
(cost_accegr_df[PathSet.WEIGHTS_COLUMN_SUPPLY_MODE_NUM].isin(mode_list)), "var_value"] = cost_accegr_df[new_colname]
# Access/egress needs passenger trip departure, arrival and time_target
cost_accegr_df = pd.merge(left =cost_accegr_df,
right=trip_list_df[
#[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
#Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
Passenger.get_id_columns(is_skimming) + [
Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME,
Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME,
Passenger.TRIP_LIST_COLUMN_TIME_TARGET,
]],
how ="left",
on =Passenger.get_id_columns(is_skimming))
#[Passenger.PERSONS_COLUMN_PERSON_ID, Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID])
# drop links that are irrelevant based on departure time for access links, or arrival time for egress links
cost_accegr_df["check_time"] = cost_accegr_df[Assignment.SIM_COL_PAX_A_TIME] # departure time for access
cost_accegr_df.loc[ cost_accegr_df[TAZ.MODE_COLUMN_MODE_NUM].isin(TAZ.EGRESS_MODE_NUMS), "check_time" ] = cost_accegr_df[Assignment.SIM_COL_PAX_B_TIME] # arrival time for egress
cost_accegr_df["check_time"] = (cost_accegr_df["check_time"] - Assignment.NETWORK_BUILD_DATE_START_TIME)/np.timedelta64(1,'m')
# it's only drive links we need to check
cost_accegr_df["to_drop"] = False
if "%s %s" % (TAZ.DRIVE_ACCESS_COLUMN_START_TIME_MIN, "drive") in cost_accegr_df.columns.values:
cost_accegr_df.loc[ cost_accegr_df[TAZ.MODE_COLUMN_MODE_NUM].isin(TAZ.DRIVE_MODE_NUMS)&
((cost_accegr_df["check_time"] < cost_accegr_df["%s %s" % (TAZ.DRIVE_ACCESS_COLUMN_START_TIME_MIN, "drive")])|
(cost_accegr_df["check_time"] >= cost_accegr_df["%s %s" % (TAZ.DRIVE_ACCESS_COLUMN_END_TIME_MIN, "drive")])), "to_drop"] = True
# if len(Assignment.TRACE_IDS) > 0:
# FastTripsLogger.debug("cost_accegr_df=\n%s\ndtypes=\n%s" % (cost_accegr_df.loc[cost_accegr_df[Passenger.TRIP_LIST_COLUMN_TRACE]==True]].to_string(), str(cost_accegr_df.dtypes)))
FastTripsLogger.debug("Dropping %d rows from cost_accegr_df" % cost_accegr_df["to_drop"].sum())
cost_accegr_df = cost_accegr_df.loc[ cost_accegr_df["to_drop"]==False ]
cost_accegr_df.drop(["check_time","to_drop"], axis=1, inplace=True)
# penalty for arriving before preferred arrival time.
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_ARRIVE_EARLY_MIN )&
(cost_accegr_df[Passenger.PF_COL_LINK_MODE] == PathSet.STATE_MODE_ACCESS), "var_value"] = 0.0
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_ARRIVE_EARLY_MIN) &
(cost_accegr_df[Passenger.PF_COL_LINK_MODE] == PathSet.STATE_MODE_EGRESS) &
(cost_accegr_df[Passenger.TRIP_LIST_COLUMN_TIME_TARGET] == 'departure'), "var_value"] = 0.0
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_ARRIVE_EARLY_MIN )& \
(cost_accegr_df[Passenger.PF_COL_LINK_MODE] == PathSet.STATE_MODE_EGRESS)& \
(cost_accegr_df[Passenger.TRIP_LIST_COLUMN_TIME_TARGET] == 'arrival'), "var_value"] = (cost_accegr_df[Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME] - cost_accegr_df[Passenger.PF_COL_PAX_B_TIME])/np.timedelta64(1,'m')
# arrive early is not negative - that would be arriving late
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_ARRIVE_EARLY_MIN)&(cost_accegr_df["var_value"] < 0), "var_value"] = 0.0
# penalty for departing after preferred departure time.
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_DEPART_LATE_MIN) &
(cost_accegr_df[Passenger.PF_COL_LINK_MODE] == PathSet.STATE_MODE_EGRESS), "var_value"] = 0.0
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_DEPART_LATE_MIN )&
(cost_accegr_df[Passenger.PF_COL_LINK_MODE] == PathSet.STATE_MODE_ACCESS)&
(cost_accegr_df[Passenger.TRIP_LIST_COLUMN_TIME_TARGET] == 'arrival'), "var_value"] = 0.0
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_DEPART_LATE_MIN) &
(cost_accegr_df[Passenger.PF_COL_LINK_MODE] == PathSet.STATE_MODE_ACCESS) &
(cost_accegr_df[Passenger.TRIP_LIST_COLUMN_TIME_TARGET] == 'departure'), "var_value"] = (cost_accegr_df[Passenger.PF_COL_PAX_A_TIME] - cost_accegr_df[Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME])/ np.timedelta64(1, 'm')
# depart late is not negative - that would be departing early
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_DEPART_LATE_MIN)&(cost_accegr_df["var_value"] < 0), "var_value"] = 0.0
# constant growth = exponential growth with 0 percent growth rate
# depart before preferred or arrive after preferred means the passenger just missed something important
# Arrive late only impacts the egress link, so set the var_value equal to zero for the access link
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_ARRIVE_LATE_MIN ) & \
(cost_accegr_df[Passenger.PF_COL_LINK_MODE] == PathSet.STATE_MODE_ACCESS), "var_value"] = 0.0
# Arrive late only impacts those that have a preferred arrival time. If preferred departure time,
# set arrive late equal to zero. --This could have been done with previous line, but it would
# look ugly mixing and matching 'and' and 'or'.
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_ARRIVE_LATE_MIN) & \
(cost_accegr_df[Passenger.TRIP_LIST_COLUMN_TIME_TARGET] == Passenger.TIME_TARGET_DEPARTURE), "var_value"] = 0.0
# Calculate how late the person arrives after preferred time.
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_ARRIVE_LATE_MIN )& \
(cost_accegr_df[Passenger.PF_COL_LINK_MODE] == PathSet.STATE_MODE_EGRESS)& \
(cost_accegr_df[Passenger.TRIP_LIST_COLUMN_TIME_TARGET] == Passenger.TIME_TARGET_ARRIVAL), "var_value"] = \
(cost_accegr_df[Passenger.PF_COL_PAX_B_TIME] - cost_accegr_df[Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME])/np.timedelta64(1,'m')
# If arrived before preferred time, set the arrive late field to zero. You don't get a
# discount for arriving early.
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_ARRIVE_LATE_MIN) & \
(cost_accegr_df['var_value'] < 0), "var_value"] = 0
# preferred delay_min - departure means want to depart after that time
# Depart early only impacts the access link, so set the var_value equal to zero for the egress link
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_DEPART_EARLY_MIN )& \
(cost_accegr_df[Passenger.PF_COL_LINK_MODE] == PathSet.STATE_MODE_EGRESS), "var_value"] = 0.0
# Depart early only impacts those that have a preferred departure time. If preferred arrive time,
# set depart early equal to zero. --This could have been done with previous line, but it would
# look ugly mixing and matching 'and' and 'or'.
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_DEPART_EARLY_MIN) & \
(cost_accegr_df[Passenger.TRIP_LIST_COLUMN_TIME_TARGET] == Passenger.TIME_TARGET_ARRIVAL), "var_value"] = 0.0
# Calculate how early the person departs before the preferred time.
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_DEPART_EARLY_MIN) & \
(cost_accegr_df[Passenger.PF_COL_LINK_MODE] == PathSet.STATE_MODE_ACCESS) & \
(cost_accegr_df[Passenger.TRIP_LIST_COLUMN_TIME_TARGET] == Passenger.TIME_TARGET_DEPARTURE), "var_value"] = \
(cost_accegr_df[Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME] - cost_accegr_df[Passenger.PF_COL_PAX_A_TIME])/ np.timedelta64(1, 'm')
# If departing after preferred time, set the depart early field to zero. You don't get a
# discount for taking your time.
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_DEPART_EARLY_MIN) & \
(cost_accegr_df['var_value'] < 0), "var_value"] = 0
assert 0 == cost_accegr_df[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME].isin([PathSet.WEIGHT_NAME_DEPART_EARLY_MIN, PathSet.WEIGHT_NAME_ARRIVE_LATE_MIN])) & \
(cost_accegr_df['var_value'].isnull())].shape[0]
assert 0 == cost_accegr_df[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME].isin([PathSet.WEIGHT_NAME_DEPART_EARLY_MIN, PathSet.WEIGHT_NAME_ARRIVE_LATE_MIN])) & \
(cost_accegr_df['var_value']<0)].shape[0]
if len(Assignment.TRACE_IDS) > 0:
FastTripsLogger.debug("cost_accegr_df trace\n%s\ndtypes=\n%s" % (cost_accegr_df.loc[cost_accegr_df[Passenger.TRIP_LIST_COLUMN_TRACE]==True].to_string(), str(cost_accegr_df.dtypes)))
missing_accegr_costs = cost_accegr_df.loc[ pd.isnull(cost_accegr_df["var_value"]) ]
error_accegr_msg = "Missing %d out of %d access/egress var_value values" % (len(missing_accegr_costs), len(cost_accegr_df))
FastTripsLogger.debug(error_accegr_msg)
if len(missing_accegr_costs) > 0:
error_accegr_msg += "\n%s" % missing_accegr_costs.head(10).to_string()
FastTripsLogger.fatal(error_accegr_msg)
##################### Next, handle Transit Trip link costs
# set the fare var_values for split_first only
cost_trip_df.loc[(cost_trip_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == "fare")&(cost_trip_df["split_first"]==True), "var_value"] = cost_trip_df[Assignment.SIM_COL_PAX_FARE]
cost_trip_df.loc[(cost_trip_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == "fare")&(cost_trip_df["split_first"]==False), "var_value"] = 0
if len(Assignment.TRACE_IDS) > 0:
FastTripsLogger.debug("cost_trip_df trace\n%s\ndtypes=\n%s" % (cost_trip_df.loc[cost_trip_df[Passenger.TRIP_LIST_COLUMN_TRACE]==True].to_string(), str(cost_trip_df.dtypes)))
# if there's a board time, in_vehicle_time = new_B_time - board_time
# otherwise, in_vehicle_time = B time - A time (for when we split)
cost_trip_df.loc[(cost_trip_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == "in_vehicle_time_min")&pd.notnull(cost_trip_df[Assignment.SIM_COL_PAX_BOARD_TIME]), "var_value"] = \
(cost_trip_df[Assignment.SIM_COL_PAX_B_TIME] - cost_trip_df[Assignment.SIM_COL_PAX_BOARD_TIME])/np.timedelta64(1,'m')
cost_trip_df.loc[(cost_trip_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == "in_vehicle_time_min")& pd.isnull(cost_trip_df[Assignment.SIM_COL_PAX_BOARD_TIME]), "var_value"] = \
(cost_trip_df[Assignment.SIM_COL_PAX_B_TIME] - cost_trip_df[Assignment.SIM_COL_PAX_A_TIME])/np.timedelta64(1,'m')
# if in vehicle time is less than 0 then off by 1 day error
cost_trip_df.loc[(cost_trip_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == "in_vehicle_time_min")&(cost_trip_df["var_value"]<0), "var_value"] = cost_trip_df["var_value"] + (24*60)
# if there's a board time, wait time = board_time - A time
# otherwise, wait time = 0 (for when we split transit links)
cost_trip_df.loc[(cost_trip_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == "wait_time_min")&pd.notnull(cost_trip_df[Assignment.SIM_COL_PAX_BOARD_TIME]), "var_value"] = \
(cost_trip_df[Assignment.SIM_COL_PAX_BOARD_TIME] - cost_trip_df[Assignment.SIM_COL_PAX_A_TIME])/np.timedelta64(1,'m')
cost_trip_df.loc[(cost_trip_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == "wait_time_min")& pd.isnull(cost_trip_df[Assignment.SIM_COL_PAX_BOARD_TIME]), "var_value"] = 0
# which overcap column to use?
overcap_col = Trip.SIM_COL_VEH_OVERCAP
if Assignment.MSA_RESULTS and Trip.SIM_COL_VEH_MSA_OVERCAP in list(cost_trip_df.columns.values): overcap_col = Trip.SIM_COL_VEH_MSA_OVERCAP
# at cap is a binary, 1 if overcap >= 0 and they're not one of the lucky few that boarded
cost_trip_df["at_capacity"] = 0.0
if Assignment.SIM_COL_PAX_BOARD_STATE in list(cost_trip_df.columns.values):
cost_trip_df.loc[ (cost_trip_df[overcap_col] >= 0)&
(cost_trip_df[Assignment.SIM_COL_PAX_BOARD_STATE] != "board_easy")&
(cost_trip_df[Assignment.SIM_COL_PAX_BOARD_STATE] != "boarded"), "at_capacity" ] = 1.0
else:
cost_trip_df.loc[ (cost_trip_df[overcap_col] >= 0) , "at_capacity" ] = 1.0
cost_trip_df.loc[cost_trip_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == "at_capacity" , "var_value"] = cost_trip_df["at_capacity"]
cost_trip_df.loc[cost_trip_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == "overcap" , "var_value"] = cost_trip_df[overcap_col]
# overcap shouldn't be negative
cost_trip_df.loc[ (cost_trip_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == "overcap")&(cost_trip_df["var_value"]<0), "var_value"] = 0.0
if len(Assignment.TRACE_IDS) > 0:
FastTripsLogger.debug("cost_trip_df trace\n%s\ndtypes=\n%s" % (cost_trip_df.loc[cost_trip_df[Passenger.TRIP_LIST_COLUMN_TRACE]==True].to_string(), str(cost_trip_df.dtypes)))
missing_trip_costs = cost_trip_df.loc[ pd.isnull(cost_trip_df["var_value"]) ]
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from tqdm import tqdm
import time
import pandas as pd
import camping_server2.config as config
import configparser
class OgcampScraping:
def __init__(self):
self.driver = webdriver.Chrome()
self.driver.set_window_size(1200, 1200)
self.driver.get("https://www.5gcamp.com/./?mod=login&iframe=Y")
def get_data(self):
config = configparser.ConfigParser()
config.read('../keys/data.ini')
ogcamp = config['API_KEYS']
usr = ogcamp['OGCAMP_ID']
pwd = ogcamp['OGCAMP_PW']
id_input = self.driver.find_element_by_name('id')
pw_input = self.driver.find_element_by_name('pw')
submit = self.driver.find_element_by_css_selector('#btn_login')
id_input.send_keys(usr)
pw_input.send_keys(pwd)
submit.click()
time.sleep(2)
def get_url(self):
url_links = []
levels = []
for i in tqdm(range(1, 19)):
self.driver.get(f"https://www.5gcamp.com/?c=5g&p={i}")
items = self.driver.find_elements_by_xpath('//*[@id="camplist"]/div[2]/div[2]/div/div[2]/div')
level = [item.find_element_by_css_selector('li.star > p').text.strip() for item in items]
links = [item.find_element_by_css_selector('a').get_attribute('href') for item in items]
url_links.extend(links)
levels.extend(level)
self.df2 = pd.DataFrame(levels)
return url_links
def get_details(self):
links = self.get_url()
datas = []
for link in tqdm(links):
time.sleep(1)
self.driver.get(link)
title = self.driver.find_element_by_css_selector('#campcontents > div.viewheader > h3').text
addr = self.driver.find_element_by_css_selector(
'#vContent > h4.chead.address.first.fblack > a.clipboardCopy').text
# Exception handling for missing latitude/longitude values
try:
lats = self.driver.find_element_by_css_selector('#vContent > p > em').text
lat = lats.split('경도')[0].strip()
lon = '경도' + lats.split('경도')[1].strip()
except NoSuchElementException:
lats = "정보없음"
lat = "정보없음"
lon = "정보없음"
envs = self.driver.find_elements_by_css_selector('#vContent > div.facilities > div > div')
env = [env.find_element_by_css_selector("p.f_name").text for env in envs]
en = ', '.join(env)
try:
desc = self.driver.find_element_by_class_name('short_cont').text
except NoSuchElementException:
desc = "정보없음"
photoes = self.driver.find_elements_by_css_selector('#vContent > div.photos > div')
photos = [photos.find_element_by_css_selector('img').get_attribute('src') for photos in photoes]
photo = ', '.join(photos)
# Scroll the page to load reviews, then handle exceptions while crawling them
self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(1)
try:
self.driver.find_element_by_xpath('//*[@id="moreComment"]/a').click()
time.sleep(1)
reviewes = self.driver.find_elements_by_class_name('commentbox')
reviews = [reviews.find_element_by_class_name('cont').text for reviews in reviewes]
review = [review.replace("\n", ' ') for review in reviews]
review = ', '.join(review)
except NoSuchElementException:
reviewes = self.driver.find_elements_by_class_name('commentbox')
reviews = [reviews.find_element_by_class_name('cont').text for reviews in reviewes]
review = [review.replace("\n", ' ') for review in reviews]
review = ', '.join(review)
data = {
"title": title,
"addr": addr,
"lat": lat,
"lon": lon,
"environment": en,
"desc": desc,
"photo": photo,
"review": review,
}
datas.append(data)
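# Assumed step (not present in this excerpt): collect the scraped rows into a DataFrame so
# they can be column-joined with the difficulty levels stored in self.df2 below.
self.df = pd.DataFrame(datas)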
self.df3 = pd.concat([self.df, self.df2], axis=1)
from collections import defaultdict
import pandas as pd
import random
from anytree import Node, NodeMixin, LevelOrderIter, RenderTree
#This is the maximum size of the prefix, suffix and substring that will be counted
MAX_STR_SIZE = 8
#Class to denote the node for a generic summary data structure.
class SummaryDSNode(NodeMixin):
def __init__(self, name, parent=None, children=None):
super(SummaryDSNode, self).__init__()
self.name = name
self.frequency = 1
self.parent = parent
self.char_to_children_dict = {}
self.transition_probabilities = {}
#Compute transition probabilities based on Eq 5 of the paper
def update_transition_probabilities(self, root_node):
k = len(self.children)
total_frequency = sum([child.frequency for child in self.children])
numerator, denominator = k , k+1
if self.parent == root_node:
numerator = k + 1
else:
self.transition_probabilities[self.parent] = 1.0 / denominator
fraction = (numerator / denominator )
for child in self.children:
probability = 0.0
if total_frequency > 0:
probability = (child.frequency / total_frequency) * fraction
self.transition_probabilities[child] = probability
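# Worked example (illustrative): for a node whose parent is not the root, with k=2 children
# of frequencies 3 and 1: P(parent) = 1/3, fraction = 2/3, and the children get
# (3/4)*(2/3) = 0.5 and (1/4)*(2/3) = 1/6, so the outgoing probabilities sum to 1.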
#This class represents the entire generic summary data structure.
#Using a common tree (via anytree) for ease of coding.
#It can be replaced with more performant ones such as prefix trees, suffix trees etc.
class SummaryDataStructure:
#string_generator_fn is a function that takes a string as input
#and outputs a list of "substrings" of interest.
#e.g. all prefixes, suffixes or substrings.
#max_str_size: will be the largest prefix, substring, suffix string that will be created
#split_words: whether to ignore spaces in a string.
#if split_words is true, then "a b" will be inserted as two words a b .. else one word with space.
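# Example (illustrative; "words.txt" is a placeholder path):
#   sds = SummaryDataStructure(get_all_prefixes)
#   sds.update_summary_ds_from_file("words.txt")
#   selectivity_df = sds.get_selectivities()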
def __init__(self, string_generator_fn, max_str_size=MAX_STR_SIZE, split_words=True):
self.string_generator_fn = string_generator_fn
self.max_str_size = max_str_size
self.split_words = split_words
self.root_node = SummaryDSNode('')
def insert_string(self, string):
substrings_of_interest = self.string_generator_fn(string)
for substring in substrings_of_interest:
cur_node = self.root_node
for index, char in enumerate(substring):
if char in cur_node.char_to_children_dict:
cur_node = cur_node.char_to_children_dict[char]
else:
new_node = SummaryDSNode(substring[:index+1], parent=cur_node)
cur_node.char_to_children_dict[char] = new_node
cur_node = new_node
#Increment the frequency of the last node
cur_node.frequency = cur_node.frequency + 1
def update_summary_ds_from_file(self, input_file_name):
with open(input_file_name) as f:
for line in f:
strings = [line.strip()]
if self.split_words:
strings = line.strip().split()
for string in strings:
self.insert_string(string)
#returns a data frame with all the strings in the summary data structure and its frequencies
def get_selectivities(self):
string_frequency_dict = defaultdict(int)
for node in LevelOrderIter(self.root_node):
if node.is_root == False:
string_frequency_dict[node.name] = max(1, node.frequency - 1)
df = pd.DataFrame.from_dict(string_frequency_dict, orient='index')
df.index.name = "string"
df.columns = ["selectivity"]
return df
def update_transition_probabilities(self):
for node in LevelOrderIter(self.root_node):
if node.is_root == False:
node.update_transition_probabilities(self.root_node)
#For each node,
# get the transition probabilities of going to other nodes
# use it to get num_triplets_per_node positive random samples using weighted sampling
# get num_triplets_per_node random strings as negative samples
def get_triplets(self, random_seed=1234, num_triplets_per_node=4):
random.seed(random_seed)
self.update_transition_probabilities()
#Get all the strings - it is needed to get dissimilar strings
all_strings = [node.name for node in LevelOrderIter(self.root_node) if not node.is_root]
total_nodes = len(all_strings)
all_triplets = []
for node in LevelOrderIter(self.root_node):
#The root node is ornamental!
if node.is_root:
continue
candidate_nodes = []
candidate_probabilities = []
#get all the neighbors of this node
for other_node in node.transition_probabilities.keys():
candidate_nodes.append(other_node.name)
probability = node.transition_probabilities[other_node]
candidate_probabilities.append(probability)
for other_node in node.transition_probabilities.keys():
for other_other_node in other_node.transition_probabilities.keys():
candidate_nodes.append(other_other_node.name)
#probability of reaching other_other_node from node
new_probability = probability * other_node.transition_probabilities[other_other_node]
candidate_probabilities.append(new_probability)
if len(candidate_nodes) == 0:
negatives = random.choices(population=all_strings, k=num_triplets_per_node)
anchor = node.name
for index in range(num_triplets_per_node):
all_triplets.append( (anchor, anchor, negatives[index]) )
continue
#normalize probabilities if needed
candidate_probabilities_sum = sum(candidate_probabilities)
candidate_probabilities = [elem/candidate_probabilities_sum for elem in candidate_probabilities]
#Do a weighted random sampling of to get #num_triplets_per_node nodes
# from candidates based num_triplets_per_node
candidate_probabilities = list(candidate_probabilities)
positives = random.choices(population=candidate_nodes, k=num_triplets_per_node, weights=candidate_probabilities)
negatives = random.choices(population=all_strings, k=num_triplets_per_node)
anchor = node.name
for index in range(num_triplets_per_node):
all_triplets.append( (anchor, positives[index], negatives[index]) )
df = pd.DataFrame(all_triplets, columns = ["Anchor", "Positive", "Negative"])
return df
def print_tree(self):
for pre, fill, node in RenderTree(self.root_node):
print("%s%s:%d" % (pre, node.name, node.frequency))
def get_all_prefixes(string, max_size=MAX_STR_SIZE):
return [string[:j] for j in range(1, min(max_size, len(string)) + 1)]
def get_all_suffixes(string, max_size=MAX_STR_SIZE):
return [string[-j:] for j in range(1, min(max_size, len(string)) + 1)]
def get_all_substrings(string, max_size=MAX_STR_SIZE):
arr = []
n = len(string)
for i in range(0,n):
for j in range(i,n):
if (j+1 - i) <= max_size:
arr.append(string[i:(j+1)])
return arr
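# For example, with max_size large enough:
# get_all_prefixes("cat")   -> ["c", "ca", "cat"]
# get_all_suffixes("cat")   -> ["t", "at", "cat"]
# get_all_substrings("cat") -> ["c", "ca", "cat", "a", "at", "t"]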
#Naive way to compute all strings of interest that avoids the use of summary data structures
def aggregate_strings_of_interest(input_file_name, string_agg_fn,
max_size=MAX_STR_SIZE, split_words=True, output_file_name=None):
string_frequency_dict = defaultdict(int)
with open(input_file_name) as f:
for line in f:
words = [line.strip()]
if split_words:
words = line.strip().split()
for word in words:
strings = string_agg_fn(word, max_size)
for string in strings:
string_frequency_dict[string] += 1
df = | pd.DataFrame.from_dict(string_frequency_dict, orient='index') | pandas.DataFrame.from_dict |
import dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
import plotly.express as px
import os
import pandas as pd
from datetime import timedelta
app = dash.Dash(
__name__, meta_tags=[{"name": "viewport", "content": "width=device-width"}]
)
server = app.server
# Clean the data and generate plots
def main(confirmed, deaths, recovered, population):
output = {}
# Get population data
pop = pd.read_csv(population, skiprows=2, header=1)
pop = pop[['Country Name', '2019']]
pop = pop.replace('United States', 'US')
pop = pop.replace('Russian Federation', 'Russia')
pop.columns = ['country', 'population']
# declare columns to retain after melting the data
required_columns = ['Province/State', 'Country/Region', 'Lat', 'Long']
# Clean and save the files
dfc = import_data(confirmed, "confirmed", required_columns)
dfd = import_data(deaths, "deaths", required_columns)
dfr = import_data(recovered, "recovered", required_columns)
# Change to datetime
dfc["date"] = pd.to_datetime(dfc["date"])
dfd["date"] = pd.to_datetime(dfd["date"])
dfr["date"] = pd.to_datetime(dfr["date"])
#Merge everything
df = pd.merge(dfc, dfd, "left")
df = pd.merge(df, dfr, "left")
df = df.rename(columns={"confirmed" : "Confirmed", "deaths":"Deaths", "recovered":"Recovered", "Country/Region":"country"})
# Group by to country granularity
df = df.groupby(['country', 'date']).sum().reset_index()
# Join population data
df = df.merge(pop, 'left', left_on='country', right_on='country')
    # Get the list of all countries
list_of_countries = df['country']
output['list_of_countries'] = list_of_countries
latest_date = df["date"].max()
start_date = df["date"].min()
dfl = df[df.date == latest_date]
# Plot latest summary
df_summary = dfl.sum()
df_summary = pd.DataFrame(df_summary).T.drop(["country", "Lat", "Long", "population"], axis=1).T
df_summary.columns = ["total"]
df_summary["total"] = df_summary["total"].astype("int64")
df_summary = df_summary.reset_index().sort_values(by="total", ascending=False)
fig_summary = px.pie(df_summary, names='index', values='total', color_discrete_sequence=px.colors.qualitative.Dark2)
# fig_summary.update_traces(texttemplate='%{text:.2s}', textposition='outside',
# marker=dict(color='#ff7f0e'))
fig_summary.update_traces(textposition='inside', textinfo='label+value')
fig_summary.update_layout(xaxis_showgrid=False, yaxis_showgrid=False, xaxis_visible=True, yaxis_visible=False, xaxis_title_text="",
bargroupgap=0, bargap=0.1, plot_bgcolor="#1E1E1E", paper_bgcolor="#1E1E1E", font=dict(color="white"),
margin= {'t': 0, 'b': 10, 'l': 10, 'r': 0}, showlegend=False, width=200, height=200)
# Get last 24 hours cases reported
yesterday_date = df["date"].max() - pd.DateOffset(1)
dfy = df[df.date == yesterday_date]
dfl_change = pd.merge(dfl, dfy, "left", on=["country", "Lat", "Long"], suffixes=('_today', '_yesterday'))
dfl_change["Change in 24 hours, Confirmed"] = dfl_change["Confirmed_today"] - dfl_change["Confirmed_yesterday"]
dfl_change["Change in 24 hours, Deaths"] = dfl_change["Deaths_today"] - dfl_change["Deaths_yesterday"]
dfl_change["Change in 24 hours, Recovered"] = dfl_change["Recovered_today"] - dfl_change["Recovered_yesterday"]
dfl_change_summary = dfl_change.groupby(["country"]).sum()[["Confirmed_today", "Confirmed_yesterday", "Deaths_today","Recovered_today",
"Change in 24 hours, Confirmed", "Change in 24 hours, Deaths",
"Change in 24 hours, Recovered"]]
# Save results to output dictionary
output['df'] = df
output['dfl'] = dfl
output['dfl_change_summary'] = dfl_change_summary
output['fig_summary'] = fig_summary
output['latest_date'] = latest_date.date()
output['start_date'] = start_date.date()
return output
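# Hypothetical usage sketch (the CSV paths follow the usual JHU / World Bank file
# names and are assumptions, not files shipped with this app):
#   data = main('time_series_covid19_confirmed_global.csv',
#               'time_series_covid19_deaths_global.csv',
#               'time_series_covid19_recovered_global.csv',
#               'API_SP.POP.TOTL_DS2_en_csv_v2.csv')
#   df, fig_summary = data['df'], data['fig_summary']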
def import_data(data, val_name, required_columns):
df = pd.read_csv(data)
melt_columns = [col for col in df.columns if col not in required_columns]
df_melted = | pd.melt(df, id_vars=required_columns, var_name="date", value_vars = melt_columns, value_name=val_name) | pandas.melt |
# module model
import pandas as pd
from fbprophet import Prophet
import matplotlib.pyplot as plt
from sklearn import metrics, ensemble, model_selection
from sklearn.preprocessing import MinMaxScaler
from math import sqrt
import numpy as np
import datetime
from dateutil import relativedelta
import os
import io
import json
import base64
from xgboost import XGBRegressor
import tensorflow as tf
from tensorflow import keras
from statsmodels.tsa.ar_model import AutoReg
np.random.seed(42)
tf.random.set_seed(42)
def buildProphet(train_data_path, test_data_path):
print("\nBuilding Prophet model ...")
df = pd.read_csv(train_data_path)
df['TIMESTAMP'] = df['TIMESTAMP'].astype('datetime64')
df.set_index('TIMESTAMP',inplace=True)
y = df['RENEWABLES_PCT']
daily = y.resample('24H').mean()
dd = pd.DataFrame(daily)
dd.reset_index(inplace=True)
dd.columns = ['ds','y']
mR = Prophet(daily_seasonality=False)
mR.fit(dd)
futureR=mR.make_future_dataframe(periods=365*5)
forecastR=mR.predict(futureR)
rmse = -1.0
if len(test_data_path) > 0:
dft = pd.read_csv(test_data_path)
dft['TIMESTAMP'] = dft['TIMESTAMP'].astype('datetime64')
dft.set_index('TIMESTAMP',inplace=True)
dft_start_datetime = min(dft.index)
dft_end_datetime = max(dft.index)
actual_mean = dft['RENEWABLES_PCT'].resample('24H').mean()
predicted_mean = forecastR.loc[(forecastR['ds'] >= dft_start_datetime) & (forecastR['ds'] <= dft_end_datetime)]
predicted_mean.set_index('ds', inplace=True)
actual_mean = actual_mean[min(predicted_mean.index):]
mse = metrics.mean_squared_error(actual_mean, predicted_mean.yhat)
rmse = sqrt(mse)
print(str.format("Prophet RMSE: {:.2f}", rmse))
return rmse
def predictProphet(data_path,periods):
print("\nTraining prophet model with full dataset ...")
df = pd.read_csv(data_path)
df['TIMESTAMP'] = df['TIMESTAMP'].astype('datetime64')
df.set_index('TIMESTAMP',inplace=True)
y = df['RENEWABLES_PCT']
daily = y.resample('24H').mean()
dd = pd.DataFrame(daily)
dd.reset_index(inplace=True)
dd.columns = ['ds','y']
m = Prophet(daily_seasonality=False)
m.fit(dd)
future=m.make_future_dataframe(periods=periods)
print(str.format("\nPredicting with prophet model for {0} days ({1} years) ...",periods, int(periods/365)))
plt.subplot(1,1,1)
forecast=m.predict(future)
fig = m.plot(forecast,ylabel='Renewable Power Production %', xlabel='Date')
plt.suptitle('\nCA Predicted Renewable Power Production %')
#plt.title('\nCA Predicted Renewable Power Production %')
axes = plt.gca()
wd = os.path.dirname(data_path) + '/../images'
os.makedirs(wd, exist_ok=True)
fig.savefig(wd + '/prediction-prophet.png')
forecast.rename(columns={'ds':'TIMESTAMP'}, inplace=True)
forecast.set_index('TIMESTAMP',inplace=True)
prediction = pd.DataFrame({'RENEWABLES_PCT_MEAN':forecast['yhat'].resample('1Y').mean(),'RENEWABLES_PCT_LOWER':forecast['yhat_lower'].resample('1Y').mean(),'RENEWABLES_PCT_UPPER':forecast['yhat_upper'].resample('1Y').mean()})
return prediction
def rmse_calc(actual,predict):
predict = np.array(predict)
actual = np.array(actual)
distance = predict - actual
square_distance = distance ** 2
mean_square_distance = square_distance.mean()
score = np.sqrt(mean_square_distance)
return score
def transformDataset(df):
    # Add the pct from one and two steps back, plus the change at each of those lags
df['YESTERDAY'] = df['RENEWABLES_PCT'].shift()
df['YESTERDAY_DIFF'] = df['YESTERDAY'].diff()
df['YESTERDAY-1']=df['YESTERDAY'].shift()
df['YESTERDAY-1_DIFF'] = df['YESTERDAY-1'].diff()
df=df.dropna()
x_train=pd.DataFrame({'YESTERDAY':df['YESTERDAY'],'YESTERDAY_DIFF':df['YESTERDAY_DIFF'],'YESTERDAY-1':df['YESTERDAY-1'],'YESTERDAY-1_DIFF':df['YESTERDAY-1_DIFF']})
y_train = df['RENEWABLES_PCT']
return x_train,y_train
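# Worked example of the lag features built above (toy numbers): for
# RENEWABLES_PCT = [10, 12, 15, 11], the only row surviving dropna() is the last one,
# with YESTERDAY=15, YESTERDAY_DIFF=3 (15-12), YESTERDAY-1=12, YESTERDAY-1_DIFF=2 (12-10).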
def buildRandomForestRegression(train_data_path,test_data_path):
print("\nBuilding Random Forest Regression Model ...")
print("Preparing training dataset ...")
df = pd.read_csv(train_data_path)
df['TIMESTAMP'] = df['TIMESTAMP'].astype('datetime64')
df.set_index('TIMESTAMP',inplace=True)
df = df.resample('1M').mean()
x_train, y_train = transformDataset(df)
print("Preparing testing dataset ...")
dt = pd.read_csv(test_data_path)
dt['TIMESTAMP'] = dt['TIMESTAMP'].astype('datetime64')
dt.set_index('TIMESTAMP',inplace=True)
x_test, y_test = transformDataset(dt)
print("Searching for best regressor ...")
model = ensemble.RandomForestRegressor()
param_search = {
'n_estimators': [100],
'max_features': ['auto'],
'max_depth': [10]
}
tscv = model_selection.TimeSeriesSplit(n_splits=2)
rmse_score = metrics.make_scorer(rmse_calc, greater_is_better = False)
gsearch = model_selection.GridSearchCV(estimator=model, cv=tscv, param_grid=param_search, scoring=rmse_score)
gsearch.fit(x_train, y_train)
best_score = gsearch.best_score_
best_model = gsearch.best_estimator_
y_true = y_test.values
print("Predicting with best regressor ...")
y_pred = best_model.predict(x_test)
mse = metrics.mean_squared_error(y_true, y_pred)
rmse = sqrt(mse)
print(str.format("Random Forest Regression RMSE: {:.2f}", rmse))
return rmse
def predictRandomForestRegression(data_path,periods):
print("\nTraining Random Forest Regression model with full dataset ...")
df = pd.read_csv(data_path)
df['TIMESTAMP'] = df['TIMESTAMP'].astype('datetime64')
df.set_index('TIMESTAMP',inplace=True)
dfmean = df.resample('1M').mean()
dfmin = df.resample('1M').min()
dfmax = df.resample('1M').max()
x_train,y_train = transformDataset(dfmean)
xmin_train, ymin_train = transformDataset(dfmin)
xmax_train, ymax_train = transformDataset(dfmax)
model = ensemble.RandomForestRegressor()
model_min = ensemble.RandomForestRegressor()
model_max = ensemble.RandomForestRegressor()
param_search = {
'n_estimators': [100],
'max_features': ['auto'],
'max_depth': [10]
}
tscv = model_selection.TimeSeriesSplit(n_splits=2)
rmse_score = metrics.make_scorer(rmse_calc, greater_is_better = False)
gsearch = model_selection.GridSearchCV(estimator=model, cv=tscv, param_grid=param_search, scoring=rmse_score)
gsearch_min = model_selection.GridSearchCV(estimator=model_min, cv=tscv, param_grid=param_search, scoring=rmse_score)
gsearch_max = model_selection.GridSearchCV(estimator=model_max, cv=tscv, param_grid=param_search, scoring=rmse_score)
gsearch.fit(x_train, y_train)
gsearch_min.fit(xmin_train, ymin_train)
gsearch_max.fit(xmax_train, ymax_train)
best_score = gsearch.best_score_
best_model = gsearch.best_estimator_
best_model_min = gsearch_min.best_estimator_
best_model_max = gsearch_max.best_estimator_
print("\nPredicting with Random Forest regressor ...")
prediction = pd.DataFrame(columns=['TIMESTAMP','RENEWABLES_PCT'])
l = len(x_train)
x_pred = x_train.iloc[[l-1]]
y_pred = best_model.predict(x_pred)
xmin_pred = xmin_train.iloc[[l-1]]
ymin_pred = best_model_min.predict(xmin_pred)
xmax_pred = xmax_train.iloc[[l-1]]
ymax_pred = best_model_max.predict(xmax_pred)
prediction = prediction.append({'TIMESTAMP':x_pred.index[0],'RENEWABLES_PCT_MEAN':y_pred[0],'RENEWABLES_PCT_LOWER':ymin_pred[0],'RENEWABLES_PCT_UPPER':ymax_pred[0]}, ignore_index=True)
for i in range(1,periods):
ti = prediction.iloc[i-1]['TIMESTAMP'] + pd.offsets.DateOffset(months=1)
xi_pred = pd.DataFrame({'YESTERDAY':y_pred,'YESTERDAY_DIFF':y_pred-x_pred['YESTERDAY'],'YESTERDAY-1':x_pred['YESTERDAY'],'YESTERDAY-1_DIFF':x_pred['YESTERDAY_DIFF']})
yi_pred = best_model.predict(xi_pred)
xmini_pred = pd.DataFrame({'YESTERDAY':ymin_pred,'YESTERDAY_DIFF':ymin_pred-xmin_pred['YESTERDAY'],'YESTERDAY-1':xmin_pred['YESTERDAY'],'YESTERDAY-1_DIFF':xmin_pred['YESTERDAY_DIFF']})
        ymini_pred = best_model_min.predict(xmini_pred)
xmaxi_pred = pd.DataFrame({'YESTERDAY':ymax_pred,'YESTERDAY_DIFF':ymax_pred-xmax_pred['YESTERDAY'],'YESTERDAY-1':xmax_pred['YESTERDAY'],'YESTERDAY-1_DIFF':xmax_pred['YESTERDAY_DIFF']})
        ymaxi_pred = best_model_max.predict(xmaxi_pred)
prediction = prediction.append({'TIMESTAMP':ti,'RENEWABLES_PCT_MEAN':yi_pred[0],'RENEWABLES_PCT_LOWER':ymini_pred[0],'RENEWABLES_PCT_UPPER':ymaxi_pred[0]}, ignore_index=True)
x_pred = xi_pred
y_pred = yi_pred
xmin_pred = xmini_pred
ymin_pred = ymini_pred
xmax_pred = xmaxi_pred
ymax_pred = ymaxi_pred
prediction.set_index('TIMESTAMP',inplace=True)
prediction = prediction.resample('1Y').mean()
p = prediction.plot()
p.set_title('CA Predicted Renewables % by Random Forest Regression')
p.set_ylabel('Renewables %')
wd = os.path.dirname(data_path) + '/../images'
os.makedirs(wd, exist_ok=True)
plt.savefig(wd + '/prediction-randomforest.png')
return prediction
# transform a time series dataset into a supervised learning dataset
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = pd.DataFrame(data)
cols = list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
# put it all together
agg = pd.concat(cols, axis=1)
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg.values
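# Worked example: series_to_supervised(pd.DataFrame([1, 2, 3, 4]), n_in=2, n_out=1)
# returns array([[1., 2., 3.],
#                [2., 3., 4.]]); two lagged inputs followed by the value to predict.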
# walk-forward validation for univariate data
def walk_forward_validation(data, n_test):
predictions = list()
# split dataset
train, test = train_test_split(data, n_test)
# seed history with training dataset
history = [x for x in train]
# step over each time-step in the test set
for i in range(len(test)):
# split test row into input and output columns
testX, testy = test[i, :-1], test[i, -1]
# fit model on history and make a prediction
yhat = xgboost_forecast(history, testX)
# store forecast in list of predictions
predictions.append(yhat)
# add actual observation to history for the next loop
history.append(test[i])
# summarize progress
print('>expected=%.1f, predicted=%.1f' % (testy, yhat))
# estimate prediction error
    error = metrics.mean_absolute_error(test[:, -1], predictions)
    return error, test[:, -1], predictions
# split a univariate dataset into train/test sets
def train_test_split(data, n_test):
return data[:-n_test, :], data[-n_test:, :]
# fit an xgboost model and make a one step prediction
def xgboost_forecast(train, testX):
# transform list into array
train = np.asarray(train)
# split into input and output columns
trainX, trainy = train[:, :-1], train[:, -1]
# fit model
model = XGBRegressor(objective='reg:squarederror', n_estimators=1000)
model.fit(trainX, trainy)
# make a one-step prediction
yhat = model.predict(np.asarray([testX]))
return yhat[0]
# walk-forward validation for univariate data
def walk_forward_validation(data, n_test):
predictions = list()
# split dataset
train, test = train_test_split(data, n_test)
# seed history with training dataset
history = [x for x in train]
# step over each time-step in the test set
for i in range(len(test)):
# split test row into input and output columns
testX, testy = test[i, :-1], test[i, -1]
# fit model on history and make a prediction
yhat = xgboost_forecast(history, testX)
# store forecast in list of predictions
predictions.append(yhat)
# add actual observation to history for the next loop
history.append(test[i])
# summarize progress
print('>expected=%.1f, predicted=%.1f' % (testy, yhat))
# estimate prediction error
error = metrics.mean_squared_error(test[:, -1], predictions)
return error, test[:, -1], predictions
def buildXGBoostRegression(train_data_path,test_data_path):
print("\nBuilding XGBoost Regression model ...")
df = pd.read_csv(train_data_path)
dt = pd.read_csv(test_data_path)
df = df.append(dt)
df['TIMESTAMP'] = df['TIMESTAMP'].astype('datetime64')
df.set_index('TIMESTAMP',inplace=True)
dfmean = df.resample('1Y').mean()
dmean = series_to_supervised(dfmean[['RENEWABLES_PCT']], n_in=2, n_out=1, dropnan=True)
# transform list into array
mse, y, yhat = walk_forward_validation(dmean, 8)
rmse = sqrt(mse)
print(str.format("XGBoostRegression RMSE: {:.2f}", rmse))
return rmse
def buildLSTM(train_data_path,test_data_path):
print("\nBuilding LSTM Model ...")
time_steps = 3
print("Preparing training dataset ...")
df = pd.read_csv(train_data_path)
df['TIMESTAMP'] = df['TIMESTAMP'].astype('datetime64')
df.set_index('TIMESTAMP',inplace=True)
df = df[['RENEWABLES_PCT']]
df = df.resample('1M').mean()
    scaler = MinMaxScaler()
    # Fit the scaler once on the raw training data, then reuse it for train and test
    scaling_model = scaler.fit(df)
    df = scaling_model.transform(df)
daily_train = pd.DataFrame(df, columns=['RENEWABLES_PCT']).reset_index()
x_train, y_train = create_lstm_dataset(pd.DataFrame({'ROW':range(0,len(daily_train)),'RENEWABLES_PCT':daily_train['RENEWABLES_PCT']}), daily_train['RENEWABLES_PCT'], time_steps)
print("Preparing testing dataset ...")
dt = pd.read_csv(test_data_path)
dt['TIMESTAMP'] = dt['TIMESTAMP'].astype('datetime64')
dt.set_index('TIMESTAMP',inplace=True)
dt = dt[['RENEWABLES_PCT']]
    daily_test = dt.resample('1M').mean()
    daily_test = scaling_model.transform(daily_test)
daily_test = pd.DataFrame(daily_test, columns=['RENEWABLES_PCT']).reset_index()
x_test, y_test = create_lstm_dataset(pd.DataFrame({'ROW':range(0,len(daily_test)),'RENEWABLES_PCT':daily_test['RENEWABLES_PCT']}), daily_test['RENEWABLES_PCT'], time_steps)
model = keras.Sequential()
model.add(keras.layers.LSTM(units=128,input_shape=(x_train.shape[1], x_train.shape[2])))
model.add(keras.layers.Dense(units=1))
model.compile(loss='mean_squared_error', optimizer=keras.optimizers.Adam(0.001))
history = model.fit(x_train, y_train, epochs=50, batch_size=12, validation_split=0.1, verbose=1, shuffle=False)
y_pred = model.predict(x_test)
y_pred = scaling_model.inverse_transform(y_pred)
    # Bring the held-out targets back to the original scale before scoring
    y_test = scaling_model.inverse_transform(y_test.reshape(-1, 1))
    mse = metrics.mean_squared_error(y_test, y_pred)
rmse = sqrt(mse)
print(str.format("LSTM RMSE: {:.2f}", rmse))
return rmse
def create_lstm_dataset(x, y, time_steps=1):
xs, ys = [], []
for i in range(len(x) - time_steps):
v = x.iloc[i:(i + time_steps)].values
xs.append(v)
ys.append(y.iloc[i + time_steps])
return np.array(xs), np.array(ys)
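# Example: with time_steps=3 and 10 input rows, create_lstm_dataset returns
# xs of shape (7, 3, n_features) and ys of shape (7,); each xs[i] holds rows
# i..i+2 and ys[i] is the target immediately after that window.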
def buildARModel(train_data_path, test_data_path):
print("\nBuilding Auto-Regression Model ...")
df = pd.read_csv(train_data_path)
df['TIMESTAMP'] = df['TIMESTAMP'].astype('datetime64')
df.set_index('TIMESTAMP', inplace=True)
df = df.resample('1M').mean()
train_series = df['RENEWABLES_PCT']
dt = | pd.read_csv(test_data_path) | pandas.read_csv |
"""
The io module provides support for reading and writing diffusion profile data
and diffusion coefficients data to csv files.
"""
import numpy as np
import pandas as pd
from scipy.interpolate import splev
from pydiffusion.core import DiffProfile, DiffSystem
import matplotlib.pyplot as plt
import threading
# To solve the problem when matplotlib figure freezes when input used
# https://stackoverflow.com/questions/34938593/matplotlib-freezes-when-input-used-in-spyder
prompt = False
promptText = ""
done = False
waiting = False
response = ""
regular_input = input
def threadfunc():
global prompt
global done
global waiting
global response
while not done:
if prompt:
prompt = False
response = regular_input(promptText)
waiting = True
def ask_input(text):
global waiting
global prompt
global promptText
promptText = text
prompt = True
while not waiting:
plt.pause(1.0)
waiting = False
return response
def ita_start():
global done
done = False
thread = threading.Thread(target=threadfunc)
thread.start()
def ita_finish():
global done
done = True
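# Hypothetical usage sketch of the non-blocking prompt helpers defined above
# (the prompt text is an example only):
#   ita_start()
#   answer = ask_input('Keep this fit? [y/n] ')
#   ita_finish()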
def save_csv(name=None, profile=None, diffsys=None):
"""
Save diffusion data as csv file.
Parameters
----------
name : str
        csv file name; the default is the profile name or the diffsys name.
profile : DiffProfile
DiffProfile to save.
diffsys : DiffSystem
DiffSystem to save. diffsys can be saved by itself or with profile.
Examples
--------
>>> save_csv('data.csv', profile, dsys)
"""
if profile is None and diffsys is None:
raise ValueError('No data entered')
    if name is not None and not name.endswith('.csv'):
        name += '.csv'
    if profile is None:
Xr, fD = diffsys.Xr, diffsys.Dfunc
X, DC = np.array([]), np.array([])
for i in range(diffsys.Np):
Xnew = np.linspace(Xr[i, 0], Xr[i, 1], 30)
Dnew = np.exp(splev(Xnew, fD[i]))
X = np.append(X, Xnew)
DC = np.append(DC, Dnew)
data = | pd.DataFrame({'X': X, 'DC': DC}) | pandas.DataFrame |
import torch
import pandas as pd
from typing import Tuple
from sentence_transformers import SentenceTransformer, CrossEncoder
from backend.recommenders.base_recommender import BaseRecommender
class YouTubeRecommender(BaseRecommender):
def __init__(self, corpus: pd.DataFrame, feature_to_column_mapping: dict = dict()):
if not feature_to_column_mapping:
feature_to_column_mapping = {
"search": "block",
"explore": ["video_title", "video_description"],
}
super(YouTubeRecommender, self).__init__(
corpus=corpus, feature_to_column_mapping=feature_to_column_mapping
)
def search(
self,
question: str,
encoder: SentenceTransformer,
cross_encoder: CrossEncoder,
top_k: int,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
semantic search
"""
column = self.feature_to_column_mapping["search"]
assert (
column in self.corpus_embeddings_dict
), f"Embeddings for [{column}] not found, please fit [{column}] first using the .fit() call"
question_embedding = self._encode(question, encoder)
hits = self._semamtic_search(question_embedding, column, top_k)
# score all retrieved passages with the cross_encoder
cross_inp = [[question, self.corpus[column][hit["corpus_id"]]] for hit in hits]
cross_scores = cross_encoder.predict(cross_inp, activation_fct=torch.sigmoid)
# sort results by the cross-encoder scores
for idx in range(len(cross_scores)):
hits[idx]["cross-score"] = cross_scores[idx]
hits[idx]["snippet"] = self.corpus[column][hits[idx]["corpus_id"]].replace(
"\n", " "
)
# return hits and recommendations
        hits = (
            pd.DataFrame(hits)
            .sort_values(by="cross-score", ascending=False)
            .reset_index(drop=True)
        )
from urllib.parse import urlparse
import pytest
import pandas as pd
import numpy as np
from visions.core.implementations.types import *
from visions.application.summaries.summary import CompleteSummary
@pytest.fixture(scope="class")
def summary():
return CompleteSummary()
def validate_summary_output(test_series, visions_type, correct_output, summary):
trial_output = summary.summarize_series(test_series, visions_type)
for metric, result in correct_output.items():
assert metric in trial_output, "Metric `{metric}` is missing".format(
metric=metric
)
assert (
trial_output[metric] == result
), "Expected value {result} for metric `{metric}`, got {output}".format(
result=result, metric=metric, output=trial_output[metric]
)
def test_integer_summary(summary, visions_type=visions_integer):
test_series = pd.Series([0, 1, 2, 3, 4])
correct_output = {
"n_unique": 5,
"mean": 2,
"median": 2,
"std": pytest.approx(1.58113, 0.00001),
"max": 4,
"min": 0,
"n_records": 5,
"n_zeros": 1,
}
validate_summary_output(test_series, visions_type, correct_output, summary)
def test_integer_missing_summary(summary, visions_type=visions_integer):
test_series = pd.Series([0, 1, 2, 3, 4])
correct_output = {
"n_unique": 5,
"mean": 2,
"median": 2,
"std": pytest.approx(1.58113, 0.00001),
"max": 4,
"min": 0,
"n_records": 5,
"n_zeros": 1,
"na_count": 0,
}
validate_summary_output(test_series, visions_type, correct_output, summary)
def test_float_missing_summary(summary, visions_type=visions_float):
test_series = pd.Series([0.0, 1.0, 2.0, 3.0, 4.0, np.nan])
correct_output = {
"n_unique": 5,
"median": 2,
"mean": 2,
"std": pytest.approx(1.58113, 0.00001),
"max": 4,
"min": 0,
"n_records": 6,
"n_zeros": 1,
"na_count": 1,
}
validate_summary_output(test_series, visions_type, correct_output, summary)
def test_bool_missing_summary(summary, visions_type=visions_bool):
test_series = pd.Series([True, False, True, True, np.nan])
correct_output = {"n_records": 5, "na_count": 1}
validate_summary_output(test_series, visions_type, correct_output, summary)
def test_categorical_missing_summary(summary, visions_type=visions_categorical):
test_series = pd.Series(
pd.Categorical(
[True, False, np.nan, "test"],
categories=[True, False, "test", "missing"],
ordered=True,
)
)
correct_output = {
"n_unique": 3,
"n_records": 4,
"na_count": 1,
"category_size": 4,
"missing_categorical_values": True,
}
validate_summary_output(test_series, visions_type, correct_output, summary)
def test_complex_missing_summary(summary, visions_type=visions_complex):
test_series = | pd.Series([0 + 0j, 0 + 1j, 1 + 0j, 1 + 1j, np.nan]) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
import math
import glob
import re
import os.path
import numpy as np
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from pathlib import Path
from io import StringIO
from pyproj import Transformer
from itertools import takewhile
from scipy import stats
import multiprocessing as mp
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from shapely.geometry import box, Point, LineString
def standardise_source(df):
# Dictionary containing method values to rename
remap_dict = {'emery': 'emery/levelling',
'levelling': 'emery/levelling',
'dunefill': np.NaN,
'rtk gps': 'gps',
'photogrammetry': 'aerial photogrammetry',
'stereo photogrammtery': 'aerial photogrammetry',
'ads80': 'aerial photogrammetry',
'photos': 'aerial photogrammetry',
'total station': 'total station',
'total station\t': 'total station',
'laser scanning': 'terrestrial laser scanning',
'satellite': 'satellite',
'gps rtk gps': 'gps'}
# Set all values to lower case for easier conversion
df['source'] = df.source.str.lower()
# Replace values
df['source'] = df.source.replace(remap_dict)
def to_vector(df,
fname='test.shp',
x='x',
y='y',
crs='EPSG:3577',
output_crs='EPSG:3577'):
# Convert datetimes to strings
df = df.copy()
is_datetime = df.dtypes == 'datetime64[ns]'
df.loc[:, is_datetime] = df.loc[:, is_datetime].astype(str)
# Export to file
    gdf = gpd.GeoDataFrame(data=df.loc[:, df.dtypes != 'datetime64[ns]'],
                           geometry=gpd.points_from_xy(x=df[x], y=df[y]),
                           crs=crs).to_crs(output_crs)
    gdf.to_file(fname)
    return gdf
def export_eval(df, output_name, output_crs='EPSG:3577'):
from shapely.geometry import box, Point, LineString
# Extract geometries
val_points = gpd.points_from_xy(x=df.val_x, y=df.val_y)
deacl_points = gpd.points_from_xy(x=df.deacl_x, y=df.deacl_y)
df_profiles = df.groupby('id').first()
profile_lines = df_profiles.apply(
lambda x: LineString([(x.start_x, x.start_y), (x.end_x, x.end_y)]), axis=1)
# Export validation points
val_gdf = gpd.GeoDataFrame(data=df,
geometry=val_points,
crs=output_crs).to_crs('EPSG:4326')
val_gdf.to_file(f'figures/eval/{output_name}_val.geojson',
driver='GeoJSON')
# Export DEACL points
deacl_gdf = gpd.GeoDataFrame(data=df,
geometry=deacl_points,
crs=output_crs).to_crs('EPSG:4326')
deacl_gdf.to_file(f'figures/eval/{output_name}_deacl.geojson',
driver='GeoJSON')
# Export profiles
profile_gdf = gpd.GeoDataFrame(data=df_profiles,
geometry=profile_lines,
crs=output_crs).to_crs('EPSG:4326')
profile_gdf.to_file(f'figures/eval/{output_name}_profiles.geojson',
driver='GeoJSON')
def deacl_val_stats(val_dist, deacl_dist, n=None, remove_bias=False):
np.seterr(all='ignore')
# Compute difference and bias
diff_dist = val_dist - deacl_dist
bias = diff_dist.mean()
if remove_bias:
deacl_dist += bias
diff_dist = val_dist - deacl_dist
# Compute stats
if n is None:
n = len(val_dist)
else:
n = sum(n)
mae = mean_absolute_error(val_dist, deacl_dist)
rmse = mean_squared_error(val_dist, deacl_dist)**0.5
if n > 1:
corr = np.corrcoef(x=val_dist, y=deacl_dist)[0][1]
stdev = diff_dist.std()
else:
corr = np.nan
stdev = np.nan
return pd.Series({
'n': n,
'mae': f'{mae:.2f}',
'rmse': f'{rmse:.2f}',
'stdev': f'{stdev:.2f}',
'corr': f'{corr:.3f}',
'bias': f'{bias:.2f}',
}).astype(float)
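# Worked example (toy numbers): val_dist = pd.Series([10, 20]) and
# deacl_dist = pd.Series([12, 18]) give bias=0.0, mae=2.0 and rmse=2.0; with
# remove_bias=True the mean offset is added onto deacl_dist before the errors
# are recomputed.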
def rse_tableformat(not_bias_corrected, bias_corrected, groupby='source'):
# Fix rounding and total observations
not_bias_corrected['n'] = not_bias_corrected['n'].astype(int)
not_bias_corrected[['bias', 'stdev', 'mae', 'rmse']] = not_bias_corrected[['bias', 'stdev', 'mae', 'rmse']].round(1)
not_bias_corrected['n'] = not_bias_corrected.groupby(groupby)['n'].sum()
# Move bias corrected values into brackets
not_bias_corrected['MAE (m)'] = (not_bias_corrected.mae.astype('str') + ' (' +
bias_corrected.mae.round(1).astype('str') + ')')
not_bias_corrected['RMSE (m)'] = (not_bias_corrected.rmse.astype('str') + ' (' +
bias_corrected.rmse.round(1).astype('str') + ')')
# Sort by MAE, rename columns
not_bias_corrected = (not_bias_corrected.sort_values('mae')
.drop(['mae', 'rmse'], axis=1)
.rename({'stdev': 'SD (m)', 'corr': 'Correlation', 'bias': 'Bias (m)'}, axis=1)
[['n', 'Bias (m)', 'MAE (m)', 'RMSE (m)', 'SD (m)', 'Correlation']])
return not_bias_corrected
def val_slope(profiles_df, intercept_df, datum=0, buffer=25, method='distance'):
# Join datum dist to full profile dataframe
profiles_datum_dist = (profiles_df.set_index(
['id', 'date'])[['distance', 'z']].join(intercept_df[f'{datum}_dist']))
if method == 'distance':
# Filter to measurements within distance of datum distance
beach_data = profiles_datum_dist[profiles_datum_dist.distance.between(
profiles_datum_dist[f'{datum}_dist'] - buffer,
profiles_datum_dist[f'{datum}_dist'] + buffer)]
elif method == 'height':
# Filter measurements within height of datum
beach_data = profiles_datum_dist.loc[
profiles_datum_dist.z.between(-buffer, buffer)]
# Calculate slope
beach_slope = beach_data.groupby(['id', 'date']).apply(
lambda x: stats.linregress(x=x.distance, y=x.z).slope)
return beach_slope.round(3)
def dms2dd(s):
# example: s = "0°51'56.29"
degrees, minutes, seconds = re.split('[°\'"]+', s)
if float(degrees) > 0:
dd = float(degrees) + float(minutes) / 60 + float(seconds) / (60 * 60)
else:
dd = float(degrees) - float(minutes) / 60 - float(seconds) / (60 * 60);
return dd
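# Example: dms2dd("151°18'16.30") ≈ 151.3045, while southern latitudes keep their
# sign, e.g. dms2dd("-33°42'20.65") ≈ -33.7057.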
def dist_angle(lon, lat, dist, angle):
lon_end = lon + dist * np.sin(angle * np.pi / 180)
lat_end = lat + dist * np.cos(angle * np.pi / 180)
return pd.Series({'end_y': lat_end, 'end_x': lon_end})
def interp_intercept(x, y1, y2, reverse=False):
"""
Find the intercept of two curves, given by the same x data
References:
----------
Source: https://stackoverflow.com/a/43551544/2510900
"""
def intercept(point1, point2, point3, point4):
"""find the intersection between two lines
the first line is defined by the line between point1 and point2
the first line is defined by the line between point3 and point4
each point is an (x,y) tuple.
So, for example, you can find the intersection between
intercept((0,0), (1,1), (0,1), (1,0)) = (0.5, 0.5)
Returns: the intercept, in (x,y) format
"""
def line(p1, p2):
A = (p1[1] - p2[1])
B = (p2[0] - p1[0])
C = (p1[0] * p2[1] - p2[0] * p1[1])
return A, B, -C
def intersection(L1, L2):
D = L1[0] * L2[1] - L1[1] * L2[0]
Dx = L1[2] * L2[1] - L1[1] * L2[2]
Dy = L1[0] * L2[2] - L1[2] * L2[0]
x = Dx / D
y = Dy / D
return x,y
L1 = line([point1[0],point1[1]], [point2[0],point2[1]])
L2 = line([point3[0],point3[1]], [point4[0],point4[1]])
R = intersection(L1, L2)
return R
try:
if isinstance(y2, (int, float)):
y2 = np.array([y2] * len(x))
if reverse:
x = x[::-1]
y1 = y1[::-1]
y2 = y2[::-1]
idx = np.argwhere(np.diff(np.sign(y1 - y2)) != 0)
xc, yc = intercept((x[idx], y1[idx]),((x[idx + 1], y1[idx + 1])),
((x[idx], y2[idx])), ((x[idx + 1], y2[idx + 1])))
return xc[0][0]
except:
return np.nan
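# Example: interp_intercept(np.array([0, 1, 2]), np.array([-1, 1, 3]), 0) returns 0.5,
# the linearly interpolated x position where the profile first crosses the z_val level.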
def dist_along_transect(dist, start_x, start_y, end_x, end_y):
transect_line = LineString([(start_x, start_y), (end_x, end_y)])
distance_coords = transect_line.interpolate(dist).coords.xy
return [coord[0] for coord in distance_coords]
def waterline_intercept(x,
dist_col='distance',
x_col='x',
y_col='y',
z_col='z',
z_val=0,
debug=False):
# Extract distance and coordinates of where the z_val first
# intersects with the profile line
dist_int = interp_intercept(x[dist_col].values, x[z_col].values, z_val)
x_int = interp_intercept(x[x_col].values, x[z_col].values, z_val)
y_int = interp_intercept(x[y_col].values, x[z_col].values, z_val)
# Identify last distance where the z_value intersects the profile
rev_int = interp_intercept(x[dist_col].values, x[z_col].values, z_val,
reverse=True)
# If first and last intersects are the identical, return data.
# If not, the comparison is invalid (i.e. NaN)
if dist_int == rev_int:
if debug: print('Single intersection found')
return pd.Series({f'{z_val}_dist': dist_int,
f'{z_val}_x': x_int,
f'{z_val}_y': y_int})
else:
if debug: print('Multiple intersections returned')
return pd.Series({f'{z_val}_dist': np.NaN,
f'{z_val}_x': np.NaN,
f'{z_val}_y': np.NaN})
def reproj_crs(in_data,
in_crs,
x='x',
y='y',
out_crs='EPSG:3577'):
# Reset index to allow merging new data with original data
in_data = in_data.reset_index(drop=True)
# Reproject coords to Albers and create geodataframe
trans = Transformer.from_crs(in_crs, out_crs, always_xy=True)
coords = trans.transform(in_data[x].values, in_data[y].values)
in_data[['x', 'y']] = pd.DataFrame(zip(*coords))
return in_data
def profiles_from_dist(profiles_df,
id_col='id',
dist_col='distance',
x_col='x',
y_col='y'):
# Compute origin points for each profile
min_ids = profiles_df.groupby(id_col)[dist_col].idxmin()
start_xy = profiles_df.loc[min_ids, [id_col, x_col, y_col]]
start_xy = start_xy.rename({x_col: f'start_{x_col}',
y_col: f'start_{y_col}'},
axis=1)
# Compute end points for each profile
max_ids = profiles_df.groupby(id_col)[dist_col].idxmax()
end_xy = profiles_df.loc[max_ids, [x_col, y_col]]
# Add end coords into same dataframe
start_xy = start_xy.reset_index(drop=True)
end_xy = end_xy.reset_index(drop=True)
start_xy[[f'end_{x_col}', f'end_{y_col}']] = end_xy
return start_xy
def perpendicular_line(input_line, length):
# Generate parallel lines either side of input line
left = input_line.parallel_offset(length / 2.0, 'left')
right = input_line.parallel_offset(length / 2.0, 'right')
# Create new line between centroids of parallel line.
# This should be perpendicular to the original line
return LineString([left.centroid, right.centroid])
def generate_transects(line_geom,
length=400,
interval=200,
buffer=20):
# Create tangent line at equal intervals along line geom
interval_dists = np.arange(buffer, line_geom.length, interval)
tangent_geom = [LineString([line_geom.interpolate(dist - buffer),
line_geom.interpolate(dist + buffer)])
for dist in interval_dists]
# Convert to geoseries and remove erroneous lines by length
tangent_gs = gpd.GeoSeries(tangent_geom)
tangent_gs = tangent_gs.loc[tangent_gs.length.round(1) <= buffer * 2]
# Compute perpendicular lines
return tangent_gs.apply(perpendicular_line, length=length)
def coastal_transects(bbox,
name,
interval=200,
transect_length=400,
simplify_length=200,
transect_buffer=20,
output_crs='EPSG:3577',
coastline='../input_data/Smartline.gdb',
land_poly='/g/data/r78/rt1527/shapefiles/australia/australia/cstauscd_r.shp'):
# Load smartline
coastline_gdf = gpd.read_file(coastline, bbox=bbox).to_crs(output_crs)
coastline_geom = coastline_gdf.geometry.unary_union.simplify(simplify_length)
# Load Australian land water polygon
land_gdf = gpd.read_file(land_poly, bbox=bbox).to_crs(output_crs)
land_gdf = land_gdf.loc[land_gdf.FEAT_CODE.isin(["mainland", "island"])]
land_geom = gpd.overlay(df1=land_gdf, df2=bbox).unary_union
# Extract transects along line
geoms = generate_transects(coastline_geom,
length=transect_length,
interval=interval,
buffer=transect_buffer)
# Test if end points of transects fall in water or land
p1 = gpd.GeoSeries([Point(i.coords[0]) for i in geoms])
p2 = gpd.GeoSeries([Point(i.coords[1]) for i in geoms])
p1_within_land = p1.within(land_geom)
p2_within_land = p2.within(land_geom)
# Create geodataframe, remove invalid land-land/water-water transects
transect_gdf = gpd.GeoDataFrame(data={'p1': p1_within_land,
'p2': p2_within_land},
geometry=geoms.values,
crs=output_crs)
transect_gdf = transect_gdf[~(transect_gdf.p1 == transect_gdf.p2)]
# Reverse transects so all point away from land
transect_gdf['geometry'] = transect_gdf.apply(
lambda i: LineString([i.geometry.coords[1],
i.geometry.coords[0]])
if i.p1 < i.p2 else i.geometry, axis=1)
# Export to file
transect_gdf[['geometry']].to_file(f'input_data/coastal_transects_{name}.geojson',
driver='GeoJSON')
def coastal_transects_parallel(
regions_gdf,
interval=200,
transect_length=400,
simplify_length=200,
transect_buffer=20,
overwrite=False,
output_path='input_data/combined_transects_wadot.geojson'):
if not os.path.exists(output_path) or overwrite:
if os.path.exists(output_path):
print('Removing existing file')
os.remove(output_path)
# Generate transects for each region
print('Generating transects')
with mp.Pool(mp.cpu_count()) as pool:
for i, _ in regions_gdf.iterrows():
name = str(i).replace(' ', '').replace('/', '').lower()
pool.apply_async(coastal_transects, [
regions_gdf.loc[[i]], name, interval, transect_length,
simplify_length, transect_buffer
])
pool.close()
pool.join()
# Load regional transects and combine into a single file
print('Combining data')
transect_list = glob.glob('input_data/coastal_transects_*.geojson')
        gdf = pd.concat(
            [gpd.read_file(shp) for shp in transect_list], ignore_index=True)
gdf = gdf.reset_index(drop=True)
gdf['profile'] = gdf.index.astype(str)
gdf.to_file(output_path, driver='GeoJSON')
# Clean files
[os.remove(f) for f in transect_list]
def preprocess_wadot(compartment,
overwrite=True,
fname='input_data/wadot/Coastline_Movements_20190819.gdb'):
beach = str(compartment.index.item())
fname_out = f'output_data/wadot_{beach}.csv'
print(f'Processing {beach:<80}', end='\r')
# Test if file exists
if not os.path.exists(fname_out) or overwrite:
# Read file and filter to AHD 0 shorelines
val_gdf = gpd.read_file(fname,
bbox=compartment).to_crs('EPSG:3577')
val_gdf = gpd.clip(gdf=val_gdf, mask=compartment, keep_geom_type=True)
val_gdf = val_gdf[(val_gdf.TYPE == 'AHD 0m') |
(val_gdf.TYPE == 'AHD 0m ')]
# Filter to post 1987 shorelines and set index to year
val_gdf = val_gdf[val_gdf.PHOTO_YEAR > 1987]
val_gdf = val_gdf.set_index('PHOTO_YEAR')
# If no data is returned, skip this iteration
if len(val_gdf.index) == 0:
print(f'Failed: {beach:<80}', end='\r')
return None
######################
# Generate transects #
######################
transect_gdf = gpd.read_file('input_data/combined_transects_wadot.geojson',
bbox=compartment)
transect_gdf = gpd.clip(gdf=transect_gdf, mask=compartment, keep_geom_type=True)
################################
# Identify 0 MSL intersections #
################################
output_list = []
# Select one year
for year in val_gdf.index.unique().sort_values():
# Extract validation contour
print(f'Processing {beach} {year:<80}', end='\r')
val_contour = val_gdf.loc[[year]].geometry.unary_union
# Copy transect data, and find intersects
# between transects and contour
intersect_gdf = transect_gdf.copy()
intersect_gdf['val_point'] = transect_gdf.intersection(val_contour)
to_keep = gpd.GeoSeries(intersect_gdf['val_point']).geom_type == 'Point'
intersect_gdf = intersect_gdf.loc[to_keep]
# If no data is returned, skip this iteration
if len(intersect_gdf.index) == 0:
print(f'Failed: {beach} {year:<80}', end='\r')
continue
# Add generic metadata
intersect_gdf['date'] = pd.to_datetime(str(year))
intersect_gdf['beach'] = beach
intersect_gdf['section'] = 'all'
intersect_gdf['source'] = 'aerial photogrammetry'
intersect_gdf['name'] = 'wadot'
intersect_gdf['id'] = (intersect_gdf.beach + '_' +
intersect_gdf.section + '_' +
intersect_gdf.profile)
# Add measurement metadata
intersect_gdf[['start_x', 'start_y']] = intersect_gdf.apply(
lambda x: pd.Series(x.geometry.coords[0]), axis=1)
intersect_gdf[['end_x', 'end_y']] = intersect_gdf.apply(
lambda x: pd.Series(x.geometry.coords[1]), axis=1)
intersect_gdf['0_dist'] = intersect_gdf.apply(
lambda x: Point(x.start_x, x.start_y).distance(x['val_point']), axis=1)
intersect_gdf[['0_x', '0_y']] = intersect_gdf.apply(
lambda x: pd.Series(x.val_point.coords[0]), axis=1)
# Add empty slope var (not possible to compute without profile data)
intersect_gdf['slope'] = np.nan
# Keep required columns
intersect_gdf = intersect_gdf[['id', 'date', 'beach',
'section', 'profile', 'name',
'source', 'slope', 'start_x',
'start_y', 'end_x', 'end_y',
'0_dist', '0_x', '0_y']]
# Append to file
output_list.append(intersect_gdf)
# Combine all year data and export to file
if len(output_list) > 0:
shoreline_df = pd.concat(output_list)
shoreline_df.to_csv(fname_out, index=False)
else:
print(f'Skipping {beach:<80}', end='\r')
def preprocess_dasilva2021(fname='input_data/dasilva2021/dasilva_etal_2021_shorelines.shp'):
beach = 'dasilva2021'
print(f'Processing {beach:<80}', end='\r')
# Read file and filter to AHD 0 shorelines
fname='input_data/dasilva2021/dasilva_etal_2021_shorelines.shp'
val_gdf = gpd.read_file(fname).to_crs('EPSG:3577')
val_gdf = val_gdf.loc[val_gdf.Year_ > 1987]
val_gdf['Year_'] = val_gdf.Year_.astype(str)
val_gdf = val_gdf.set_index('Year_')
# If no data is returned, skip this iteration
if len(val_gdf.index) == 0:
print(f'Failed: {beach:<80}', end='\r')
return None
######################
# Generate transects #
######################
transect_gdf = gpd.read_file('input_data/dasilva2021/dasilva_etal_2021_retransects.shp').to_crs('EPSG:3577')[['TransectID', 'Direction', 'order', 'geometry']]
transect_gdf.columns = ['profile', 'section', 'order', 'geometry']
transect_gdf = transect_gdf.sort_values('order').set_index('order')
transect_gdf['profile'] = transect_gdf.profile.astype(str)
################################
# Identify 0 MSL intersections #
################################
output_list = []
# Select one year
for year in val_gdf.index.unique().sort_values():
# Extract validation contour
print(f'Processing {beach} {year:<80}', end='\r')
val_contour = val_gdf.loc[[year]].geometry.unary_union
# Copy transect data, and find intersects
# between transects and contour
intersect_gdf = transect_gdf.copy()
intersect_gdf['val_point'] = transect_gdf.intersection(val_contour)
to_keep = gpd.GeoSeries(intersect_gdf['val_point']).geom_type == 'Point'
intersect_gdf = intersect_gdf.loc[to_keep]
# If no data is returned, skip this iteration
if len(intersect_gdf.index) == 0:
print(f'Failed: {beach} {year:<80}', end='\r')
continue
# Add generic metadata
intersect_gdf['date'] = pd.to_datetime(str(year))
intersect_gdf['beach'] = beach
intersect_gdf['source'] = 'satellite'
intersect_gdf['name'] = 'dasilva2021'
intersect_gdf['id'] = (intersect_gdf.beach + '_' +
intersect_gdf.section + '_' +
intersect_gdf.profile)
# Add measurement metadata
intersect_gdf[['start_x', 'start_y']] = intersect_gdf.apply(
lambda x: pd.Series(x.geometry.coords[0]), axis=1)
intersect_gdf[['end_x', 'end_y']] = intersect_gdf.apply(
lambda x: pd.Series(x.geometry.coords[1]), axis=1)
intersect_gdf['0_dist'] = intersect_gdf.apply(
lambda x: Point(x.start_x, x.start_y).distance(x['val_point']), axis=1)
intersect_gdf[['0_x', '0_y']] = intersect_gdf.apply(
lambda x: pd.Series(x.val_point.coords[0][0:2]), axis=1)
# Add empty slope var (not possible to compute without profile data)
intersect_gdf['slope'] = np.nan
# Keep required columns
intersect_gdf = intersect_gdf[['id', 'date', 'beach',
'section', 'profile', 'name',
'source', 'slope', 'start_x',
'start_y', 'end_x', 'end_y',
'0_dist', '0_x', '0_y']]
# Append to file
output_list.append(intersect_gdf)
# Combine all year data and export to file
if len(output_list) > 0:
shoreline_df = pd.concat(output_list)
shoreline_df.to_csv(f'output_data/{beach}.csv', index=False)
def preprocess_stirling(fname_out, datum=0):
# List containing files to import and all params to extract data
survey_xl = [
{'fname': 'input_data/stirling/2015 05 28 - From Stirling - Coastal Profiles 2014-2015 April-Feb with updated reef#2.xlsm',
'skiprows': 2,
'skipcols': 5,
'nrows': 100,
'meta_skiprows': 0,
'meta_nrows': 1,
'meta_usecols': [6, 7]},
{'fname': 'input_data/stirling/Coastal Profiles 2013-2014 JUL-MAY#2.xlsx',
'skiprows': 2,
'skipcols': 5,
'nrows': 100,
'meta_skiprows': 0,
'meta_nrows': 1,
'meta_usecols': [6, 7]},
{'fname': 'input_data/stirling/COASTAL PROFILES 2013 JAN - JUNE#2.xls',
'skiprows': 3,
'skipcols': 0,
'nrows': 40,
'meta_skiprows': 1,
'meta_nrows': 2,
'meta_usecols': [1, 2]},
{'fname': 'input_data/stirling/COASTAL PROFILES 2012 JUN - DEC#2.xls',
'skiprows': 3,
'skipcols': 0,
'nrows': 40,
'meta_skiprows': 1,
'meta_nrows': 2,
'meta_usecols': [1, 2]},
{'fname': 'input_data/stirling/COASTAL PROFILES 2011-2012 NOV - MAY#2.xls',
'skiprows': 3,
'skipcols': 0,
'nrows': 40,
'meta_skiprows': 1,
'meta_nrows': 2,
'meta_usecols': [1, 2]}
]
# List to contain processed profile data
output = []
# For each survey excel file in the list above:
for survey in survey_xl:
# Load profile start metadata
all_meta = pd.read_excel(survey['fname'],
sheet_name=None,
nrows=survey['meta_nrows'],
skiprows=survey['meta_skiprows'],
usecols=survey['meta_usecols'],
header=None,
on_demand=True)
# Load data
all_sheets = pd.read_excel(survey['fname'],
sheet_name=None,
skiprows=survey['skiprows'],
nrows=survey['nrows'],
parse_dates=False,
usecols=lambda x: 'Unnamed' not in str(x))
# Iterate through each profile in survey data
for profile_id in np.arange(1, 20).astype('str'):
# Extract profile start metadata and profile data
start_x, start_y = all_meta[profile_id].values[0]
sheet = all_sheets[profile_id].iloc[:,survey['skipcols']:]
# First set all column names to lower case strings
sheet.columns = (sheet.columns.astype(str)
.str.slice(0, 10)
.str.lower())
# Drop note columns and distance/angle offset
sheet = sheet.loc[:,~sheet.columns.str.contains('note|notes')]
sheet = sheet.drop(['dist', 'angle dd'], axis=1, errors='ignore')
# Expand date column values into rows for each sampling event
sheet.loc[:,sheet.columns[::4]] = sheet.columns[::4]
# Number date columns incrementally to match other fields
start_num = 1 if survey['skipcols'] > 0 else 0
rename_dict = {name: f'date.{i + start_num}' for
i, name in enumerate(sheet.columns[::4])}
sheet = sheet.rename(rename_dict, axis=1).reset_index()
sheet = sheet.rename({'x': 'x.0', 'y': 'y.0', 'z': 'z.0'}, axis=1)
# Reshape data into long format
profile_df = pd.wide_to_long(sheet,
stubnames=['date', 'x', 'y', 'z'],
i='index',
j='dropme',
sep='.').reset_index(drop=True)
# Set datetimes
profile_df['date'] = pd.to_datetime(profile_df.date,
errors='coerce',
dayfirst=True)
# Add profile metadata
profile_df['beach'] = 'stirling'
profile_df['section'] = 'all'
profile_df['profile'] = profile_id
profile_df['name'] = 'stirling'
profile_df['source'] = 'gps'
profile_df['start_x'] = start_x
profile_df['start_y'] = start_y
profile_df['id'] = (profile_df.beach + '_' +
profile_df.section + '_' +
profile_df.profile)
# Add results to list
output.append(profile_df.dropna())
# Combine all survey and profile data
profiles_df = pd.concat(output)
# Reproject Perth Coastal Grid coordinates into Australian Albers
pcg_crs = '+proj=tmerc +lat_0=0 +lon_0=115.8166666666667 ' \
'+k=0.9999990600000001 +x_0=50000 +y_0=3800000 ' \
'+ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs'
trans = Transformer.from_crs(pcg_crs, 'EPSG:3577', always_xy=True)
profiles_df['x'], profiles_df['y'] = trans.transform(
profiles_df.y.values, profiles_df.x.values)
profiles_df['start_x'], profiles_df['start_y'] = trans.transform(
profiles_df.start_y.values, profiles_df.start_x.values)
# Calculate per-point distance from start of profile
profiles_df['distance'] = profiles_df.apply(
lambda x: Point(x.start_x, x.start_y).distance(Point(x.x, x.y)), axis=1)
# Identify end of profiles by max distance from start, and merge back
max_dist = (profiles_df.sort_values('distance', ascending=False)
.groupby('id')['x', 'y']
.first()
.rename({'x': 'end_x', 'y': 'end_y'}, axis=1))
profiles_df = profiles_df.merge(max_dist, on='id')
# Find location and distance to water for datum height (e.g. 0 m AHD)
intercept_df = profiles_df.groupby(['id', 'date']).apply(
waterline_intercept, z_val=datum).dropna()
# Join into dataframe
shoreline_dist = intercept_df.join(
profiles_df.groupby(['id', 'date']).first())
# Keep required columns
shoreline_dist = shoreline_dist[['beach', 'section', 'profile', 'name',
'source', 'start_x', 'start_y',
'end_x', 'end_y', f'{datum}_dist',
f'{datum}_x', f'{datum}_y']]
# Export to file
shoreline_dist.to_csv(fname_out)
def preprocess_vicdeakin(fname,
datum=0):
# Dictionary to map correct CRSs to locations
crs_dict = {'apo': 'epsg:32754',
'cow': 'epsg:32755',
'inv': 'epsg:32755',
'leo': 'epsg:32755',
'mar': 'epsg:32754',
'pfa': 'epsg:32754',
'por': 'epsg:32755',
'prd': 'epsg:32755',
'sea': 'epsg:32755',
'wbl': 'epsg:32754'}
# Read data
profiles_df = pd.read_csv(fname,
parse_dates=['survey_date']).dropna()
# Restrict to pre-2019
profiles_df = profiles_df.loc[profiles_df.survey_date.dt.year < 2020]
profiles_df = profiles_df.reset_index(drop=True)
# Remove invalid profiles
invalid = ((profiles_df.location == 'leo') & (profiles_df.tr_id == 94))
profiles_df = profiles_df.loc[~invalid].reset_index(drop=True)
# Extract coordinates
coords = profiles_df.coordinates.str.findall(r'\d+\.\d+')
profiles_df[['x', 'y']] = pd.DataFrame(coords.values.tolist(),
dtype=np.float32)
# Add CRS and convert to Albers
profiles_df['crs'] = profiles_df.location.apply(lambda x: crs_dict[x])
profiles_df = profiles_df.groupby('crs', as_index=False).apply(
lambda x: reproj_crs(x, in_crs=x.crs.iloc[0])).drop('crs', axis=1)
profiles_df = profiles_df.reset_index(drop=True)
# Convert columns to strings and add unique ID column
profiles_df = profiles_df.rename({'location': 'beach',
'tr_id': 'profile',
'survey_date': 'date',
'z': 'z_dirty',
'z_clean': 'z'}, axis=1)
profiles_df['profile'] = profiles_df['profile'].astype(str)
profiles_df['section'] = 'all'
profiles_df['source'] = 'drone photogrammetry'
profiles_df['name'] = 'vicdeakin'
profiles_df['id'] = (profiles_df.beach + '_' +
profiles_df.section + '_' +
profiles_df.profile)
# Reverse profile distances by subtracting max distance from each
prof_max = profiles_df.groupby('id')['distance'].transform('max')
profiles_df['distance'] = (profiles_df['distance'] - prof_max).abs()
# Compute origin and end points for each profile and merge into data
start_end_xy = profiles_from_dist(profiles_df)
profiles_df = pd.merge(left=profiles_df, right=start_end_xy)
# Export each beach
for beach_name, beach in profiles_df.groupby('beach'):
# Create output file name
fname_out = f'output_data/vicdeakin_{beach_name}.csv'
print(f'Processing {fname_out:<80}', end='\r')
# Find location and distance to water for datum height (0 m AHD)
intercept_df = beach.groupby(['id', 'date']).apply(
waterline_intercept, z_val=datum).dropna()
# If the output contains data
if len(intercept_df.index) > 0:
# Join into dataframe
shoreline_dist = intercept_df.join(
beach.groupby(['id', 'date']).first())
# Compute validation slope and join into dataframe
slope = val_slope(beach, intercept_df, datum=datum)
shoreline_dist = shoreline_dist.join(slope.rename('slope'))
# Keep required columns
shoreline_dist = shoreline_dist[['beach', 'section', 'profile', 'name',
'source', 'slope', 'start_x', 'start_y',
'end_x', 'end_y', f'{datum}_dist',
f'{datum}_x', f'{datum}_y']]
# Export to file
shoreline_dist.to_csv(fname_out)
def preprocess_nswbpd(fname, datum=0, overwrite=False):
# Get output filename
name = Path(fname).stem.split('_')[-1].lower().replace(' ', '')
fname_out = f'output_data/nswbpd_{name}.csv'
# Test if file exists
if not os.path.exists(fname_out) or overwrite:
# Read in data
print(f'Processing {fname_out:<80}', end='\r')
profiles_df = pd.read_csv(fname, skiprows=5, dtype={'Block': str, 'Profile': str})
profiles_df['Year/Date'] = pd.to_datetime(profiles_df['Year/Date'],
dayfirst=True,
errors='coerce')
# Convert columns to strings and add unique ID column
profiles_df['Beach'] = profiles_df['Beach'].str.lower().str.replace(' ', '')
profiles_df['Block'] = profiles_df['Block'].str.lower()
profiles_df['Profile'] = profiles_df['Profile'].astype(str).str.lower()
profiles_df['id'] = (profiles_df.Beach + '_' +
profiles_df.Block + '_' +
profiles_df.Profile)
profiles_df['name'] = 'nswbpd'
# Rename columns
profiles_df.columns = ['beach', 'section', 'profile',
'date', 'distance', 'z', 'x', 'y',
'source', 'id', 'name']
# Reproject coords to Albers
trans = Transformer.from_crs('EPSG:32756', 'EPSG:3577', always_xy=True)
profiles_df['x'], profiles_df['y'] = trans.transform(
profiles_df.x.values, profiles_df.y.values)
# Restrict to post 1987
profiles_df = profiles_df[profiles_df['date'] > '1987']
# Compute origin and end points for each profile and merge into data
start_end_xy = profiles_from_dist(profiles_df)
profiles_df = pd.merge(left=profiles_df, right=start_end_xy)
# Drop profiles that have been assigned incorrect profile IDs.
# To do this, we use a correlation test to determine whether x
# and y coordinates within each individual profiles fall along a
# straight line. If a profile has a low correlation (e.g. less
# than 99.9), it is likely that multiple profile lines have been
# incorrectly labelled with a single profile ID.
valid_profiles = lambda x: x[['x', 'y']].corr().abs().iloc[0, 1] > 0.99
drop = (~profiles_df.groupby('id').apply(valid_profiles)).sum()
profiles_df = profiles_df.groupby('id').filter(valid_profiles)
if drop.sum() > 0: print(f'\nDropping invalid profiles: {drop:<80}')
# If profile data remains
if len(profiles_df.index) > 0:
# Restrict profiles to data that falls ocean-ward of the top of
# the foredune (the highest point in the profile) to remove
# spurious validation points, e.g. due to a non-shoreline lagoon
# at the back of the profile
foredune_dist = profiles_df.groupby(['id', 'date']).apply(
lambda x: x.distance.loc[x.z.idxmax()]).reset_index(name='foredune_dist')
profiles_df = pd.merge(left=profiles_df, right=foredune_dist)
profiles_df = profiles_df.loc[(profiles_df.distance >=
profiles_df.foredune_dist)]
# Find location and distance to water for datum height (e.g. 0 m AHD)
intercept_df = profiles_df.groupby(['id', 'date']).apply(
waterline_intercept, z_val=datum).dropna()
# If any datum intercepts are found
if len(intercept_df.index) > 0:
# Join into dataframe
shoreline_dist = intercept_df.join(
profiles_df.groupby(['id', 'date']).agg(
lambda x: pd.Series.mode(x).iloc[0]))
# Compute validation slope and join into dataframe
slope = val_slope(profiles_df, intercept_df, datum=datum)
shoreline_dist = shoreline_dist.join(slope.rename('slope'))
# Keep required columns
shoreline_dist = shoreline_dist[['beach', 'section', 'profile', 'name',
'source', 'foredune_dist', 'slope',
'start_x', 'start_y',
'end_x', 'end_y', f'{datum}_dist',
f'{datum}_x', f'{datum}_y']]
# Standardise source column
standardise_source(shoreline_dist)
# Export to file
shoreline_dist.to_csv(fname_out)
else:
print(f'Skipping {fname:<80}', end='\r')
def preprocess_narrabeen(fname,
fname_out='output_data/wrl_narrabeen.csv',
datum=0,
overwrite=False):
# Test if file exists
if not os.path.exists(fname_out) or overwrite:
#################
# Location data #
#################
# Import data and parse DMS to DD
print(f'Processing {fname_out:<80}', end='\r')
data = "PF1 -33°42'20.65 151°18'16.30 118.42\n" \
"PF2 -33°42'33.45 151°18'10.33 113.36\n" \
"PF4 -33°43'01.55 151°17'58.84 100.26\n" \
"PF6 -33°43'29.81 151°17'58.65 83.65\n" \
"PF8 -33°43'55.94 151°18'06.47 60.48"
coords = pd.read_csv(StringIO(data),
sep=' ',
names=['profile', 'y', 'x', 'angle'])
coords['x'] = [dms2dd(i) for i in coords.x]
coords['y'] = [dms2dd(i) for i in coords.y]
# Extend survey lines out from start coordinates using supplied angle
coords_end = coords.apply(
lambda x: dist_angle(x.x, x.y, 0.002, x.angle), axis=1)
coords = pd.concat([coords, coords_end], axis=1).drop('angle', axis=1)
# Rename initial x and y values to indicate profile starting coords
coords = coords.rename({'y': 'start_y', 'x': 'start_x'}, axis=1)
# Reproject coords to Albers and create geodataframe
trans = Transformer.from_crs('EPSG:4326', 'EPSG:3577', always_xy=True)
coords['start_x'], coords['start_y'] = trans.transform(
coords.start_x.values, coords.start_y.values)
coords['end_x'], coords['end_y'] = trans.transform(
coords.end_x.values, coords.end_y.values)
# Add ID column
coords['profile'] = coords['profile'].astype(str).str.lower()
coords['beach'] = 'narrabeen'
coords['section'] = 'all'
coords['name'] = 'wrl'
coords['id'] = (coords.beach + '_' +
coords.section + '_' +
coords.profile)
###############
# Survey data #
###############
# Import data
profiles_df = pd.read_csv(
fname,
usecols=[1, 2, 3, 4, 5],
skiprows=1,
parse_dates=['date'],
names=['profile', 'date', 'distance', 'z', 'source'])
# Restrict to post 1987
profiles_df = profiles_df[(profiles_df.date.dt.year > 1987)]
# Merge profile coordinate data into transect data
profiles_df['profile'] = profiles_df['profile'].astype(str).str.lower()
profiles_df = profiles_df.merge(coords, on='profile')
# Add coordinates at every supplied distance along transects
profiles_df[['x', 'y']] = profiles_df.apply(
lambda x: pd.Series(dist_along_transect(x.distance,
x.start_x,
x.start_y,
x.end_x,
x.end_y)), axis=1)
# Find location and distance to water for datum height (e.g. 0 m AHD)
intercept_df = profiles_df.groupby(['id', 'date']).apply(
waterline_intercept, z_val=datum).dropna()
# If the output contains data
if len(intercept_df.index) > 0:
# Join into dataframe
shoreline_dist = intercept_df.join(
profiles_df.groupby(['id', 'date']).agg(
lambda x: pd.Series.mode(x).iloc[0]))
# Compute validation slope and join into dataframe
slope = val_slope(profiles_df, intercept_df, datum=datum)
shoreline_dist = shoreline_dist.join(slope.rename('slope'))
# Keep required columns
shoreline_dist = shoreline_dist[['beach', 'section', 'profile', 'name',
'source', 'slope', 'start_x', 'start_y',
'end_x', 'end_y', f'{datum}_dist',
f'{datum}_x', f'{datum}_y']]
# Standardise source column
standardise_source(shoreline_dist)
# Export to file
shoreline_dist.to_csv(fname_out)
else:
print(f'Skipping {fname:<80}', end='\r')
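# Hypothetical invocation sketch: the input path is a placeholder for a local
# copy of the WRL Narrabeen-Collaroy profile csv; output lands in fname_out.
# >>> preprocess_narrabeen('input_data/wrl_narrabeen/Narrabeen_Profiles.csv',
# ...                      datum=0, overwrite=True)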
def preprocess_cgc(site, datum=0, overwrite=True):
# Standardise beach name from site name
beach = site.replace('NO*TH KIRRA', 'NORTH KIRRA').lower()
beach = beach.replace(' ', '').lower()
fname_out = f'output_data/cgc_{beach}.csv'
print(f'Processing {fname_out:<80}', end='\r')
# Test if file exists
if not os.path.exists(fname_out) or overwrite:
# List of profile datasets to iterate through
profile_list = glob.glob(f'input_data/cityofgoldcoast/{site}*.txt')
# Output list to hold data
site_profiles = []
# For each profile, import and standardise data then add to list
for profile_i in profile_list:
# Identify unique field values from file string
profile_string = os.path.basename(profile_i)
date = profile_string.split(' - (')[1][-14:-4]
section_profile = profile_string.split(' - (')[0].split(' - ')[1]
section = section_profile.split(' ')[0]
profile = ''.join(section_profile.split(' ')[1:])
# Fix missing section or profile info
if section and not profile:
section, profile = 'na', section
elif not section and not profile:
section, profile = 'na', 'na'
# Set location metadata and ID
profile_df = pd.read_csv(profile_i,
usecols=[1, 2, 3],
delim_whitespace=True,
names=['x', 'y', 'z'])
profile_df['date'] = pd.to_datetime(date)
profile_df['source'] = 'hydrographic survey'
profile_df['name'] = 'cgc'
profile_df['profile'] = profile.lower()
profile_df['section'] = section.lower()
profile_df['beach'] = beach
profile_df['id'] = (profile_df.beach + '_' +
profile_df.section + '_' +
profile_df.profile)
# Filter to drop pre-1987 and deep water samples, add to
# list if profile crosses 0
profile_df = profile_df[profile_df.date > '1987']
profile_df = profile_df[profile_df.z > -3.0]
if (profile_df.z.min() < datum) & (profile_df.z.max() > datum):
site_profiles.append(profile_df)
# If list of profiles contain valid data
if len(site_profiles) > 0:
# Combine individual profiles into a single dataframe
profiles_df = pd.concat(site_profiles)
# Reproject coords to Albers
trans = Transformer.from_crs('EPSG:32756', 'EPSG:3577', always_xy=True)
profiles_df['x'], profiles_df['y'] = trans.transform(
profiles_df.x.values, profiles_df.y.values)
# Compute origin and end points for each profile
start_xy = profiles_df.groupby(['id'], as_index=False).first()[['id', 'x', 'y']]
end_xy = profiles_df.groupby(['id'], as_index=False).last()[['id', 'x', 'y']]
start_xy = start_xy.rename({'x': 'start_x', 'y': 'start_y'}, axis=1)
end_xy = end_xy.rename({'x': 'end_x', 'y': 'end_y'}, axis=1)
# Join origin and end points into dataframe
profiles_df = pd.merge(left=profiles_df, right=start_xy)
profiles_df = pd.merge(left=profiles_df, right=end_xy)
# Compute chainage
profiles_df['distance'] = profiles_df.apply(
lambda x: Point(x.start_x, x.start_y).distance(Point(x.x, x.y)), axis=1)
# Drop profiles that have been assigned incorrect profile IDs.
# To do this, we use a correlation test to determine whether x
# and y coordinates within each individual profiles fall along a
# straight line. If a profile has a low correlation (e.g. less
# than 99.9), it is likely that multiple profile lines have been
# incorrectly labelled with a single profile ID.
valid_profiles = lambda x: x[['x', 'y']].corr().abs().iloc[0, 1] > 0.99
drop = (~profiles_df.groupby('id').apply(valid_profiles)).sum()
profiles_df = profiles_df.groupby('id').filter(valid_profiles)
if drop.sum() > 0: print(f'\nDropping invalid profiles: {drop:<80}')
# Restrict profiles to data that falls ocean-ward of the top of
# the foredune (the highest point in the profile) to remove
# spurious validation points, e.g. due to a non-shoreline lagoon
# at the back of the profile
foredune_dist = profiles_df.groupby(['id', 'date']).apply(
lambda x: x.distance.loc[x.z.idxmax()]).reset_index(name='foredune_dist')
profiles_df = pd.merge(left=profiles_df, right=foredune_dist)
profiles_df = profiles_df.loc[(profiles_df.distance >=
profiles_df.foredune_dist)]
# Find location and distance to water for datum height (e.g. 0 m AHD)
intercept_df = profiles_df.groupby(['id', 'date']).apply(
waterline_intercept, z_val=datum).dropna()
# If the output contains data
if len(intercept_df.index) > 0:
# Join into dataframe
shoreline_dist = intercept_df.join(
profiles_df.groupby(['id', 'date']).first())
# Compute validation slope and join into dataframe
slope = val_slope(profiles_df, intercept_df, datum=datum)
shoreline_dist = shoreline_dist.join(slope.rename('slope'))
# Keep required columns
shoreline_dist = shoreline_dist[['beach', 'section', 'profile', 'name',
'source', 'foredune_dist', 'slope',
'start_x', 'start_y',
'end_x', 'end_y', f'{datum}_dist',
f'{datum}_x', f'{datum}_y']]
# Export to file
shoreline_dist.to_csv(fname_out)
else:
print(f'Skipping {fname_out:<80}', end='\r')
def preprocess_sadew(fname, datum=0, overwrite=False):
# Get output filename
name = Path(fname).stem.split('_')[-1].lower().replace(' ', '')
fname_out = f'output_data/sadew_{name}.csv'
print(f'Processing {fname_out:<80}', end='\r')
# Test if file exists
if not os.path.exists(fname_out) or overwrite:
# Load data and set nodata values to NaN
        wide_df = pd.read_csv(fname)
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
__all__ = [
"from_3d_numpy_to_2d_array",
"from_3d_numpy_to_nested",
"from_nested_to_2d_array",
"from_2d_array_to_nested",
"from_nested_to_3d_numpy",
"from_nested_to_long",
"from_long_to_nested",
"from_multi_index_to_3d_numpy",
"from_3d_numpy_to_multi_index",
"from_multi_index_to_nested",
"from_nested_to_multi_index",
"are_columns_nested",
"is_nested_dataframe",
"convert_from_dictionary",
]
def _cell_is_series_or_array(cell):
return isinstance(cell, (pd.Series, np.ndarray))
def _nested_cell_mask(X):
return X.applymap(_cell_is_series_or_array)
def are_columns_nested(X):
"""Checks whether any cells have nested structure in each DataFrame column.
Parameters
----------
X : pd.DataFrame
DataFrame to check for nested data structures.
Returns
-------
any_nested : bool
If True, at least one column is nested.
If False, no nested columns.
"""
any_nested = _nested_cell_mask(X).any().values
return any_nested
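# Minimal usage sketch (hypothetical data): a frame mixing a nested column of
# pd.Series cells with a primitive column reports True only for the nested one.
# >>> nested_col = pd.Series([pd.Series([1.0, 2.0]), pd.Series([3.0, 4.0])])
# >>> X = pd.DataFrame({"ts": nested_col, "label": [0, 1]})
# >>> are_columns_nested(X)
# array([ True, False])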
def _nested_cell_timepoints(cell):
if _cell_is_series_or_array(cell):
n_timepoints = cell.shape[0]
else:
n_timepoints = 0
return n_timepoints
def _check_equal_index(X):
"""
Check if all time-series for a given column in a
nested pandas DataFrame have the same index.
Parameters
----------
X : nested pd.DataFrame
Input dataframe with time-series in cells.
Returns
-------
    indexes : list of indices
        List of indices, one index for each column
"""
# TODO handle 1d series, not only 2d dataframes
# TODO assumes columns are typed (i.e. all rows for a given column have
# the same type)
# TODO only handles series columns, raises error for columns with
# primitives
indexes = []
# Check index for each column separately.
for c, col in enumerate(X.columns):
# Get index from first row, can be either pd.Series or np.array.
first_index = (
X.iloc[0, c].index
if hasattr(X.iloc[0, c], "index")
            else np.arange(X.iloc[0, c].shape[0])
)
# Series must contain at least 2 observations, otherwise should be
# primitive.
if len(first_index) < 2:
raise ValueError(
f"Time series must contain at least 2 observations, "
f"but found: "
f"{len(first_index)} observations in column: {col}"
)
# Check index for all rows.
for i in range(1, X.shape[0]):
index = (
X.iloc[i, c].index
if hasattr(X.iloc[i, c], "index")
                else np.arange(X.iloc[i, c].shape[0])
)
if not np.array_equal(first_index, index):
raise ValueError(
f"Found time series with unequal index in column {col}. "
f"Input time-series must have the same index."
)
indexes.append(first_index)
return indexes
def from_3d_numpy_to_2d_array(X):
"""Converts 3d NumPy array (n_instances, n_columns, n_timepoints) to
a 2d NumPy array with shape (n_instances, n_columns*n_timepoints)
Parameters
----------
X : np.ndarray
The input 3d-NumPy array with shape
(n_instances, n_columns, n_timepoints)
Returns
-------
array_2d : np.ndarray
A 2d-NumPy array with shape (n_instances, n_columns*n_timepoints)
"""
array_2d = X.reshape(X.shape[0], -1)
return array_2d
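# Sketch with hypothetical shapes: a (n_instances, n_columns, n_timepoints)
# array is flattened so each row concatenates all columns' time points.
# >>> X = np.arange(24).reshape(2, 3, 4)  # 2 instances, 3 columns, 4 timepoints
# >>> from_3d_numpy_to_2d_array(X).shape
# (2, 12)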
def from_nested_to_2d_array(X, return_numpy=False):
"""Convert nested pandas DataFrame or Series with NumPy arrays or
pandas Series in cells into tabular
pandas DataFrame with primitives in cells, i.e. a data frame with the
same number of rows as the input data and
    as many columns as there are observations in the nested series. Requires
    series to have the same index.
Parameters
----------
X : nested pd.DataFrame or nested pd.Series
return_numpy : bool, default = False
- If True, returns a NumPy array of the tabular data.
- If False, returns a pandas DataFrame with row and column names.
Returns
-------
Xt : pandas DataFrame
Transformed DataFrame in tabular format
"""
# TODO does not handle dataframes with nested series columns *and*
# standard columns containing only primitives
# convert nested data into tabular data
if isinstance(X, pd.Series):
Xt = np.array(X.tolist())
elif isinstance(X, pd.DataFrame):
try:
Xt = np.hstack([X.iloc[:, i].tolist() for i in range(X.shape[1])])
# except strange key error for specific case
except KeyError:
if (X.shape == (1, 1)) and (X.iloc[0, 0].shape == (1,)):
# in fact only breaks when an additional condition is met,
# namely that the index of the time series of a single value
# does not start with 0, e.g. pd.RangeIndex(9, 10) as is the
# case in forecasting
Xt = X.iloc[0, 0].values
else:
raise
if Xt.ndim != 2:
raise ValueError(
"Tabularization failed, it's possible that not "
"all series were of equal length"
)
else:
raise ValueError(
f"Expected input is pandas Series or pandas DataFrame, "
f"but found: {type(X)}"
)
if return_numpy:
return Xt
Xt = pd.DataFrame(Xt)
# create column names from time index
if X.ndim == 1:
time_index = (
X.iloc[0].index
if hasattr(X.iloc[0], "index")
else np.arange(X.iloc[0].shape[0])
)
columns = [f"{X.name}__{i}" for i in time_index]
else:
columns = []
for colname, col in X.items():
time_index = (
col.iloc[0].index
if hasattr(col.iloc[0], "index")
else np.arange(col.iloc[0].shape[0])
)
columns.extend([f"{colname}__{i}" for i in time_index])
Xt.index = X.index
Xt.columns = columns
return Xt
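# Hypothetical sketch: one nested column of equal-length series becomes one
# tabular column per time point, named "<column>__<time index>".
# >>> nested = pd.DataFrame({"ts": [pd.Series([1.0, 2.0, 3.0]),
# ...                               pd.Series([4.0, 5.0, 6.0])]})
# >>> from_nested_to_2d_array(nested).columns.tolist()
# ['ts__0', 'ts__1', 'ts__2']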
def from_2d_array_to_nested(
X, index=None, columns=None, time_index=None, cells_as_numpy=False
):
"""Convert tabular pandas DataFrame with only primitives in cells into
nested pandas DataFrame with a single column.
Parameters
----------
X : pd.DataFrame
cells_as_numpy : bool, default = False
If True, then nested cells contain NumPy array
If False, then nested cells contain pandas Series
index : array-like, shape=[n_samples], optional (default = None)
Sample (row) index of transformed DataFrame
time_index : array-like, shape=[n_obs], optional (default = None)
Time series index of transformed DataFrame
Returns
-------
Xt : pd.DataFrame
Transformed DataFrame in nested format
"""
if (time_index is not None) and cells_as_numpy:
        raise ValueError(
            "`time_index` cannot be specified when `cells_as_numpy` is True; "
            "a time index can only be attached to "
            "pandas Series cells"
        )
if isinstance(X, pd.DataFrame):
X = X.to_numpy()
container = np.array if cells_as_numpy else pd.Series
# for 2d numpy array, rows represent instances, columns represent time points
n_instances, n_timepoints = X.shape
if time_index is None:
time_index = np.arange(n_timepoints)
kwargs = {"index": time_index}
Xt = pd.DataFrame(
pd.Series([container(X[i, :], **kwargs) for i in range(n_instances)])
)
if index is not None:
Xt.index = index
if columns is not None:
Xt.columns = columns
return Xt
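# Hypothetical sketch: each row of a plain 2d array becomes a single cell
# holding a pd.Series indexed by time point.
# >>> tab = np.random.rand(3, 5)  # 3 instances, 5 time points
# >>> nested = from_2d_array_to_nested(tab)
# >>> nested.shape, type(nested.iloc[0, 0])
# ((3, 1), <class 'pandas.core.series.Series'>)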
def convert_from_dictionary(ts_dict):
"""
Simple conversion from a dictionary of each series, e.g. univariate
x = {
"Series1": [1.0,2.0,3.0,1.0,2.0],
"Series2": [3.0,2.0,1.0,3.0,2.0],
}
    (or multivariate, analogously)
    to the sktime nested pandas format.
TODO: Adapt for multivariate
"""
panda = pd.DataFrame(ts_dict)
panda = panda.transpose()
return from_2d_array_to_nested(panda)
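# Hypothetical usage: dictionary keys become instances (rows) and each value
# becomes one nested time-series cell.
# >>> d = {"Series1": [1.0, 2.0, 3.0, 1.0, 2.0], "Series2": [3.0, 2.0, 1.0, 3.0, 2.0]}
# >>> convert_from_dictionary(d).shape
# (2, 1)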
def _concat_nested_arrays(arrs, cells_as_numpy=False):
"""
Helper function to nest tabular arrays from nested list of arrays.
Parameters
----------
arrs : list of numpy arrays
Arrays must have the same number of rows, but can have varying
number of columns.
cells_as_numpy : bool, default = False
If True, then nested cells contain NumPy array
If False, then nested cells contain pandas Series
Returns
-------
Xt : pandas DataFrame
Transformed dataframe with nested column for each input array.
"""
if cells_as_numpy:
Xt = pd.DataFrame(
np.column_stack(
[pd.Series([np.array(vals) for vals in interval]) for interval in arrs]
)
)
else:
Xt = pd.DataFrame(
np.column_stack(
                [pd.Series([pd.Series(vals) for vals in interval]) for interval in arrs]
            )
        )
    return Xt
from datetime import timedelta
from functools import partial
from operator import attrgetter
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import OutOfBoundsDatetime, conversion
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime, offsets,
to_datetime)
from pandas.core.arrays import DatetimeArray, period_array
import pandas.util.testing as tm
class TestDatetimeIndex(object):
@pytest.mark.parametrize('dt_cls', [DatetimeIndex,
DatetimeArray._from_sequence])
def test_freq_validation_with_nat(self, dt_cls):
# GH#11587 make sure we get a useful error message when generate_range
# raises
msg = ("Inferred frequency None from passed values does not conform "
"to passed frequency D")
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01')], freq='D')
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01').value],
freq='D')
def test_categorical_preserves_tz(self):
# GH#18664 retain tz when going DTI-->Categorical-->DTI
# TODO: parametrize over DatetimeIndex/DatetimeArray
# once CategoricalIndex(DTA) works
dti = pd.DatetimeIndex(
[pd.NaT, '2015-01-01', '1999-04-06 15:14:13', '2015-01-01'],
tz='US/Eastern')
ci = pd.CategoricalIndex(dti)
carr = pd.Categorical(dti)
cser = pd.Series(ci)
for obj in [ci, carr, cser]:
result = pd.DatetimeIndex(obj)
tm.assert_index_equal(result, dti)
def test_dti_with_period_data_raises(self):
# GH#23675
data = pd.PeriodIndex(['2016Q1', '2016Q2'], freq='Q')
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(period_array(data))
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(period_array(data))
def test_dti_with_timedelta64_data_deprecation(self):
# GH#23675
data = np.array([0], dtype='m8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
def test_construction_caching(self):
df = pd.DataFrame({'dt': pd.date_range('20130101', periods=3),
'dttz': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'dt_with_null': [pd.Timestamp('20130101'), pd.NaT,
pd.Timestamp('20130103')],
'dtns': pd.date_range('20130101', periods=3,
freq='ns')})
assert df.dttz.dtype.tz.zone == 'US/Eastern'
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
result = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(i, result)
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
if str(tz) in ('UTC', 'tzutc()'):
warn = None
else:
warn = FutureWarning
with tm.assert_produces_warning(warn, check_stacklevel=False):
result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs)
expected = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(result, expected)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
tm.assert_index_equal(i2, expected)
# incompat tz/dtype
pytest.raises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_construction_index_with_mixed_timezones(self):
# gh-11488: no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# Different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_index_with_mixed_timezones_with_NaT(self):
# see gh-11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# Same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# all NaT
result = Index([pd.NaT, pd.NaT], name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# all NaT with tz
result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
        tm.assert_index_equal(result, exp, exact=True)
from revoscalepy import RxSqlServerData, rx_import, rx_data_step, RxOdbcData, rx_write_object, rx_read_object, rx_get_var_names
import glob, shutil
import os, random, collections
import operator
import numpy as np, scipy.spatial.distance
from pandas import DataFrame, concat
from sklearn.utils import shuffle
random.seed(0)
def get_directories_in_directory(directory):
return [s for s in os.listdir(directory) if os.path.isdir(directory + "/" + s)]
def get_files_in_directory(directory, postfix = ""):
if not os.path.exists(directory):
return []
fileNames = [s for s in os.listdir(directory) if not os.path.isdir(directory + "/" + s)]
if not postfix or postfix == "":
return fileNames
else:
return [s for s in fileNames if s.lower().endswith(postfix)]
def get_random_number(low, high):
randomNumber = random.randint(low, high)
return randomNumber
def get_random_list_element(listND, containsHeader = False):
if containsHeader:
index = get_random_number(1, len(listND) - 1)
else:
index = get_random_number(0, len(listND) - 1)
return listND[index]
def compute_vector_distance(vec1, vec2, method):
assert (len(vec1) == len(vec2))
# Distance computation
vecDiff = vec1 - vec2
method = method.lower()
if method == 'l2':
dist = np.linalg.norm(vecDiff, 2)
elif method == "cosine":
dist = scipy.spatial.distance.cosine(vec1, vec2)
else:
raise Exception("Distance method unknown: " + method)
assert (not np.isnan(dist))
return dist
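# Sketch with hypothetical vectors, covering both supported metrics.
# >>> v1, v2 = np.array([1.0, 0.0]), np.array([0.0, 1.0])
# >>> compute_vector_distance(v1, v2, 'l2')
# 1.4142135623730951
# >>> compute_vector_distance(v1, v2, 'cosine')
# 1.0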
def map_images_to_predictedscores(classifierOutput):
features = dict()
for index, row in classifierOutput.iterrows():
key = row['image']
features[key] = row.drop(['image', 'Label', 'PredictedLabel'])
return features
def is_same_class(queryClass, targetClass):
queryElements = queryClass.split("\\")
query = queryElements[len(queryElements) - 2]
targetElements = targetClass.split("\\")
target = targetElements[len(targetElements) - 2]
return query == target
def prepare_evaluation_set(conn_str, feature_table, test_table, evaluation_table, maxQueryImgsPerCat, maxNegImgsPerQueryImg):
evaluation_set = DataFrame()
query = "SELECT image, Label FROM {} WHERE image IN (SELECT image FROM {})".format(feature_table, test_table)
test_images = RxSqlServerData(sql_query=query, connection_string=conn_str)
test_images_df = rx_import(test_images)
classes = test_images_df.Label.unique()
for queryCat in classes:
query_images = shuffle(test_images_df.loc[test_images_df["Label"] == queryCat, "image"])
selectQueryImages = query_images.sample(n=maxQueryImgsPerCat, random_state=0, replace=True)
for index, queryImage in selectQueryImages.iteritems():
refImage = get_random_list_element(list(set(query_images) - set([queryImage])))
firstitem = DataFrame({'queryCat': [queryCat], 'queryImage': [queryImage], 'refCat': [queryCat], 'refImage': [refImage]})
evaluation_set = concat([evaluation_set, firstitem])
for _ in range(maxNegImgsPerQueryImg):
refCat = get_random_list_element(list(set(classes) - set([queryCat])))
refImages = test_images_df.loc[test_images_df["Label"] == refCat, "image"]
refImage = shuffle(refImages).sample(n=1, random_state=0)
refImage = refImage.iloc[0]
secitem = DataFrame({'queryCat': [queryCat], 'queryImage': [queryImage], 'refCat': [refCat], 'refImage': [refImage]})
evaluation_set = concat([evaluation_set, secitem])
evaluation_images = RxSqlServerData(table=evaluation_table, connection_string=conn_str)
rx_data_step(evaluation_set, evaluation_images, overwrite=True)
def rank_candiate_images(conn_str, queryImageVector, candidateImageVector, top_k_candidates, results_table):
    ranking_results = DataFrame()
# Torch imports
import torch as torch
from torch.utils import data
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# HexagDLy import
import hexagdly as hg
import copy
import numpy as np
import pandas as pd
import math
import pickle
from matplotlib import pyplot as plt
import matplotlib as mpl
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def get_log_bins(l1, nbins):
bins = np.geomspace(min(l1), max(l1), nbins)
return bins
def get_discrete_colormap(num, colormapstr = 'viridis'):
cmap = plt.get_cmap(colormapstr)
colco = [cmap(i) for i in range(0, cmap.N, int(cmap.N / (num - 1)))]
colco.append(cmap(cmap.N))
cmap2 = mpl.colors.LinearSegmentedColormap.from_list(
'Custom cmap', colco, num)
return cmap2
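# Hypothetical usage: build a 5-colour discrete version of viridis, e.g. for
# class-coded scatter plots.
# >>> cmap5 = get_discrete_colormap(5, 'viridis')
# >>> cmap5.N
# 5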
def save_model(path, net, optimizer, epoch, trainloss=0.0, testloss = 0.0, additional_info=""):
d1 = {'epoch': epoch, 'model_state_dict':net.state_dict(), 'optimizer_state_dict':optimizer.state_dict(),
'trainloss': trainloss, 'testloss': testloss, "additional_info":additional_info}
#print("QQ1: ",d1)
torch.save(d1, path)
def load_model(path, base_net, base_optimizer):
checkpoint = torch.load(path)
base_net.load_state_dict(checkpoint['model_state_dict'])
base_optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
additional_info = checkpoint['additional_info']
testloss = checkpoint['testloss']
trainloss = checkpoint['trainloss']
# base_net.eval()
# - or -
base_net.train()
return {"net":base_net, 'optimizer':base_optimizer , 'epoch': epoch, 'trainloss': trainloss,
'testloss': testloss, "additional_info": additional_info }
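# Hypothetical checkpoint round-trip; `net` and `optimizer` are assumed to be
# an existing torch module and optimizer in the calling code.
# >>> save_model('ckpt.pth', net, optimizer, epoch=10, trainloss=0.12, testloss=0.15)
# >>> state = load_model('ckpt.pth', net, optimizer)
# >>> state['epoch'], state['testloss']
# (10, 0.15)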
def get_image_from_datahandler(h, imgid):
return h.get_whole_dataset()[imgid]
def process_data(x):
n2 = max(x.iloc[8:2086])
x.iloc[8:2086] = x.iloc[8:2086].div(n2)
return x
def process_data2(x):
x_a = x[13:2091]
x_array2 = x_a / max(x_a)
x_array2 = x_array2 - 0.5 - min(x_array2) / 2
x_array2 = x_array2 * 1 / max(x_array2)
x_array2 = pd.concat([x[0:9], x_array2])
return x_array2
def process_data_gauss(x, colnum, pixnum):
x_a = x[colnum: colnum+2*pixnum]
x_t = x[colnum + 2 * pixnum: colnum + 4 * pixnum]
t_data = pd.Series([min(x_a),max(x_a)])
#x_a = np.log10(x_a+100)
mean = np.mean(x_a, None)
std = np.std(x_a, None)
x_a -= mean
x_a /= std
x_tarray2 = (((x_t - 10) * 1) / (60 - 0)) #(((value - old_min) * new_range) / (old_max - old_min) - new_min)
x_array2 = pd.concat([x[:colnum], x_a, x_tarray2, t_data])
return x_array2
def process_data3_tc(x):
x_a = x[9:2087]
x_t = x[2087:4165]
t_data = pd.Series([min(x_a),max(x_a)])
#OldRange = (max(x_a) - min(x_a))
#NewRange = 2#(1 - -1)
x_array2 = (((x_a - min(x_a)) * 2) / (max(x_a) - min(x_a))) - 1
x_t2 = (((x_t - 10) * 1) / (60 - 10)) - 0
x_array2 = pd.concat([x[0:9], x_array2])
x_array2 = pd.concat([x_array2, x_t2])
x_array2 = pd.concat([x_array2,t_data])
return x_array2
def process_data3(x, info_col_num, pixnum):
x_a = x[info_col_num:info_col_num+2*pixnum]
x_t = x[info_col_num + 2 * pixnum:info_col_num + 4 * pixnum]
t_data = pd.Series([min(x_a),max(x_a)])
#OldRange = (max(x_a) - min(x_a))
#NewRange = 2#(1 - -1)
x_array2 = (((x_a - min(x_a)) * 2) / (max(x_a) - min(x_a))) - 1
x_array2 = pd.concat([x[0:info_col_num], x_array2])
x_array2 = pd.concat([x_array2, x_t, t_data])
return x_array2
def process_data4(x):
x_a = x[9:2087]
    x_t = x[2087:4165]  # assumed layout: 9 info columns + 1039 pixels, as in process_data3_tc
t_data = pd.Series([min(x_a),max(x_a)])
#OldRange = (max(x_a) - min(x_a))
#NewRange = 2#(1 - -1)
x_array2 = (((x_a - -1) * 2) / (2500 - -1)) - 1
    x_tarray2 = (((x_t - -1) * 2) / (2500 - -1)) - 1  # scale the time channel (x_t), not the charge channel
x_array2 = pd.concat([x[0:9], x_array2])
x_array2 = pd.concat([x_array2,x_tarray2,t_data])
return x_array2
def process_data3_i_image(x_a, colnum, pixnum):
#x_a = x[9:2087]
#xs = x_a.shape
#print(x_a.shape)
#x_a=x_a.view(-1,40*40)
#t_data = [min(x_a),max(x_a)]
#OldRange = (max(x_a) - min(x_a))
#NewRange = 2#(1 - -1)
for i, x_loop in enumerate(x_a):
x_a[i] = (((x_loop - min(x_loop)) * 1) / (max(x_loop) - min(x_loop)))
#x_a=x_a.view(xs)
return x_a
'''
def process_data3_i(x, info_col_num, pixnum):
x_a = x[info_col_num:info_col_num + 2 * pixnum]
x_t = x[info_col_num + 2 * pixnum:info_col_num + 4 * pixnum]
t_data = pd.Series([min(x_a),max(x_a)])
#OldRange = (max(x_a) - min(x_a))
#NewRange = 2#(1 - -1)
x_array2 = (((x_a - min(x_a)) * 1) / (max(x_a) - min(x_a)))
x_array2 = pd.concat([x[0:info_col_num], x_array2])
x_array2 = pd.concat([x_array2, x_t, t_data])
return x_array2
'''
def process_data3_i(x, colnum, pixnum):
x_a = x[colnum: colnum+2*pixnum]
x_t = x[colnum + 2 * pixnum: colnum + 4 * pixnum]
t_data = pd.Series([min(x_a),max(x_a)])
#OldRange = (max(x_a) - min(x_a))
#NewRange = 2#(1 - -1)
x_array2 = (((x_a - min(x_a)) * 1) / (max(x_a) - min(x_a)))
x_tarray2 = (((x_t - 10) * 1) / (60 - 0)) #(((value - old_min) * new_range) / (old_max - old_min) - new_min)
x_array2 = pd.concat([x[:colnum], x_array2, x_tarray2, t_data])
return x_array2
def process_data4_i(x, colnum, pixnum):
x_a = x[colnum: colnum+2*pixnum]
x_t = x[colnum + 2 * pixnum: colnum + 4 * pixnum]
t_data = pd.Series([min(x_a),max(x_a)])
#OldRange = (max(x_a) - min(x_a))
#NewRange = 2#(1 - -1)
x_array2 = (((x_a - -1) * 1) / (2500))
x_tarray2 = (((x_t - 10) * 1) / (60 - 0)) #(((value - old_min) * new_range) / (old_max - old_min) - new_min)
    x_array2 = pd.concat([x[:colnum], x_array2, x_tarray2, t_data])
    return x_array2
import asyncio
import json
import pandas as pd
import numpy as np
import aiohttp
from understat import Understat
from config import Config
class Parser(Config):
def __init__(self, Config):
        # inherit configuration parameters from Config
super().__init__()
def json_to_series(self, i):
row = []
        # iterate over every column of the row
for j in range(len(i)):
            # if the value is a dict, expand it into its individual elements
if type(i[j]) == dict:
# keys = i[j].keys()
[row.append(i[j][k]) for k in i[j].keys()]
# print(cast)
else:
row.append(i[j])
# print(i[j])
return row
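    # Hypothetical sketch: nested dicts in a row are flattened into their
    # values, everything else is passed through unchanged.
    # >>> p = Parser(Config)
    # >>> p.json_to_series([1, {'h': 2, 'a': 0}, 'epl'])
    # [1, 2, 0, 'epl']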
# Modules Understat
# get_league_results(self, league_name, season, options=None, **kwargs)
async def get_league_results(self):
async with aiohttp.ClientSession() as session:
understat = Understat(session)
main_df = []
for league_name in self.leagues:
for season in self.seasons:
league_stats = await understat.get_league_results(
league_name=league_name,
season=season
)
for r in league_stats:
main_df.append(
[
r['id'],
r['datetime'],
league_name,
season,
r['h']['title'],
r['h']['short_title'],
r['h']['id'],
r['xG']['h'],
r['a']['title'],
r['a']['short_title'],
r['a']['id'],
r['xG']['a'],
r['forecast']['w'],
r['forecast']['d'],
r['forecast']['l'],
r['goals']['h'],
r['goals']['a'],
]
)
main_df = pd.DataFrame(
main_df,
columns=[
'IdMatch',
'DateTime',
'LeagueName',
'Season',
'HomeTeam',
'HomeShortTeam',
'HomeTeamId',
'HomeTeam_xG',
'AwayTeam',
'AwayShortTeam',
'AwayTeamId',
'AwayTeam_xG',
'WinProba',
'DrawProba',
'LoseProba',
'FTHG',
'FTAG'
]
)
# print(league_stats)
return main_df
# get_league_fixtures(self, league_name, season, options=None, **kwargs)
async def fixtures(self):
async with aiohttp.ClientSession() as session:
understat = Understat(session)
fixtures_df = []
for league_name in self.leagues:
for season in self.fixtures_seasons:
league_stats = await understat.get_league_fixtures(
league_name=league_name,
season=season
)
for r in league_stats:
fixtures_df.append(
[
r['id'],
r['datetime'],
league_name,
season,
r['h']['title'],
r['h']['short_title'],
r['h']['id'],
r['xG']['h'],
r['a']['title'],
r['a']['short_title'],
r['a']['id'],
r['xG']['a'],
None,
None,
None,
None,
None,
]
)
fixtures_df = pd.DataFrame(
fixtures_df,
columns=[
'IdMatch',
'DateTime',
'LeagueName',
'Season',
'HomeTeam',
'HomeShortTeam',
'HomeTeamId',
'HomeTeam_xG',
'AwayTeam',
'AwayShortTeam',
'AwayTeamId',
'AwayTeam_xG',
'WinProba',
'DrawProba',
'LoseProba',
'FTHG',
'FTAG'
]
)
return fixtures_df # ,league_stats team_stats,league_stats,all_stats
# get_teams(self, league_name, season, options=None, **kwargs)
async def teams(self):
async with aiohttp.ClientSession() as session:
understat = Understat(session)
# leagues = ["epl", "la_liga", "bundesliga", "serie_a", "ligue_1", "rfpl"]
# seasons = [2019]
teams = []
for league_name in self.leagues:
for season in self.seasons:
league_stats = await understat.get_teams(
league_name=league_name,
season=season
)
for r in league_stats:
for r_history in r['history']:
teams.append(
[
r['id'],
r['title'],
league_name,
season,
r_history['date'],
r_history['h_a'],
r_history['xG'],
r_history['xGA'],
r_history['npxG'],
r_history['npxGA'],
r_history['ppda']['att'],
r_history['ppda']['def'],
r_history['ppda_allowed']['att'],
r_history['ppda_allowed']['def'],
r_history['deep'],
r_history['deep_allowed'],
r_history['scored'],
r_history['missed'],
r_history['xpts'],
r_history['result'],
r_history['wins'],
r_history['draws'],
r_history['loses'],
r_history['pts'],
r_history['npxGD'],
]
)
teams = pd.DataFrame(
teams
, columns = [
'TeamId',
'TeamName',
'LeagueName',
'Season',
'DateTime',
'h_a',
'xG',
'xGA',
'npxG',
'npxGA',
'ppda_att',
'ppda_def',
'ppda_allowed_att',
'ppda_allowed_def',
'deep',
'deep_allowed',
'scored',
'missed',
'xpts',
'result',
'wins',
'draws',
'loses',
'pts',
'npxGD',
]
)
# format data values
for c in ['TeamId', 'Season']:
teams[c] = teams[c].astype(np.int32)
for c in ['xG', 'xGA','npxG', 'npxGA', 'ppda_att', 'ppda_def', 'ppda_allowed_att',
'ppda_allowed_def', 'deep', 'deep_allowed', 'scored', 'missed', 'xpts',
'wins', 'draws', 'loses', 'pts', 'npxGD'
]: # 'result',
teams[c] = teams[c].astype(np.float32)
teams['DateTime'] = pd.to_datetime(teams['DateTime'])# .astype('datetime[64]')
return teams
# get_stats(self, options=None, **kwargs)
async def stats(self):
async with aiohttp.ClientSession() as session:
understat = Understat(session)
# leagues = ["epl", "la_liga", "bundesliga", "serie_a", "ligue_1", "rfpl"]
# example
# {'league_id': '6',
# 'league': 'RFPL',
# 'h': '1.4583',
# 'a': '1.1250',
# 'hxg': '1.450178946678837',
# 'axg': '1.016120968464141',
# 'year': '2014',
# 'month': '8',
# 'matches': '48'}
list_data = []
get_data = await understat.get_stats()
for r in get_data:
list_data.append(
[
r['league_id'],
r['league'],
r['h'],
r['a'],
r['hxg'],
r['axg'],
r['year'],
r['month'],
r['matches'],
]
)
list_data = pd.DataFrame(
list_data
, columns = [
'LeagueId',
'League',
'Home',
'Away',
'Home_xG',
'Away_xG',
'Year',
'Month',
'Matches',
]
)
# format data values
for c in ['LeagueId','Year','Month','Matches',]:
list_data[c] = list_data[c].astype(np.int32)
for c in ['Home','Away','Home_xG','Away_xG',]:
list_data[c] = list_data[c].astype(np.float32)
# list_data['DateTime'] = pd.to_datetime(list_data['DateTime'])
return list_data
# get_team_players(self, team_name, season, options=None, **kwargs)
async def player_stats(self):
async with aiohttp.ClientSession() as session:
understat = Understat(session)
# example
# {'id': '594', 'player_name': '<NAME>', 'games': '37', 'time': '3271',
# 'goals': '25', 'xG': '16.665452419780195', 'assists': '6', 'xA': '5.440816408023238',
# 'shots': '110', 'key_passes': '47', 'yellow_cards': '3', 'red_cards': '0', 'position': 'F S',
# 'team_title': 'Everton', 'npg': '24', 'npxG': '15.904283582232893', 'xGChain': '21.251998490653932',
# 'xGBuildup': '3.9702013842761517'}
list_data = []
for i in teams_df[['TeamName','Season']].drop_duplicates().values:
get_data = await understat.get_team_players(team_name = i[0], season = i[1])
# columns keys from json
keys = list(get_data[0].keys())
for r in get_data:
list_data.append(
[i[1]] + [
r[k] for k in keys
]
)
# create dataframe
list_data = pd.DataFrame(
list_data
, columns = ['Season'] + keys
)
# format data values
for c in [
'id', 'games', 'time', 'goals', 'assists', 'shots', 'key_passes', 'yellow_cards', 'red_cards',
'npg', 'Season'
]:
list_data[c] = list_data[c].astype(np.int32)
for c in ['xG', 'xA', 'npxG', 'xGChain', 'xGBuildup']:
list_data[c] = list_data[c].astype(np.float32)
# list_data['DateTime'] = pd.to_datetime(list_data['DateTime'])
print(get_data[0])
return list_data
# understat.get_league_players(league_name=l, season=s)
async def league_players(self):
async with aiohttp.ClientSession() as session:
understat = Understat(session)
list_data = []
for l in self.leagues:
for s in self.seasons:
get_data = await understat.get_league_players(league_name=l, season=s)
if len(get_data) > 0:
# columns keys from json
keys = list(get_data[0].keys())
for r in get_data:
list_data.append(
[l] + [s] + [
r[k] for k in keys
]
)
# create dataframe
list_data = pd.DataFrame(
list_data
, columns = ['League'] + ['Season'] + keys
)
# format data values
for c in ['id', 'games', 'time', 'goals', 'assists', 'shots', 'key_passes', 'yellow_cards', 'red_cards', 'npg']:
list_data[c] = list_data[c].astype(np.int32)
for c in ['xG', 'xA', 'npxG', 'xGChain', 'xGBuildup']:
list_data[c] = list_data[c].astype(np.float32)
return list_data
# understat.get_player_matches(league_name=l, season=s)
async def player_matches(self):
async with aiohttp.ClientSession() as session:
understat = Understat(session)
list_data = []
# for l in leagues:
# for s in seasons:
for i in df_league_players['id'].unique():
# print(i, df_league_players[df_league_players['id']==i]['player_name'].values[0])
player_id = i
player_name = df_league_players[df_league_players['id']==i]['player_name'].values[0]
get_data = await understat.get_player_matches(player_id = i)
if len(get_data) > 0:
# columns keys from json
keys = list(get_data[0].keys())
for r in get_data:
list_data.append(
[i] + [player_name] + [
r[k] for k in keys
]
)
# create dataframe
list_data = pd.DataFrame(
list_data
, columns = ['player_id'] + ['player_name'] + keys
)
list_data['date'] = pd.to_datetime(list_data['date'])
# format data values
for c in ['player_id', 'goals', 'shots', 'time', 'h_goals', 'a_goals', 'id', 'season','roster_id', 'assists', 'key_passes', 'npg']:
list_data[c] = list_data[c].astype(np.int32)
for c in ['xA', 'xG','npxG', 'xGChain','xGBuildup']:
list_data[c] = list_data[c].astype(np.float32)
list_data.rename(columns={'id':'IdMatch', 'date':'DateTime'}, inplace=True)
return list_data
# get_team_results(self, team_name, season, options=None, **kwargs)
async def team_results(self):
async with aiohttp.ClientSession() as session:
understat = Understat(session)
list_data = []
for i in teams_df[['TeamName','Season']].drop_duplicates().values:
get_data = await understat.get_team_results(team_name = i[0], season = i[1])
if len(get_data) > 0:
# columns keys from json
keys = list(get_data[0].keys())
for r in get_data:
list_data.append(
[i[0]] + [i[1]] + [
r[k] for k in keys
]
)
cols = [
'TeamName',
'Season',
'IdMatch',
'Flag',
'h_a',
'HomeTeamId',
'HomeTeam',
'ShortHomeTeamName',
'AwayTeamId',
'AwayTeam',
'ShortAwayTeamName',
'FTHG',
'FTAG',
'HomeTeam_xG',
'AwayTeam_xG',
'DateTime',
'Frcst_proba_w',
'Frcst_proba_d',
'Frcst_proba_l',
'Result'
]
series_list_data = []
for i in list_data:
series_list_data.append(self.json_to_series(i))
df_team_results = pd.DataFrame(series_list_data, columns= cols)
df_team_results['DateTime'] = pd.to_datetime(df_team_results['DateTime'])
# format data values
for c in ['Season','IdMatch','HomeTeamId','AwayTeamId','FTHG','FTAG']:
df_team_results[c] = df_team_results[c].astype(np.int32)
for c in ['HomeTeam_xG','AwayTeam_xG','Frcst_proba_w','Frcst_proba_d', 'Frcst_proba_l']:
df_team_results[c] = df_team_results[c].astype(np.float32)
return df_team_results
async def get_parse_data(self, update_data=True, resaved_data=True):
if update_data:
df = await self.get_league_results()
fixtures_df = await self.fixtures()
df['IsResult'] = True
fixtures_df['IsResult'] = False
df = pd.concat([df,fixtures_df],sort=False)
for c in ['IdMatch', 'Season','HomeTeamId','AwayTeamId']:
df[c] = df[c].astype(np.int32)
for c in ['HomeTeam_xG','AwayTeam_xG','WinProba','DrawProba','LoseProba','FTHG','FTAG']:
df[c] = df[c].astype(np.float32)
df['DateTime'] = pd.to_datetime(df['DateTime'])
df['Date'] = df['DateTime'].dt.date
teams_df = await self.teams()
if resaved_data:
df.to_pickle('./data/df.pkl')
teams_df.to_pickle('./data/teams_df.pkl')
else:
try:
df = pd.read_pickle('./data/df.pkl')
                teams_df = pd.read_pickle('./data/teams_df.pkl')
# -*- coding: utf-8 -*-
import datetime
import os
import pandas as pd
import logging
from src.config import PROC_DATA_DIR, DT_ID_COLS, AQ_COLS
def _series_as_str(column: pd.Series, zfill_len: int = 2) -> pd.Series:
if zfill_len:
return column.astype(str).str.zfill(zfill_len)
else:
return column.astype(str)
def create_dt_features(stations_df: pd.DataFrame) -> pd.DataFrame:
""" Build year, month, day, weekday columns and relevant average values"""
dt_stations_df = stations_df.copy().set_index('date')
dt_stations_df['year'] = _series_as_str(dt_stations_df.index.year)
dt_stations_df['month'] = _series_as_str(dt_stations_df.index.month)
dt_stations_df['year_month'] = dt_stations_df.index.strftime('%Y-%m')
dt_stations_df['day'] = _series_as_str(dt_stations_df.index.day)
dt_stations_df['weekofyear'] = _series_as_str(dt_stations_df.index.weekofyear)
dt_stations_df['wday'] = _series_as_str(dt_stations_df.index.weekday, zfill_len=1)
return dt_stations_df
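# Hypothetical sketch: a small frame with a 'date' column gains string-typed
# calendar features and is re-indexed by date.
# >>> df = pd.DataFrame({'date': pd.to_datetime(['2020-01-05', '2020-02-10']),
# ...                    'no2': [21.0, 34.0]})
# >>> create_dt_features(df)[['year', 'month', 'wday']].iloc[0].tolist()
# ['2020', '01', '6']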
def load_air_quality_data(proc_file: str = 'stations_data.csv') -> pd.DataFrame:
""" Read processed data from csv. Date is parsed as datetime object. """
path_to_file = os.path.join(PROC_DATA_DIR, proc_file)
logging.info("loading file {f}".format(f=path_to_file))
    stations_df = pd.read_csv(path_to_file, parse_dates=['date'])
    return stations_df
#
# Copyright (C) 2021 The Delta Lake Project Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from datetime import date
from typing import Optional, Sequence
import pandas as pd
from delta_sharing.protocol import AddFile, Metadata, Table
from delta_sharing.reader import DeltaSharingReader
from delta_sharing.rest_client import ListFilesInTableResponse, DataSharingRestClient
from delta_sharing.tests.conftest import ENABLE_INTEGRATION, SKIP_MESSAGE
def test_to_pandas_non_partitioned(tmp_path):
pdf1 = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]})
pdf2 = pd.DataFrame({"a": [4, 5, 6], "b": ["d", "e", "f"]})
pdf1.to_parquet(tmp_path / "pdf1.parquet")
pdf2.to_parquet(tmp_path / "pdf2.parquet")
class RestClientMock:
def list_files_in_table(
self,
table: Table,
*,
predicateHints: Optional[Sequence[str]] = None,
limitHint: Optional[int] = None,
) -> ListFilesInTableResponse:
assert table == Table("table_name", "share_name", "schema_name")
metadata = Metadata(
schema_string=(
'{"fields":['
'{"metadata":{},"name":"a","nullable":true,"type":"long"},'
'{"metadata":{},"name":"b","nullable":true,"type":"string"}'
'],"type":"struct"}'
)
)
add_files = [
AddFile(
url=str(tmp_path / "pdf1.parquet"),
id="pdf1",
partition_values={},
size=0,
stats="",
),
AddFile(
url=str(tmp_path / "pdf2.parquet"),
id="pdf2",
partition_values={},
size=0,
stats="",
),
]
return ListFilesInTableResponse(
table=table, protocol=None, metadata=metadata, add_files=add_files
)
reader = DeltaSharingReader(Table("table_name", "share_name", "schema_name"), RestClientMock())
pdf = reader.to_pandas()
expected = pd.concat([pdf1, pdf2]).reset_index(drop=True)
pd.testing.assert_frame_equal(pdf, expected)
def test_to_pandas_partitioned(tmp_path):
pdf1 = pd.DataFrame({"a": [1, 2, 3]})
pdf2 = pd.DataFrame({"a": [4, 5, 6]})
pdf1.to_parquet(tmp_path / "pdf1.parquet")
pdf2.to_parquet(tmp_path / "pdf2.parquet")
class RestClientMock:
def list_files_in_table(
self,
table: Table,
*,
predicateHints: Optional[Sequence[str]] = None,
limitHint: Optional[int] = None,
) -> ListFilesInTableResponse:
assert table == Table("table_name", "share_name", "schema_name")
metadata = Metadata(
schema_string=(
'{"fields":['
'{"metadata":{},"name":"a","nullable":true,"type":"long"},'
'{"metadata":{},"name":"b","nullable":true,"type":"string"}'
'],"type":"struct"}'
)
)
add_files = [
AddFile(
url=str(tmp_path / "pdf1.parquet"),
id="pdf1",
partition_values={"b": "x"},
size=0,
stats="",
),
AddFile(
url=str(tmp_path / "pdf2.parquet"),
id="pdf2",
partition_values={"b": "y"},
size=0,
stats="",
),
]
return ListFilesInTableResponse(
table=table, protocol=None, metadata=metadata, add_files=add_files
)
reader = DeltaSharingReader(Table("table_name", "share_name", "schema_name"), RestClientMock())
pdf = reader.to_pandas()
expected1 = pdf1.copy()
expected1["b"] = "x"
expected2 = pdf2.copy()
expected2["b"] = "y"
    expected = pd.concat([expected1, expected2]).reset_index(drop=True)
    pd.testing.assert_frame_equal(pdf, expected)
"""
Setup directories & files for new experiment.
Based on init_runame.yml file, sets up:
- inits/
- runs/
- jobs/
- sims/
- subjob.s
"""
import os
import numpy as np
import pandas as pd
import yaml
from numpy import genfromtxt
from ideotype.utils import (get_filelist,
stomata_waterstress,
estimate_pdate)
from ideotype.data import DATA_PATH
def read_inityaml(run_name, yamlfile=None):
"""
Read in init_runame yaml file.
yaml file inclues all setup info for a particular experiment run.
Parameters
----------
run_name: str
Run name for particular batch of simulations.
yamlfile: str
default None - function reads init_runame.yml file in project dirct.
a testing yamlfile path need to be passed for testing purposes.
Returns
-------
dict_setup: dictionary
Dictionary that only includes experiment setup info.
"""
# Default situation
if yamlfile is None:
# Fetch yaml file with experiment setup specs
# yaml files all stored in ideotype/data/inits/
fname_init = os.path.join(DATA_PATH,
'inits',
'init_' + run_name + '.yml')
# Manul input a test yamlfile to function for testing purposes
else:
fname_init = yamlfile
if not os.path.isfile(fname_init): # check whether init_.yml file exists
raise ValueError(f'init param file {fname_init} does not exist!')
# read in init param yaml file
with open(fname_init, 'r') as pfile:
dict_init = yaml.safe_load(pfile)
if not dict_init['setup']['run_name'] == run_name:
raise ValueError('mismatch run_name between yaml file name'
'and setup record within yaml file!')
dict_setup = dict_init['setup']
dict_setup['params'] = dict_init['params']
dict_setup['specs'] = dict_init['specs']
dict_setup['init'] = dict_init['init']
dict_setup['time'] = dict_init['time']
dict_setup['climate'] = dict_init['climate']
dict_setup['management'] = dict_init['management']
dict_setup['cultivar'] = dict_init['cultivar']
return dict_setup
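# Hypothetical usage for a run named 'opt' (placeholder name): this expects
# ideotype/data/inits/init_opt.yml to exist and returns the merged setup dict.
# >>> dict_setup = read_inityaml('opt')
# >>> all(k in dict_setup for k in ('params', 'specs', 'init', 'climate'))
# True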
def read_siteinfo(file_siteinfo, file_siteyears):
"""
Read in site info and siteyears.
Parameters:
-----------
file_siteinfo : str
file path for site info
file_siteyears : str
file path for siteyears info
Returns:
--------
site_info : pd dataframe
siteyears : pd dataframe
"""
site_info = pd.read_csv(file_siteinfo,
dtype={'USAF': str},
usecols=[0, 1, 3, 4, 8, 9, 10])
site_info.columns = ['site', 'class', 'station',
'state', 'tzone', 'lat', 'lon']
siteyears = pd.read_csv(file_siteyears, dtype=str)
return site_info, siteyears
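# Hypothetical usage with placeholder file paths; column names follow the
# renaming applied above.
# >>> site_info, siteyears = read_siteinfo('data/sites/site_info.csv',
# ...                                      'data/siteyears/siteyears.csv')
# >>> list(site_info.columns)
# ['site', 'class', 'station', 'state', 'tzone', 'lat', 'lon']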
def make_dircts(run_name, yamlfile=None, cont_years=True, cont_cvars=True):
"""
Make all required directories in experiment directory.
Directories include experiment-specific subdirectories for:
1. /inits
1.1 /inits/cultivars
1.2 /inits/customs
2. /jobs
3. /runs
4. /sims
Parameters
----------
run_name: str
        Run name for a specific batch of simulations.
    yamlfile: str
        Default None - function reads the init_runame.yml file in the project directory.
        A testing yamlfile path needs to be passed for testing purposes.
cont_years: Bool
Default True
How yaml file stores simulation years info.
        True: stores start and end year, assuming all years in between.
        False: stores individual years (testing purposes)
    cont_cvars: Bool
        Default True
        How yaml file stores simulation cvars info.
        True: stores a single number representing the total number of cultivars.
        False: stores specific cultivars (testing purposes).
"""
# Read in setup yaml file
dict_setup = read_inityaml(run_name, yamlfile=yamlfile)
# Set up project directory
dirct_project = dict_setup['path_project']
# Set up filepaths
if dict_setup['base_path'] == 'testing':
basepath = DATA_PATH
fpath_siteinfo = os.path.join(basepath,
*dict_setup['site_info'])
fpath_siteyears = os.path.join(basepath,
*dict_setup['siteyears'])
else:
fpath_siteinfo = os.path.join(
DATA_PATH, 'sites', dict_setup['site_info'])
fpath_siteyears = os.path.join(
DATA_PATH, 'siteyears', dict_setup['siteyears'])
# Read in site-years
df_siteinfo, df_siteyears = read_siteinfo(fpath_siteinfo,
fpath_siteyears)
# *** /inits/cultivars
dirct_inits_cultivars = os.path.join(dirct_project,
'inits',
'cultivars',
run_name)
# Check if folder exits, only execute if not
if not os.path.isdir(dirct_inits_cultivars):
os.mkdir(dirct_inits_cultivars)
else:
raise ValueError(f'directory {dirct_inits_cultivars} already exists!')
# *** /jobs
dirct_jobs = os.path.join(dirct_project, 'jobs', run_name)
if not os.path.isdir(dirct_jobs):
os.mkdir(dirct_jobs)
else:
raise ValueError(f'directory {dirct_jobs} already exists!')
# Set up years & cvars info
years = dict_setup['specs']['years'] # fetch from init_runame.yml
cvars = dict_setup['specs']['cvars'] # fetch from init_runame.yml
# Check if cultivar in yamlfile is continuous or not
# False case mostly for testing purposes
cultivars = list()
if cont_cvars is True:
cvars_iter = np.arange(cvars[0])
else:
cvars_iter = cvars
# Assemble cultivars
for var in cvars_iter:
cultivar = 'var_' + str(var)
cultivars.append(cultivar)
# *** /inits/customs
dirct_inits_customs = os.path.join(
dirct_project, 'inits', 'customs', run_name)
if not os.path.isdir(dirct_inits_customs):
os.mkdir(dirct_inits_customs)
else:
raise ValueError(
f'directory {dirct_inits_customs} already exists!')
for cultivar in cultivars:
os.mkdir(os.path.join(dirct_inits_customs, cultivar))
for row in np.arange(df_siteyears.shape[0]):
siteyear = (f'{df_siteyears.iloc[row].site}_'
f'{df_siteyears.iloc[row].year}')
dirct_inits_customs_siteyear = os.path.join(dirct_project,
'inits',
'customs',
run_name,
cultivar,
siteyear)
os.mkdir(dirct_inits_customs_siteyear)
# *** /runs & /sims
for folder in (['runs'], ['sims']):
dirct_folder = os.path.join(dirct_project, *folder, run_name)
if not os.path.isdir(dirct_folder):
# make /runs or /sims directory with run_name
os.mkdir(dirct_folder)
# check if years in yaml file are consecutive range or individual
# False case mostly for testing
if cont_years is True:
years_iter = np.arange(years[0], years[1]+1)
else:
years_iter = years
# create top level year directories
for year in years_iter:
os.mkdir(os.path.join(dirct_folder, str(year)))
# create second layer cultivar directories
for cultivar in cultivars:
os.mkdir(os.path.join(dirct_folder,
str(year),
str(cultivar)))
else:
raise ValueError(f'directory {dirct_folder} already exists!')
def make_inits(run_name, yamlfile=None, cont_cvars=True):
"""
Create custom init files for MAIZSIM sims.
Parameters
----------
run_name: str
run name for batch of experiments.
yamlfile: str
default None - function reads init_runame.yml file in project dirct.
a testing yamlfile path need to be passed for testing purposes.
Returns
-------
init.txt
time.txt
climate.txt
management.txt
"""
dict_setup = read_inityaml(run_name, yamlfile=yamlfile)
dirct_project = dict_setup['path_project']
dirct_init = os.path.join(dirct_project, 'inits', 'customs', run_name)
# only execute if no run files already exist
filelist = get_filelist(os.path.expanduser(dirct_init))
if len(filelist) == 0:
# read in site_info & siteyears info
if dict_setup['base_path'] == 'testing':
basepath = DATA_PATH
fpath_siteinfo = os.path.join(basepath,
*dict_setup['site_info'])
fpath_siteyears = os.path.join(basepath,
*dict_setup['siteyears'])
fpath_params = os.path.join(basepath,
*dict_setup['path_params'])
fpath_weas = os.path.join(basepath,
*dict_setup['path_wea'])
else:
fpath_siteinfo = os.path.join(
DATA_PATH, 'sites', dict_setup['site_info'])
fpath_siteyears = os.path.join(
DATA_PATH, 'siteyears', dict_setup['siteyears'])
fpath_params = os.path.join(
DATA_PATH, 'params', dict_setup['path_params'])
fpath_weas = os.path.join(
dict_setup['path_project'], *dict_setup['path_wea'])
df_siteinfo, df_siteyears = read_siteinfo(fpath_siteinfo,
fpath_siteyears)
df_params = pd.read_csv(fpath_params)
# fetch & setup site-years info
data = genfromtxt(fpath_siteyears,
delimiter=',',
skip_header=1,
usecols=(0, 1),
dtype=('U6', int, int, 'U10'))
siteyears = []
for row in data:
siteyears.append(str(row[0]) + '_' + str(row[1]))
# Set up cultivars
cvars = dict_setup['specs']['cvars'] # fetch cultivar numbers
# Check if cultivar in yamlfile is continuous or not
# False case for control sim & testing purposes
cultivars = list()
if cont_cvars is True:
cvars_iter = np.arange(cvars[0])
else:
cvars_iter = cvars
# assemble cultivars
for var in cvars_iter:
cultivar = 'var_' + str(var)
cultivars.append(cultivar)
# Loop through cultivars
for cultivar in cultivars:
for siteyear in siteyears:
site = siteyear.split('_')[0]
year = siteyear.split('_')[1]
lat = df_siteinfo[df_siteinfo.site == site].lat.item()
lon = df_siteinfo[df_siteinfo.site == site].lon.item()
# *** init.txt
init_txt = open(os.path.join(dirct_init,
cultivar,
siteyear,
'init.txt'), 'w')
# Dynamic pdate but gdd not perturbed
if dict_setup['init']['plant_date'] == 'dynamic':
# use default gdd_threhold value
gdd = dict_setup['params']['gdd']
start, sowing = estimate_pdate(fpath_weas,
site,
year,
gdd)
# Dynamic pdate with perturbed gdd
elif dict_setup['init']['plant_date'] == 'dynamic_perturbed':
# use perturbed gdd value
gdd = df_params.loc[int(cultivar.split('_')[-1]), 'gdd']
start, sowing = estimate_pdate(fpath_weas,
site,
year,
gdd)
                # Standard pdate across all sites
else:
sowing = f'{dict_setup["init"]["plant_date"]}{year}'
sowing = "'" + sowing + "'" # requires single quote
start = f'{dict_setup["init"]["start_date"]}{year}'
start = "'" + start + "'"
end = f'{dict_setup["init"]["end_date"]}{year}'
end = "'" + end + "'"
# set up init.txt text strings
pop = df_params.loc[int(cultivar.split('_')[-1]), 'pop']
str1 = '*** initialization data ***\n'
str2 = ('poprow\trowsp\tplant_density\trowang\t'
'x_seed\ty_seed\ttab\tCEC\teomult\tco2\n')
str3 = (f'{pop*(dict_setup["init"]["rowsp"])/100:.1f}\t'
f'{dict_setup["init"]["rowsp"]:.1f}\t'
f'{pop:.1f}\t'
f'{dict_setup["init"]["rowang"]:.1f}\t'
f'{dict_setup["init"]["x_seed"]:.1f}\t'
f'{dict_setup["init"]["y_seed"]:.1f}\t'
f'{dict_setup["init"]["cec"]:.2f}\t'
f'{dict_setup["init"]["eomult"]:.2f}\t'
f'{dict_setup["init"]["co2"]:.0f}\n')
str4 = 'latitude\tlongitude\taltitude\n'
str5 = (f'{lat:.2f}\t'
f'{lon:.2f}\t'
f'{dict_setup["init"]["alt"]:.2f}\n')
str6 = 'autoirrigate\n'
str7 = f'{dict_setup["init"]["irrigate"]}\n'
str8 = 'begin\tsowing\tend\ttimestep (mins)\n'
str9 = (start + '\t' + sowing + '\t' + end + '\t'
f'{dict_setup["init"]["timestep"]:.0f}\n')
str10 = ('output soils data (g03, g04, g05, and g06 files)'
' 1 if true\n')
str11 = 'no soil files\toutputsoil files\n'
if dict_setup["init"]["soil"]:
str12 = '0\t1\n'
else:
str12 = '1\t0\n'
strings = [str1, str2, str3, str4, str5, str6,
str7, str8, str9, str10, str11, str12]
init_txt.writelines(strings)
init_txt.close()
# *** time.txt
time_txt = open(os.path.join(dirct_init,
cultivar,
siteyear,
'time.txt'), 'w')
# set up text strings
str1 = '*** synchronizer information ***\n'
str2 = 'initial time\tdt\tdtmin\tdmul1\tdmul2\ttfin\n'
str3 = (start + '\t' +
f'{dict_setup["time"]["dt"]}\t'
f'{dict_setup["time"]["dt_min"]}\t'
f'{dict_setup["time"]["dmul1"]}\t'
f'{dict_setup["time"]["dmul2"]}\t' +
end + '\n')
str4 = 'output variables, 1 if true\tDaily\tHourly\n'
if dict_setup['time']['output_timestep'] == 'hourly':
output_timestep = '0\t1\n'
else:
output_timestep = '1\t0\n'
str5 = output_timestep
str6 = 'weather data, 1 if true\tDaily\tHourly\n'
if dict_setup['time']['input_timestep'] == 'hourly':
input_timestep = '0\t1\n'
else:
input_timestep = '1\t0\n'
str7 = input_timestep
strings = [str1, str2, str3, str4, str5, str6, str7]
time_txt.writelines(strings)
time_txt.close()
# *** climate.txt
climate_txt = open(os.path.join(dirct_init,
cultivar,
siteyear,
'climate.txt'), 'w')
# put together txt strings
str1 = '*** standard meteorological data ***\n'
str2 = 'latitude\n'
str3 = f'{lat}\n'
str4 = ('daily bulb temp, daily wind, rain intensity, '
'daily conc, furrow, relative humidity, co2\n')
str5 = (f'{dict_setup["climate"]["daily_bulb"]}\t'
f'{dict_setup["climate"]["daily_wind"]}\t'
f'{dict_setup["climate"]["rain_intensity"]}\t'
f'{dict_setup["climate"]["daily_conc"]}\t'
f'{dict_setup["climate"]["furrow"]}\t'
f'{dict_setup["climate"]["relative_humidity"]}\t'
f'{dict_setup["climate"]["daily_co2"]}\n')
str6 = ('parameters for unit conversion:'
'BSOLAR BTEMP ATEMP ERAIN BWIND BIR\n')
str7 = 'BSOLAR is 1e6/3600 to go from jm-2h-1 to wm-2\n'
str8 = (f'{dict_setup["climate"]["bsolar"]}\t'
f'{dict_setup["climate"]["btemp"]}\t'
f'{dict_setup["climate"]["atemp"]}\t'
f'{dict_setup["climate"]["erain"]}\t'
f'{dict_setup["climate"]["bwind"]}\t'
f'{dict_setup["climate"]["bir"]}\n')
str9 = 'average values for the site\n'
str10 = 'WINDA\tIRAV\tConc\tCO2\n'
str11 = (f'{dict_setup["climate"]["winda"]}\t'
f'{dict_setup["climate"]["conc"]}\n')
strings = [str1, str2, str3, str4, str5,
str6, str7, str8, str9, str10, str11]
climate_txt.writelines(strings)
climate_txt.close()
# *** management.txt
management_txt = open(os.path.join(dirct_init,
cultivar,
siteyear,
'management.txt'), 'w')
# addressing N application date according to dynamic pdate
sowing_date = pd.to_datetime(sowing, format="'%m/%d/%Y'")
appl_date1 = sowing_date + | pd.DateOffset(days=14) | pandas.DateOffset |
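# --- Standalone sketch (outside make_inits, illustrative only): how the
# N-application dates are derived from the parsed sowing date. The 14-day
# offset mirrors the appl_date1 line above; the 30-day second application is a
# made-up value used only to show the pattern.
def example_application_dates(sowing_str="'05/01/2010'"):
    sowing = pd.to_datetime(sowing_str, format="'%m/%d/%Y'")
    appl_1 = sowing + pd.DateOffset(days=14)
    appl_2 = sowing + pd.DateOffset(days=30)  # hypothetical second application
    return appl_1.strftime('%m/%d/%Y'), appl_2.strftime('%m/%d/%Y')
# example_application_dates() -> ('05/15/2010', '05/31/2010')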
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__all__ = ["MiniRocket"]
import multiprocessing
import numpy as np
import pandas as pd
from numba import get_num_threads, njit, prange, set_num_threads, vectorize
from sktime.transformations.base import BaseTransformer
class MiniRocket(BaseTransformer):
"""MINIROCKET.
MINImally RandOm Convolutional KErnel Transform
**Univariate**
    Univariate input only. Use class MiniRocketMultivariate for multivariate
input.
@article{dempster_etal_2020,
author = {<NAME> Schmidt, <NAME> and Webb, <NAME>},
title = {{MINIROCKET}: A Very Fast (Almost) Deterministic Transform for
Time Series Classification},
year = {2020},
journal = {arXiv:2012.08791}
}
Parameters
----------
num_kernels : int, number of random convolutional kernels
(default 10,000)
max_dilations_per_kernel : int, maximum number of dilations per kernel (default 32)
n_jobs : int, optional (default=1) The number of jobs to run in
parallel for `transform`. ``-1`` means using all processors.
random_state : int, random seed (optional, default None)
"""
_tags = {
"univariate-only": True,
"fit_is_empty": False,
"scitype:transform-input": "Series",
# what is the scitype of X: Series, or Panel
"scitype:transform-output": "Primitives",
# what is the scitype of y: None (not needed), Primitives, Series, Panel
"scitype:instancewise": False, # is this an instance-wise transform?
"X_inner_mtype": "numpy3D", # which mtypes do _fit/_predict support for X?
"y_inner_mtype": "None", # which mtypes do _fit/_predict support for X?
}
def __init__(
self,
num_kernels=10_000,
max_dilations_per_kernel=32,
n_jobs=1,
random_state=None,
):
self.num_kernels = num_kernels
self.max_dilations_per_kernel = max_dilations_per_kernel
self.n_jobs = n_jobs
self.random_state = random_state
super(MiniRocket, self).__init__()
def _fit(self, X, y=None):
"""Fits dilations and biases to input time series.
Parameters
----------
X : 3D np.ndarray of shape = [n_instances, n_dimensions, series_length]
panel of time series to transform
y : ignored argument for interface compatibility
Returns
-------
self
"""
random_state = (
np.int32(self.random_state) if isinstance(self.random_state, int) else None
)
X = X[:, 0, :].astype(np.float32)
_, n_timepoints = X.shape
if n_timepoints < 9:
raise ValueError(
(
f"n_timepoints must be >= 9, but found {n_timepoints};"
" zero pad shorter series so that n_timepoints == 9"
)
)
self.parameters = _fit(
X, self.num_kernels, self.max_dilations_per_kernel, random_state
)
return self
def _transform(self, X, y=None):
"""Transform input time series.
Parameters
----------
X : 3D np.ndarray of shape = [n_instances, n_dimensions, series_length]
panel of time series to transform
y : ignored argument for interface compatibility
Returns
-------
pandas DataFrame, transformed features
"""
X = X[:, 0, :].astype(np.float32)
        # adjust n_jobs depending on its value and the available cores
prev_threads = get_num_threads()
if self.n_jobs < 1 or self.n_jobs > multiprocessing.cpu_count():
n_jobs = multiprocessing.cpu_count()
else:
n_jobs = self.n_jobs
set_num_threads(n_jobs)
X_ = _transform(X, self.parameters)
set_num_threads(prev_threads)
return | pd.DataFrame(X_) | pandas.DataFrame |
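# --- Minimal usage sketch for the MiniRocket transformer above (illustrative
# only). Assumes the module-level numba _fit/_transform routines from the
# original sktime file are present and that the base class accepts a numpy3D
# panel of shape [n_instances, 1, series_length >= 9].
def _demo_minirocket():
    X_demo = np.random.default_rng(0).normal(size=(20, 1, 60)).astype(np.float32)
    mr = MiniRocket(random_state=0)
    return mr.fit_transform(X_demo)  # pandas DataFrame of pooled kernel features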
import pandas as pd
import os
import numpy as np
import gc
import copy
import datetime
import warnings
from tqdm import tqdm
from scipy import sparse
from numpy import array
from scipy.sparse import csr_matrix
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfTransformer
###############################################
######### Load the data
###############################################
user_app = pd.read_csv('../../data/processed_data/user_app.csv', dtype={'uId':np.int32, 'appId':str})
app_info = pd.read_csv('../../data/processed_data/app_info.csv', dtype={'appId':str, 'category':int})
###############################################
########## Memory compression helper
###############################################
# Downcast column dtypes to save memory
from tqdm import tqdm_notebook
class _Data_Preprocess:
def __init__(self):
self.int8_max = np.iinfo(np.int8).max
self.int8_min = np.iinfo(np.int8).min
self.int16_max = np.iinfo(np.int16).max
self.int16_min = np.iinfo(np.int16).min
self.int32_max = np.iinfo(np.int32).max
self.int32_min = np.iinfo(np.int32).min
self.int64_max = np.iinfo(np.int64).max
self.int64_min = np.iinfo(np.int64).min
self.float16_max = np.finfo(np.float16).max
self.float16_min = np.finfo(np.float16).min
self.float32_max = np.finfo(np.float32).max
self.float32_min = np.finfo(np.float32).min
self.float64_max = np.finfo(np.float64).max
self.float64_min = np.finfo(np.float64).min
'''
function: _get_type(self,min_val, max_val, types)
        get the smallest dtype that the column's value range can be downcast to
'''
def _get_type(self, min_val, max_val, types):
if types == 'int':
if max_val <= self.int8_max and min_val >= self.int8_min:
return np.int8
            elif max_val <= self.int16_max and min_val >= self.int16_min:
return np.int16
elif max_val <= self.int32_max and min_val >= self.int32_min:
return np.int32
return None
elif types == 'float':
if max_val <= self.float16_max and min_val >= self.float16_min:
return np.float16
if max_val <= self.float32_max and min_val >= self.float32_min:
return np.float32
if max_val <= self.float64_max and min_val >= self.float64_min:
return np.float64
return None
'''
function: _memory_process(self,df)
        downcast column data types to save memory
'''
def _memory_process(self, df):
init_memory = df.memory_usage().sum() / 1024 ** 2 / 1024
print('Original data occupies {} GB memory.'.format(init_memory))
df_cols = df.columns
for col in tqdm_notebook(df_cols):
try:
if 'float' in str(df[col].dtypes):
max_val = df[col].max()
min_val = df[col].min()
trans_types = self._get_type(min_val, max_val, 'float')
if trans_types is not None:
df[col] = df[col].astype(trans_types)
elif 'int' in str(df[col].dtypes):
max_val = df[col].max()
min_val = df[col].min()
trans_types = self._get_type(min_val, max_val, 'int')
if trans_types is not None:
df[col] = df[col].astype(trans_types)
except:
                print('Could not downcast column {}.'.format(col))
afterprocess_memory = df.memory_usage().sum() / 1024 ** 2 / 1024
print('After processing, the data occupies {} GB memory.'.format(afterprocess_memory))
return df
memory_preprocess = _Data_Preprocess()
# Usage:
# baseSet=memory_preprocess._memory_process(baseSet)
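# Quick self-check of the downcasting above (illustrative only, wrapped in a
# function so it does not run as part of the pipeline): a small int64/float64
# frame should come back as int8/float16 because its values fit those ranges.
def _demo_memory_process():
    demo = pd.DataFrame({'a': np.arange(100, dtype=np.int64),
                         'b': np.linspace(0.0, 1.0, 100)})
    demo = memory_preprocess._memory_process(demo)
    return demo.dtypes  # expected: a -> int8, b -> float16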
###############################################
########## Count each user's installed apps and their share of all apps
###############################################
app_counts = user_app[['appId']].drop_duplicates().count()
userSub = user_app.groupby('uId')['appId'].nunique().reset_index().rename(columns={'appId': 'user_app_active_counts'})
userSub['user_app_active_ratio'] = userSub['user_app_active_counts'].apply(lambda x: x/app_counts)
del app_counts
user_app_active_counts = userSub.copy()
###############################################
######## Apps installed by users in each age group
###############################################
age_train = pd.read_csv('../../data/processed_data/age_train.csv',dtype={'uId':np.int32, 'age_group':np.int8})
userSub = pd.merge(age_train, user_app, how='left', on='uId')
userSub=pd.pivot_table(userSub, values='uId', index=['appId'],columns=['age_group'],aggfunc='count', fill_value=0)
userSub['sum']=userSub.sum(axis=1)
userSub= userSub.reset_index()
userSub.rename(columns={1:'age_1',2:'age_2',3:'age_3',4:'age_4',5:'age_5',6:'age_6'},inplace=True)
userSub.drop(axis=0, index=0, inplace=True)
userSub['age1_%']= userSub.apply(lambda x: round(x['age_1']/x['sum'],2),axis=1)
userSub['age2_%']= userSub.apply(lambda x: round(x['age_2']/x['sum'],2),axis=1)
userSub['age3_%']= userSub.apply(lambda x: round(x['age_3']/x['sum'],2),axis=1)
userSub['age4_%']= userSub.apply(lambda x: round(x['age_4']/x['sum'],2),axis=1)
userSub['age5_%']= userSub.apply(lambda x: round(x['age_5']/x['sum'],2),axis=1)
userSub['age6_%']= userSub.apply(lambda x: round(x['age_6']/x['sum'],2),axis=1)
age1 = userSub[(userSub['age1_%'] >= 0.3)][['appId']].copy()
age1['age_num1'] = 1
age2 = userSub[(userSub['age2_%'] >= 0.6)][['appId']].copy()
age2['age_num2'] = 1
age3 = userSub[(userSub['age3_%'] >= 0.6)][['appId']].copy()
age3['age_num3'] = 1
age4 = userSub[(userSub['age4_%'] >= 0.6)][['appId']].copy()
age4['age_num4'] = 1
age5 = userSub[(userSub['age5_%'] >= 0.3)][['appId']].copy()
age5['age_num5'] = 1
age6 = userSub[(userSub['age6_%'] >= 0.3)][['appId']].copy()
age6['age_num6'] = 1
userSub = pd.merge(user_app, age1, how='left', on='appId').fillna(0)
userSub = pd.merge(userSub, age2, how='left', on='appId').fillna(0)
userSub = pd.merge(userSub, age3, how='left', on='appId').fillna(0)
userSub = pd.merge(userSub, age4, how='left', on='appId').fillna(0)
userSub = pd.merge(userSub, age5, how='left', on='appId').fillna(0)
userSub = pd.merge(userSub, age6, how='left', on='appId').fillna(0)
userSub = userSub.groupby('uId').sum().reset_index()
user_active_app_age = userSub.copy()
###############################################
########## Number of apps each user installed per category
###############################################
userSub = pd.merge(user_app, app_info, how='left', on='appId').fillna(method='pad')
userSub = pd.pivot_table(userSub, values='appId', index=['uId'],columns=['category'], aggfunc='count', fill_value=0).reset_index()
userSub['use_app_cate_nums']=0
for i in range(25):
userSub['use_app_cate_nums']+=userSub[float(i)]
for i in range(26,30):
userSub['use_app_cate_nums']+=userSub[float(i)]
for i in range(34,36):
userSub['use_app_cate_nums']+=userSub[float(i)]
for i in range(25):
userSub[str(float(i))+ '_ratio']=userSub[float(i)]/userSub['use_app_cate_nums']
for i in range(26,30):
userSub[str(float(i))+ '_ratio']=userSub[float(i)]/userSub['use_app_cate_nums']
for i in range(34,36):
userSub[str(float(i))+ '_ratio']=userSub[float(i)]/userSub['use_app_cate_nums']
user_active_category_counts = userSub.copy()
###############################################
########## How many app categories each user installed
###############################################
userSub = pd.merge(user_app, app_info, how='left', on='appId').fillna(method='pad')
userSub = userSub[['uId', 'category']].groupby('uId')['category'].nunique().reset_index()
userSub.rename(columns={'category': 'active_cate_nums'}, inplace=True)
user_active_cate_nums = userSub.copy()
###############################################
########## Compute each app's target-customer age index
###############################################
age_train = pd.read_csv('../../data/processed_data/age_train.csv',dtype={'uId':np.int32, 'age_group':np.int8})
userSub = pd.merge(age_train, user_app, how='left', on='uId')
userSub=pd.pivot_table(userSub, values='uId', index=['appId'],columns=['age_group'],
aggfunc='count', fill_value=0)
userSub['sum']=userSub.sum(axis=1)
userSub= userSub.reset_index()
userSub.rename(columns={1:'age_1',2:'age_2',3:'age_3',4:'age_4',5:'age_5',6:'age_6'},inplace=True)
userSub.drop(axis=0, index=0, inplace=True)
userSub['age1_%']= userSub.apply(lambda x: round(x['age_1']/x['sum'],2),axis=1)
userSub['age2_%']= userSub.apply(lambda x: round(x['age_2']/x['sum'],2),axis=1)
userSub['age3_%']= userSub.apply(lambda x: round(x['age_3']/x['sum'],2),axis=1)
userSub['age4_%']= userSub.apply(lambda x: round(x['age_4']/x['sum'],2),axis=1)
userSub['age5_%']= userSub.apply(lambda x: round(x['age_5']/x['sum'],2),axis=1)
userSub['age6_%']= userSub.apply(lambda x: round(x['age_6']/x['sum'],2),axis=1)
# Compute each app's target-customer age index (method: sum over age groups N of (app's install share in group N * N * 10 / sample share of group N))
userSub['age_weight']=userSub.apply(lambda x:(10*x['age1_%']/0.03 +20*x['age2_%']/0.2 +30*x['age3_%']/0.3 +40*x['age4_%']/0.25 +50*x['age5_%']/0.15 +60*x['age6_%']/0.075) ,axis=1)
userSub=userSub[['appId','age_weight']]
userSub= | pd.merge(user_app,userSub,how='left',on='appId') | pandas.merge |
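# Worked example of the age_weight formula above (shares are made-up): an app
# whose installs are 10% age_2, 60% age_3 and 30% age_4 scores
# 20*0.10/0.2 + 30*0.60/0.3 + 40*0.30/0.25 = 10 + 60 + 48 = 118,
# i.e. it skews towards the age-3/age-4 groups.
def _example_age_weight(shares=(0.0, 0.10, 0.60, 0.30, 0.0, 0.0),
                        base=(0.03, 0.2, 0.3, 0.25, 0.15, 0.075)):
    return sum(10 * (i + 1) * s / b for i, (s, b) in enumerate(zip(shares, base)))
# _example_age_weight() -> 118.0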
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from colors import bluered10
def open_vm(paths_object, channel, scale, trials=None):
data_path = paths_object.biophys_output
file_name = f'wns_{channel}_{scale}_v.dat'
file_path = os.path.join(data_path, file_name)
data = pd.read_csv(file_path, sep=r'\s+', header=None, usecols=trials)
return data
def open_im(paths_object, channel, scale, trials=None):
data_path = paths_object.biophys_output
file_name = f'wns_{channel}_{scale}_i.dat'
file_path = os.path.join(data_path, file_name)
data = | pd.read_csv(file_path, sep=r'\s+', header=None, usecols=trials) | pandas.read_csv |
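# --- Minimal usage sketch for the loaders above (illustrative only).
# `DemoPaths` stands in for the project's paths object (only `biophys_output`
# is used); the channel/scale values are placeholders and the corresponding
# wns_*.dat files must already exist on disk. open_im is assumed to mirror
# open_vm and return its frame in the full module.
class DemoPaths:
    biophys_output = './biophys_output'
def _demo_open_traces():
    vm = open_vm(DemoPaths(), channel='na', scale=1.0, trials=[0, 1, 2])
    return vm  # one column per requested trial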
import re
import datetime as dt
import numpy as np
import pandas as pd
from path import Path
from PIL import Image
import base64
from io import BytesIO
import plotly
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from skimage import io
import onion_trees as ot
import visualize as bv
import json
import statsmodels as sm
from statsmodels.formula.api import ols
from data import STATE2ABBREV, COUNTY_CORRECTIONS
import bjorn_support as bs
import mutations as bm
def load_img(img_filepath):
img = io.imread(img_filepath)
pil_img = Image.fromarray(img) # PIL image object
prefix = "data:image/png;base64,"
with BytesIO() as stream:
pil_img.save(stream, format="png")
base64_string = prefix + base64.b64encode(stream.getvalue()).decode("utf-8")
fig = go.Figure(go.Image(source=base64_string))
fig.update_layout(margin=dict(l=0, r=0, b=0, t=0),
coloraxis_showscale=False, template='plotly_white', autosize=True)
fig.update_xaxes(showticklabels=False).update_yaxes(showticklabels=False)
return fig
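# Minimal usage sketch for load_img: writes a tiny throwaway PNG first so the
# example is self-contained (any raster image readable by skimage works).
def _demo_load_img(tmp_path='demo_img.png'):
    Image.fromarray(np.zeros((8, 8, 3), dtype=np.uint8)).save(tmp_path)
    fig = load_img(tmp_path)
    return fig  # blank, axis-free plotly canvas wrapping the base64-encoded PNG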
def world_time_relative(data, feature, values, res, strain='B117', vocs=['B.1.1.7', 'B.1.1.70']):
if len(values)==1:
data.loc[:, 'weekday'] = data['date'].dt.weekday
data.loc[:, 'date'] = data['date'] - data['weekday'] * dt.timedelta(days=1)
results = (data.loc[(data[feature]==values[0])]
.drop_duplicates(subset=['date', 'strain']))
total_samples = (data[(~data['pangolin_lineage'].isin(vocs))]
.groupby('date')
.agg(total_samples=('strain', 'nunique')))
else:
res = res.copy()
# res.loc[:, 'tmp'] = res['date'].str.split('-')
# res = res[res['tmp'].str.len()>=3]
# res.loc[:, 'date'] = pd.to_datetime(res['date'], errors='coerce')
res.loc[:, 'weekday'] = res['date'].dt.weekday
res.loc[:, 'date'] = res['date'] - res['weekday'] * dt.timedelta(days=1)
total_samples = (res[(~res['pangolin_lineage'].isin(vocs))]
.groupby('date')
.agg(total_samples=('strain', 'nunique'))
.reset_index())
results = res[(res['is_vui']==True)].drop_duplicates(subset=['date', 'strain'])
b117_world_time = (results.groupby('date')
.agg(num_samples=('strain', 'nunique'),
country_counts=('country',
lambda x: np.unique(x,
return_counts=True)),
divisions=('division', 'unique'),
locations=('location', 'unique'))
.reset_index())
b117_world_time.loc[:, 'countries'] = b117_world_time['country_counts'].apply(lambda x: list(x[0]))
b117_world_time.loc[:, 'country_counts'] = b117_world_time['country_counts'].apply(lambda x: list(x[1]))
b117_world_time = pd.merge(b117_world_time, total_samples, on='date', how='right')
    b117_world_time.loc[:, ['countries', 'divisions', 'locations']] = b117_world_time[['countries', 'divisions', 'locations']].fillna('')
b117_world_time.loc[:, ['num_samples', 'total_samples']] = b117_world_time[['num_samples', 'total_samples']].fillna(0)
first_detected = b117_world_time.loc[b117_world_time['num_samples']>0]['date'].min()
first_countries = b117_world_time.loc[b117_world_time['date']==first_detected, 'countries'].values[0]
b117_world_time = b117_world_time[b117_world_time['date']>=first_detected]
b117_world_time['cum_num_samples'] = b117_world_time['num_samples'].cumsum()
b117_world_time.loc[:, 'cum_total_samples'] = b117_world_time['total_samples'].cumsum()
b117_world_time.loc[:, 'rel_freq'] = b117_world_time['cum_num_samples'] / b117_world_time['cum_total_samples']
fig = go.Figure(data=go.Scatter(y=b117_world_time['rel_freq'],
x=b117_world_time['date'],
name='B.1.1.7 samples', mode='markers+lines',
line_color='rgba(220,20,60,.6)',
text=b117_world_time[['num_samples', 'countries', 'country_counts',
'divisions', 'locations',
'date']],
hovertemplate="<b>Number of cases: %{text[0]}</b><br>" +
"<b>Country(s) Reported: %{text[1]}</b><br>" +
"<b>Cases Per Country: %{text[2]}</b><br>" +
"<b>State(s) Reported: %{text[3]}</b><br>" +
"<b>County(s) Reported: %{text[4]}</b><br>" +
"<b>Date: %{text[5]}</b><br>"))
fig.add_annotation(x=first_detected,
y=b117_world_time.loc[b117_world_time['date']==first_detected, 'rel_freq'].values[0],
text=f"On Earth, {strain} 1st detected in <br> {', '.join(first_countries)} <br> on week of <br> {first_detected.date()}",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-250, ax=100)
fig.update_layout(yaxis_title=f'Relative cumulative frequency of {strain} on Earth',
xaxis_title='Collection Date',
template='plotly_white', autosize=True)#, height=850,
fig.update_yaxes(side = 'right')
return fig
def world_time(data, feature, values, res, strain='B117', sampling_type='random'):
if len(values)==1:
results = (data.loc[(data[feature]==values[0])]
.drop_duplicates(subset=['date', 'strain']))
else:
# results = (data.groupby(['date', 'country', 'division', 'purpose_of_sequencing',
# 'location', 'pangolin_lineage', 'strain'])
# .agg(mutations=('mutation', 'unique')).reset_index())
# results['is_vui'] = results['mutations'].apply(is_vui, args=(set(values),))
results = (res[(res['is_vui']==True)]
.drop_duplicates(subset=['date', 'strain']))
b117_world_time = (results.groupby('date')
.agg(num_samples=('strain', 'nunique'),
country_counts=('country',
lambda x: np.unique(x,
return_counts=True)),
divisions=('division', 'unique'),
locations=('location', 'unique'))
.reset_index())
b117_world_time.loc[:, 'countries'] = b117_world_time['country_counts'].apply(lambda x: list(x[0]))
b117_world_time.loc[:, 'country_counts'] = b117_world_time['country_counts'].apply(lambda x: list(x[1]))
b117_world_time.loc[:, 'date'] = pd.to_datetime(b117_world_time['date'],
errors='coerce')
b117_world_time['cum_num_samples'] = b117_world_time['num_samples'].cumsum()
first_detected = b117_world_time['date'].min()
first_countries = b117_world_time.loc[b117_world_time['date']==first_detected, 'countries']
fig = go.Figure(data=go.Scatter(y=b117_world_time['cum_num_samples'],
x=b117_world_time['date'],
name='B.1.1.7 samples', mode='markers+lines',
line_color='rgba(220,20,60,.6)',
text=b117_world_time[['num_samples', 'countries', 'country_counts',
'divisions', 'locations',
'date']],
hovertemplate="<b>Number of cases: %{text[0]}</b><br>" +
"<b>Country(s) Reported: %{text[1]}</b><br>" +
"<b>Cases Per Country: %{text[2]}</b><br>" +
"<b>State(s) Reported: %{text[3]}</b><br>" +
"<b>County(s) Reported: %{text[4]}</b><br>" +
"<b>Date: %{text[5]}</b><br>"))
fig.add_annotation(x=first_detected,
y=b117_world_time.loc[b117_world_time['date']==first_detected, 'cum_num_samples'].values[0],
text=f"On Earth, {strain} 1st detected in <br> {', '.join(first_countries.values[0])} <br> on <br> {first_detected.date()}",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-250, ax=100)
fig.update_layout(yaxis_title='Global cumulative number of cases over time',
xaxis_title='Collection Date',
template='plotly_white', autosize=True)#, height=850,
fig.update_yaxes(side = 'right')
return fig
def us_time_relative(data, feature, values, res, strain='B117', country='USA', vocs=['B.1.1.7', 'B.1.1.70']):
if len(values)==1:
data.loc[:, 'weekday'] = data['date'].dt.weekday
data.loc[:, 'date'] = data['date'] - data['weekday'] * dt.timedelta(days=1)
results = (data.loc[(data[feature]==values[0])&
(data['country']=='United States of America')]
.drop_duplicates(subset=['date', 'strain']))
total_samples = (data[(~data['pangolin_lineage'].isin(vocs))&
(data['country']=='United States of America')]
.groupby('date')
.agg(total_samples=('strain', 'nunique')))
else:
# results = (data.groupby(['date', 'country', 'division', 'purpose_of_sequencing',
# 'location', 'pangolin_lineage', 'strain'])
# .agg(mutations=('mutation', 'unique')).reset_index())
# results['is_vui'] = results['mutations'].apply(is_vui, args=(set(values),))
res = res.copy()
# res.loc[:, 'tmp'] = res['date'].str.split('-')
# res = res[res['tmp'].str.len()>=3]
# res.loc[:, 'date'] = pd.to_datetime(res['date'], errors='coerce')
res.loc[:, 'weekday'] = res['date'].dt.weekday
res.loc[:, 'date'] = res['date'] - res['weekday'] * dt.timedelta(days=1)
total_samples = (res[(~res['pangolin_lineage'].isin(vocs))
&(res['country']=='United States of America')]
.groupby('date')
.agg(total_samples=('strain', 'nunique'))
.reset_index())
results = (res[(res['is_vui']==True)
& (res['country']=='United States of America')]
.drop_duplicates(subset=['date', 'strain']))
results['purpose_of_sequencing'] = '?'
random = results[results['purpose_of_sequencing']=='?']
biased = results[results['purpose_of_sequencing']!='?']
b117_us_time = (random.groupby('date')
.agg(
num_samples=('strain', 'nunique'),
state_counts=('division',
lambda x: np.unique(x,
return_counts=True))
)
.reset_index())
b117_us_time.loc[:, 'states'] = b117_us_time['state_counts'].apply(lambda x: list(x[0]))
b117_us_time.loc[:, 'state_counts'] = b117_us_time['state_counts'].apply(lambda x: list(x[1]))
b117_us_time = pd.merge(b117_us_time, total_samples, on='date', how='right')
    b117_us_time.loc[:, 'states'] = b117_us_time['states'].fillna('')
b117_us_time.loc[:, ['num_samples', 'total_samples']] = b117_us_time[['num_samples', 'total_samples']].fillna(0)
sdrop_us_time = (biased.groupby('date')
.agg(
num_samples=('strain', 'nunique'),
state_counts=('division',
lambda x: np.unique(x,
return_counts=True))
)
.reset_index())
sdrop_us_time.loc[:, 'states'] = sdrop_us_time['state_counts'].apply(lambda x: list(x[0]))
sdrop_us_time.loc[:, 'state_counts'] = sdrop_us_time['state_counts'].apply(lambda x: list(x[1]))
sdrop_us_time = pd.merge(sdrop_us_time, total_samples, on='date', how='right')
    sdrop_us_time.loc[:, 'states'] = sdrop_us_time['states'].fillna('')
sdrop_us_time.loc[:, ['num_samples', 'total_samples']] = sdrop_us_time[['num_samples', 'total_samples']].fillna(0)
fig = go.Figure()
if b117_us_time[b117_us_time['num_samples']>0].shape[0] > 0:
first_detected = b117_us_time.loc[b117_us_time['num_samples']>0]['date'].min()
first_states = b117_us_time.loc[b117_us_time['date']==first_detected, 'states'].values[0]
b117_us_time = b117_us_time[b117_us_time['date']>=first_detected]
b117_us_time.loc[:, 'cum_num_samples'] = b117_us_time['num_samples'].cumsum()
b117_us_time.loc[:, 'cum_total_samples'] = b117_us_time['total_samples'].cumsum()
b117_us_time.loc[:, 'rel_freq'] = b117_us_time['cum_num_samples'] / b117_us_time['cum_total_samples']
fig.add_trace(
go.Scatter(y=b117_us_time['rel_freq'],
x=b117_us_time['date'],
name=f'{strain} samples',
mode='markers+lines',
line_color='rgba(220,20,60,.6)',
text=b117_us_time[['num_samples', 'states',
'state_counts', 'date']],
hovertemplate="<b>Number of cases: %{text[0]}</b><br>" +
"<b>State(s) Reported: %{text[1]}</b><br>" +
"<b>Cases per State: %{text[2]}</b><br>" +
"<b>Date: %{text[3]}</b><br>"))
fig.add_annotation(x=first_detected,
y=b117_us_time.loc[b117_us_time['date']==first_detected, 'rel_freq'].values[0],
text=f"In US, {strain} 1st detected in <br> {', '.join(first_states)} <br> on week of <br> {first_detected.date()}",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-100)
if sdrop_us_time[sdrop_us_time['num_samples']>0].shape[0] > 0:
first_detected = sdrop_us_time.loc[sdrop_us_time['num_samples']>0]['date'].min()
first_states = sdrop_us_time.loc[sdrop_us_time['date']==first_detected, 'states'].values[0]
sdrop_us_time = sdrop_us_time[sdrop_us_time['date']>=first_detected]
sdrop_us_time.loc[:, 'cum_num_samples'] = sdrop_us_time['num_samples'].cumsum()
sdrop_us_time.loc[:, 'cum_total_samples'] = sdrop_us_time['total_samples'].cumsum()
sdrop_us_time.loc[:, 'rel_freq'] = sdrop_us_time['cum_num_samples'] / sdrop_us_time['cum_total_samples']
fig.add_trace(
go.Scatter(y=sdrop_us_time['rel_freq'],
x=sdrop_us_time['date'],
name='biased sampling <br> (more info in a later section)',
mode='markers+lines',
line_color='rgba(30,144,255,.6)',
text=sdrop_us_time[['num_samples', 'states',
'state_counts', 'date']],
hovertemplate="<b>Number of cases: %{text[0]}</b><br>" +
"<b>State(s) Reported: %{text[1]}</b><br>" +
"<b>Cases per State: %{text[2]}</b><br>" +
"<b>Date: %{text[3]}</b><br>"))
fig.add_annotation(x=first_detected,
y=sdrop_us_time.loc[sdrop_us_time['date']==first_detected, 'rel_freq'].values[0],
text=f"In US, {strain} 1st detected in <br> {', '.join(first_states)} <br> on week of <br> {first_detected.date()}",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-100)
fig.update_yaxes(side = 'right')
fig.update_layout(yaxis_title=f'Relative cumulative frequency of {strain} in USA',
xaxis_title='Collection Date',
template='plotly_white', autosize=True, showlegend=True,
legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=0.01
))#, height=850,
return fig
def us_time(data, feature, values, res, strain='B117', country='USA', sampling_type='random'):
if len(values)==1:
results = (data.loc[(data[feature]==values[0]) &
(data['country']=='United States of America')]
.drop_duplicates(subset=['date', 'strain']))
else:
# results = (data.groupby(['date', 'country', 'division', 'purpose_of_sequencing',
# 'location', 'pangolin_lineage', 'strain'])
# .agg(mutations=('mutation', 'unique')).reset_index())
# results['is_vui'] = results['mutations'].apply(is_vui, args=(set(values),))
results = (res[(res['is_vui']==True)
& (res['country']=='United States of America')]
.drop_duplicates(subset=['date', 'strain']))
if sampling_type!='random':
results['purpose_of_sequencing'] = 'S'
else:
results['purpose_of_sequencing'] = '?'
random = results[results['purpose_of_sequencing']=='?']
biased = results[results['purpose_of_sequencing']!='?']
b117_us_time = (random.groupby('date')
.agg(
num_samples=('strain', 'nunique'),
state_counts=('division',
lambda x: np.unique(x,
return_counts=True))
)
.reset_index())
b117_us_time.loc[:, 'states'] = b117_us_time['state_counts'].apply(lambda x: list(x[0]))
b117_us_time.loc[:, 'state_counts'] = b117_us_time['state_counts'].apply(lambda x: list(x[1]))
b117_us_time.loc[:, 'date'] = pd.to_datetime(b117_us_time['date'],
errors='coerce')
b117_us_time['cum_num_samples'] = b117_us_time['num_samples'].cumsum()
sdrop_us_time = (biased.groupby('date')
.agg(
num_samples=('strain', 'nunique'),
state_counts=('division',
lambda x: np.unique(x,
return_counts=True))
)
.reset_index())
sdrop_us_time.loc[:, 'states'] = sdrop_us_time['state_counts'].apply(lambda x: list(x[0]))
sdrop_us_time.loc[:, 'state_counts'] = sdrop_us_time['state_counts'].apply(lambda x: list(x[1]))
sdrop_us_time.loc[:, 'date'] = pd.to_datetime(sdrop_us_time['date'],
errors='coerce')
sdrop_us_time['cum_num_samples'] = sdrop_us_time['num_samples'].cumsum()
fig = go.Figure()
if b117_us_time.shape[0] > 0:
fig.add_trace(
go.Scatter(y=b117_us_time['cum_num_samples'],
x=b117_us_time['date'],
name=f'{strain} samples',
mode='markers+lines',
line_color='rgba(220,20,60,.6)',
text=b117_us_time[['num_samples', 'states',
'state_counts', 'date']],
hovertemplate="<b>Number of cases: %{text[0]}</b><br>" +
"<b>State(s) Reported: %{text[1]}</b><br>" +
"<b>Cases per State: %{text[2]}</b><br>" +
"<b>Date: %{text[3]}</b><br>"))
first_detected = b117_us_time['date'].min()
first_states = b117_us_time.loc[b117_us_time['date']==first_detected, 'states']
fig.add_annotation(x=first_detected,
y=b117_us_time.loc[b117_us_time['date']==first_detected, 'cum_num_samples'].values[0],
text=f"In US, {strain} 1st detected in <br> {', '.join(first_states.values[0])} <br> on <br> {first_detected.date()}",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-100)
if sdrop_us_time.shape[0] > 0:
fig.add_trace(
go.Scatter(y=sdrop_us_time['cum_num_samples'],
x=sdrop_us_time['date'],
name='biased sampling <br> (see notes on sampling)',
mode='markers+lines',
line_color='rgba(30,144,255,.6)',
text=sdrop_us_time[['num_samples', 'states',
'state_counts', 'date']],
hovertemplate="<b>Number of cases: %{text[0]}</b><br>" +
"<b>State(s) Reported: %{text[1]}</b><br>" +
"<b>Cases per State: %{text[2]}</b><br>" +
"<b>Date: %{text[3]}</b><br>"))
first_detected = sdrop_us_time['date'].min()
first_states = sdrop_us_time.loc[sdrop_us_time['date']==first_detected, 'states']
fig.add_annotation(x=first_detected,
y=sdrop_us_time.loc[sdrop_us_time['date']==first_detected, 'cum_num_samples'].values[0],
text=f"In US, {strain} 1st detected in <br> {', '.join(first_states.values[0])} <br> on <br> {first_detected.date()}",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-100)
fig.update_yaxes(side = 'right')
fig.update_layout(yaxis_title=f'Cumulative number of cases over time in {country}',
xaxis_title='Collection Date',
template='plotly_white', autosize=True, showlegend=True,
legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=0.01
))#, height=850,
return fig
def ca_time_relative(data, feature, values, res,
strain='B117', state='California',
vocs=['B.1.1.7', 'B.1.1.70']):
if len(values)==1:
data.loc[:, 'weekday'] = data['date'].dt.weekday
data.loc[:, 'date'] = data['date'] - data['weekday'] * dt.timedelta(days=1)
results = (data.loc[(data[feature]==values[0])&
(data['division']==state)]
.drop_duplicates(subset=['date', 'strain']))
total_samples = (data[(~data['pangolin_lineage'].isin(vocs))&
(data['division']==state)]
.groupby('date')
.agg(total_samples=('strain', 'nunique')))
else:
res = res.copy()
# res.loc[:, 'tmp'] = res['date'].str.split('-')
# res = res[res['tmp'].str.len()>=3]
# res.loc[:, 'date'] = pd.to_datetime(res['date'], errors='coerce')
res.loc[:, 'weekday'] = res['date'].dt.weekday
res.loc[:, 'date'] = res['date'] - res['weekday'] * dt.timedelta(days=1)
total_samples = (res[(~res['pangolin_lineage'].isin(vocs))
&(res['division']==state)]
.groupby('date')
.agg(total_samples=('strain', 'nunique'))
.reset_index())
results = res[(res['is_vui']==True)
& (res['division']==state)].drop_duplicates(subset=['date', 'strain'])
results.loc[:, 'purpose_of_sequencing'] = '?'
random = results[results['purpose_of_sequencing']=='?']
biased = results[results['purpose_of_sequencing']!='?']
b117_ca_time = (random.groupby('date')
.agg(num_samples=('strain', 'nunique'),
county_counts=('location',
lambda x: np.unique(x, return_counts=True)))
.reset_index())
b117_ca_time.loc[:, 'counties'] = b117_ca_time['county_counts'].apply(lambda x: list(x[0]))
b117_ca_time.loc[:, 'county_counts'] = b117_ca_time['county_counts'].apply(lambda x: list(x[1]))
# b117_ca_time.loc[:, 'date'] = pd.to_datetime(b117_ca_time['date'],
# errors='coerce')
b117_ca_time = pd.merge(b117_ca_time, total_samples, on='date', how='right')
    b117_ca_time.loc[:, 'counties'] = b117_ca_time['counties'].fillna('')
b117_ca_time.loc[:, ['num_samples', 'total_samples']] = b117_ca_time[['num_samples', 'total_samples']].fillna(0)
sdrop_ca_time = (biased.groupby('date')
.agg(
num_samples=('strain', 'nunique'),
county_counts=('location',
lambda x: np.unique(x, return_counts=True))
)
.reset_index())
sdrop_ca_time.loc[:, 'counties'] = sdrop_ca_time['county_counts'].apply(lambda x: list(x[0]))
sdrop_ca_time.loc[:, 'county_counts'] = sdrop_ca_time['county_counts'].apply(lambda x: list(x[1]))
# sdrop_ca_time.loc[:, 'date'] = pd.to_datetime(sdrop_ca_time['date'], errors='coerce')
sdrop_ca_time = pd.merge(sdrop_ca_time, total_samples, on='date', how='right')
    sdrop_ca_time.loc[:, 'counties'] = sdrop_ca_time['counties'].fillna('')
    sdrop_ca_time.loc[:, ['num_samples', 'total_samples']] = sdrop_ca_time[['num_samples', 'total_samples']].fillna(0)
fig = go.Figure()
if b117_ca_time[b117_ca_time['num_samples']>0].shape[0] > 0:
first_detected = b117_ca_time.loc[b117_ca_time['num_samples']>0]['date'].min()
first_counties = b117_ca_time.loc[b117_ca_time['date']==first_detected, 'counties'].values[0]
b117_ca_time = b117_ca_time[b117_ca_time['date']>=first_detected]
b117_ca_time.loc[:, 'cum_num_samples'] = b117_ca_time['num_samples'].cumsum()
b117_ca_time.loc[:, 'cum_total_samples'] = b117_ca_time['total_samples'].cumsum()
b117_ca_time.loc[:, 'rel_freq'] = b117_ca_time['cum_num_samples'] / b117_ca_time['cum_total_samples']
fig.add_trace(
go.Scatter(y=b117_ca_time['rel_freq'],
x=b117_ca_time['date'],
name=f'{strain} samples', mode='markers+lines',
line_color='rgba(220,20,60,.6)',
text=b117_ca_time[['num_samples', 'counties',
'county_counts', 'date']],
hovertemplate="<b>Number of cases: %{text[0]}</b><br>" +
"<b>County(s) Reported: %{text[1]}</b><br>" +
"<b>Cases per County: %{text[2]}</b><br>" +
"<b>Date: %{text[3]}</b><br>"))
fig.add_annotation(x=first_detected,
y=b117_ca_time.loc[b117_ca_time['date']==first_detected, 'rel_freq'].values[0],
text=f"In CA, {strain} 1st detected in <br> {', '.join(first_counties)} county(s) <br> on week of <br> {first_detected.date()}",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-50)
if sdrop_ca_time[sdrop_ca_time['num_samples']>0].shape[0] > 0:
first_detected = sdrop_ca_time.loc[sdrop_ca_time['num_samples']>0]['date'].min()
first_counties = sdrop_ca_time.loc[sdrop_ca_time['date']==first_detected, 'counties'].values[0]
sdrop_ca_time = sdrop_ca_time[sdrop_ca_time['date']>=first_detected]
sdrop_ca_time.loc[:, 'cum_num_samples'] = sdrop_ca_time['num_samples'].cumsum()
sdrop_ca_time.loc[:, 'cum_total_samples'] = sdrop_ca_time['total_samples'].cumsum()
sdrop_ca_time.loc[:, 'rel_freq'] = sdrop_ca_time['cum_num_samples'] / sdrop_ca_time['cum_total_samples']
fig.add_trace(
go.Scatter(y=sdrop_ca_time['rel_freq'],
x=sdrop_ca_time['date'],
name='biased sampling (read next section)',
mode='markers+lines',
line_color='rgba(30,144,255,.6)',
text=sdrop_ca_time[['num_samples', 'counties',
'county_counts', 'date']],
hovertemplate="<b>Number of cases: %{text[0]}</b><br>" +
"<b>State(s) Reported: %{text[1]}</b><br>" +
"<b>Cases per State: %{text[2]}</b><br>" +
"<b>Date: %{text[3]}</b><br>"
)
)
fig.add_annotation(x=first_detected,
y=sdrop_ca_time.loc[sdrop_ca_time['date']==first_detected, 'rel_freq'].values[0],
text=f"""In CA, {strain} 1st detected in <br> {', '.join(first_counties)} county(s) <br> on week of <br> {first_detected.date()}""",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-50)
fig.update_yaxes(side = 'right')
fig.update_layout(yaxis_title=f'Relative cumulative frequency of {strain} in CA',
xaxis_title='Collection Date',
template='plotly_white', showlegend=True,
legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=0.01
),
autosize=True#, autosize=True
)#, height=850,
return fig
def ca_time(data, feature, values, res, strain='B117', state='California', sampling_type='random'):
if len(values)==1:
results = (data.loc[(data[feature]==values[0]) &
(data['division']==state)]
.drop_duplicates(subset=['date', 'strain']))
else:
# results = (data.groupby(['date', 'country', 'division',
# 'location', 'pangolin_lineage', 'strain'])
# .agg(mutations=('mutation', 'unique')).reset_index())
# results['is_vui'] = results['mutations'].apply(is_vui, args=(set(values),))
results = res[(res['is_vui']==True)
&(res['division']==state)].drop_duplicates(subset=['date', 'strain'])
if sampling_type!='random':
results['purpose_of_sequencing'] = 'S'
else:
results['purpose_of_sequencing'] = '?'
random = results[results['purpose_of_sequencing']=='?']
biased = results[results['purpose_of_sequencing']!='?']
b117_ca_time = (random.groupby('date')
.agg(num_samples=('strain', 'nunique'),
county_counts=('location',
lambda x: np.unique(x, return_counts=True)))
.reset_index())
b117_ca_time.loc[:, 'counties'] = b117_ca_time['county_counts'].apply(lambda x: list(x[0]))
b117_ca_time.loc[:, 'county_counts'] = b117_ca_time['county_counts'].apply(lambda x: list(x[1]))
b117_ca_time.loc[:, 'date'] = pd.to_datetime(b117_ca_time['date'],
errors='coerce')
b117_ca_time.loc[:, 'cum_num_samples'] = b117_ca_time['num_samples'].cumsum()
sdrop_ca_time = (biased.groupby('date')
.agg(
num_samples=('strain', 'nunique'),
county_counts=('location',
lambda x: np.unique(x, return_counts=True))
)
.reset_index())
sdrop_ca_time.loc[:, 'counties'] = sdrop_ca_time['county_counts'].apply(lambda x: list(x[0]))
sdrop_ca_time.loc[:, 'county_counts'] = sdrop_ca_time['county_counts'].apply(lambda x: list(x[1]))
sdrop_ca_time.loc[:, 'date'] = pd.to_datetime(sdrop_ca_time['date'], errors='coerce')
sdrop_ca_time['cum_num_samples'] = sdrop_ca_time['num_samples'].cumsum()
fig = go.Figure()
if b117_ca_time.shape[0] > 0:
fig.add_trace(
go.Scatter(y=b117_ca_time['cum_num_samples'],
x=b117_ca_time['date'],
name=f'{strain} samples', mode='markers+lines',
line_color='rgba(220,20,60,.6)',
text=b117_ca_time[['num_samples', 'counties',
'county_counts', 'date']],
hovertemplate="<b>Number of cases: %{text[0]}</b><br>" +
"<b>County(s) Reported: %{text[1]}</b><br>" +
"<b>Cases per County: %{text[2]}</b><br>" +
"<b>Date: %{text[3]}</b><br>"))
first_detected = b117_ca_time['date'].min()
first_counties = b117_ca_time.loc[b117_ca_time['date']==first_detected, 'counties']
fig.add_annotation(x=first_detected,
y=b117_ca_time.loc[b117_ca_time['date']==first_detected, 'cum_num_samples'].values[0],
text=f"In CA, {strain} 1st detected in <br> {', '.join(first_counties.values[0])} <br> on {first_detected.date()}",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-50)
if sdrop_ca_time.shape[0] > 0:
fig.add_trace(
go.Scatter(y=sdrop_ca_time['cum_num_samples'],
x=sdrop_ca_time['date'],
name='biased sampling <br> (see notes on sampling)',
mode='markers+lines',
line_color='rgba(30,144,255,.6)',
text=sdrop_ca_time[['num_samples', 'counties',
'county_counts', 'date']],
hovertemplate="<b>Number of cases: %{text[0]}</b><br>" +
"<b>State(s) Reported: %{text[1]}</b><br>" +
"<b>Cases per State: %{text[2]}</b><br>" +
"<b>Date: %{text[3]}</b><br>"
)
)
first_detected = sdrop_ca_time['date'].min()
first_counties = sdrop_ca_time.loc[sdrop_ca_time['date']==first_detected, 'counties']
fig.add_annotation(x=first_detected,
y=sdrop_ca_time.loc[sdrop_ca_time['date']==first_detected, 'cum_num_samples'].values[0],
text=f"In CA, {strain} 1st detected in <br> {', '.join(first_counties.values[0])} <br> on {first_detected.date()}",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-50)
fig.update_yaxes(side = 'right')
fig.update_layout(yaxis_title=f'Cumulative number of {strain} in CA',
xaxis_title='Collection Date',
template='plotly_white', autosize=True, showlegend=True,
legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=0.01
))#, height=850,
return fig
def strain_nt_distance(data, feature, values, strain='B117', sample_sz=250, vocs=['B.1.1.7', 'B.1.351', 'B.1.1.70']):
clock_rate = 8e-4
if feature=='pangolin_lineage':
dists_df = create_lineage_data(data, feature, values, strain=strain, sample_sz=sample_sz, vocs=vocs)
elif feature=='mutation':
dists_df = create_distance_data(data, mutations=set(values), strain=strain, sample_sz=sample_sz, vocs=vocs)
else:
raise ValueError(f"Feature of type {feature} is not yet available for analysis. Aborting...")
dists_df['num_subs'] = dists_df['mutations'].str.len() / 29904
# ignore seqs with unexpectedly high dists
dists_df = dists_df[dists_df['num_subs']<=0.0013]
dists_df = dists_df[~dists_df['date'].isna()]
dists_df.loc[:, 'date'] = pd.to_datetime(dists_df['date'], errors='coerce')
dists_df['time'] = dists_df['date'].astype(str).apply(bv.decimal_date)
b117_model = ols('num_subs ~ time', data=dists_df[dists_df['group']!='outgroup']).fit()
b117_model.params['time'] = clock_rate
b117_preds = dists_df[dists_df['group']!='outgroup'].copy()
b117_model.params['Intercept'] = np.mean(b117_preds['num_subs'] - (clock_rate*b117_preds['time']))
b117_preds.loc[:, 'predictions'] = b117_model.predict(b117_preds['time'])
b117_n = int(b117_preds.shape[0] / 2)
outgrp_model = ols('num_subs ~ time',
data=dists_df[dists_df['group']=='outgroup']).fit()
outgrp_model.params['time'] = clock_rate
outgrp_preds = dists_df[dists_df['group']=='outgroup'].copy()
outgrp_model.params['Intercept'] = np.mean(outgrp_preds['num_subs'] - (clock_rate*outgrp_preds['time']))
outgrp_preds.loc[:, 'predictions'] = outgrp_model.predict(outgrp_preds['time'])
outgrp_n = int(outgrp_preds.shape[0] / 3)
fig = go.Figure(
data=go.Scatter(y=dists_df[dists_df['group']==f'Lineage {strain} in US']['num_subs'],
x=dists_df[dists_df['group']==f'Lineage {strain} in US']['date'],
name=f'{strain} (US)', mode='markers',
hovertemplate =
'Sample: %{text}',
marker_color='rgba(220,20,60,.6)'))
fig.add_trace(
go.Scatter(y=dists_df[dists_df['group']==f'Lineage {strain}']['num_subs'],
x=dists_df[dists_df['group']==f'Lineage {strain}']['date'],
mode='markers', marker_color='rgba(30,144,255,.6)',
name=f'{strain} (non-US)'
))
# fig.add_trace(go.Scatter(y=b117_preds['predictions'],
# x=b117_preds['date'], name='OLS (B.1.1.7)',
# mode='lines', line_color='rgba(0,0,0,1.)'))
fig.add_annotation(x=b117_preds.iloc[b117_n]['date'],
y=b117_preds.iloc[b117_n]['predictions'],
text=f"{strain} Lineage",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-80)
fig.add_trace(
go.Scatter(y=dists_df[dists_df['group']=='outgroup']['num_subs'],
x=dists_df[dists_df['group']=='outgroup']['date'],
                   mode='markers', marker_color='rgba(211,211,211,.6)',
name='outgroup'
))
# fig.add_trace(go.Scatter(y=outgrp_preds['predictions'],
# x=outgrp_preds['date'], name='OLS (outgroup)',
# mode='lines', line_color='rgba(0,0,0,1.)'))
fig.add_annotation(x=outgrp_preds.iloc[outgrp_n]['date'],
y=outgrp_preds.iloc[outgrp_n]['predictions'],
text=f"outgroup",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-80)
fig.update_layout(yaxis_title='Genetic Distance (root-to-tip)',
xaxis_title='Collection Date',
template='plotly_white', autosize=True,
margin={"l":1},
legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=0.01
)
)#, height=850,
fig.update_yaxes(side = 'right')
return fig
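# Side note on the regression used above: the slope is pinned to the assumed
# clock rate (clock_rate = 8e-4) and only the intercept is re-estimated as
# mean(y - slope*t). Standalone sketch of that step:
def _fixed_slope_fit(times, num_subs, clock_rate=8e-4):
    times = np.asarray(times, dtype=float)
    num_subs = np.asarray(num_subs, dtype=float)
    intercept = np.mean(num_subs - clock_rate * times)
    return intercept + clock_rate * times  # fitted root-to-tip distances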
def create_lineage_data(data, feature, values, strain, sample_sz=250, vocs=['B.1.1.7', 'B.1.1.70', 'B.1.351']):
data = (data.groupby(['date', 'country', 'division',
'location', 'pangolin_lineage', 'strain'])
.agg(mutations=('mutation', 'unique')).reset_index())
first_detected = data.loc[data[feature].isin(values), 'date'].min()
mutations = set(data.loc[(data[feature].isin(values))
&(data['date']==first_detected), 'mutations'].explode().unique())
data['d_w'] = data['mutations'].apply(compute_similarity, args=(mutations,))
outgroup = (data[(~data[feature].isin(values))
&(~data['pangolin_lineage'].isin(vocs))]
.nlargest(sample_sz, 'd_w')['strain']
.unique())
try:
ingroup = data.loc[(data[feature].isin(values))].sample(sample_sz)['strain'].unique()
except:
ingroup = data.loc[(data[feature].isin(values))]['strain'].unique()
usgroup = data.loc[(data[feature].isin(values)) & (data['country']=='United States of America'), 'strain'].unique()
data = data.loc[(data['strain'].isin(ingroup)) | (data['strain'].isin(outgroup)) | (data['strain'].isin(usgroup))]
data.loc[:, 'group'] = 'nan'
data.loc[data['strain'].isin(outgroup), 'group'] = 'outgroup'
data.loc[(data['strain'].isin(ingroup)), 'group'] = f'Lineage {strain}'
data.loc[(data['strain'].isin(usgroup)), 'group'] = f'Lineage {strain} in US'
return data
def create_distance_data(data: pd.DataFrame, mutations: set, strain: str,
sample_sz: int=250, vocs: list=['B.1.1.7', 'B.1.351']):
data = (data.groupby(['date', 'country', 'division',
'location', 'pangolin_lineage', 'strain'])
.agg(mutations=('mutation', 'unique')).reset_index())
data['is_vui'] = data['mutations'].apply(is_vui, args=(mutations,))
ref_muts = extract_mutations(data)
data['d_w'] = data['mutations'].apply(compute_similarity, args=(ref_muts,))
outgroup = (data[(data['is_vui']==False)
&(~data['pangolin_lineage'].isin(vocs))]
.sample(sample_sz)['strain']
.unique())
try:
ingroup = data.loc[(data['is_vui']==True)].sample(sample_sz)['strain'].unique()
except:
ingroup = data.loc[(data['is_vui']==True)]['strain'].unique()
usgroup = data.loc[(data['is_vui']==True) & (data['country']=='United States of America'), 'strain'].unique()
data = data.loc[(data['strain'].isin(ingroup)) | (data['strain'].isin(outgroup)) | (data['strain'].isin(usgroup))]
data['group'] = 'outgroup'
data.loc[(data['strain'].isin(ingroup)), 'group'] = f'Lineage {strain}'
data.loc[(data['strain'].isin(usgroup)), 'group'] = f'Lineage {strain} in US'
return data
def is_vui(x, mutations: set):
return mutations.issubset(set(x))
def extract_mutations(data: pd.DataFrame):
first_detected = data.loc[data['is_vui']==True, 'date'].min()
mutations = data.loc[(data['is_vui']==True)
&(data['date']==first_detected), 'mutations'].explode().unique()
return set(mutations)
def compute_similarity(x, reference_mutations: set):
common_mutations = set(x) & reference_mutations
return len(common_mutations)
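# Tiny worked example for the three helpers above (mutation names are made up):
def _demo_mutation_helpers():
    ref = {'S:N501Y', 'S:P681H'}   # hypothetical defining-mutation set
    assert is_vui(['S:N501Y', 'S:P681H', 'ORF8:Q27stop'], ref)   # ref is a subset
    assert not is_vui(['S:N501Y', 'S:D614G'], ref)               # missing S:P681H
    assert compute_similarity(['S:N501Y', 'S:D614G'], ref) == 1  # one shared mutation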
def b117_nt_distance(gisaid_data, tree_fp, b117_meta, sample_sz=250, clock_rate=8e-4):
# nabla_symbol = u"\u2207"
croft_meta = pd.read_csv(b117_meta, sep='\t')
croft_meta = croft_meta[croft_meta['Country']!='USA'].copy()
# extract B117 samples from Emma Croft's build
b117_meta = croft_meta[croft_meta['Pangolin Lineage']=='B.1.1.7'].sample(sample_sz)
# extract outgroup samples from Emma Croft's build
outgrp_meta = croft_meta[croft_meta['Pangolin Lineage']!='B.1.1.7'].sample(sample_sz)
# extract B117 US samples from GISAID
us_b117 = gisaid_data[(gisaid_data['country']=='United States of America')
& (gisaid_data['pangolin_lineage']=='B.1.1.7')].copy()
# consolidate data and analyze
b117_data = gisaid_data[(gisaid_data['strain'].isin(b117_meta['Strain'].unique()))
|(gisaid_data['strain'].isin(outgrp_meta['Strain'].unique()))
|(gisaid_data['strain'].isin(us_b117['strain'].unique()))].copy()
b117_data.drop_duplicates(subset=['strain', 'pos', 'alt_codon'], inplace=True)
# b117_data = b117_data[b117_data['gene']=='S']
dists_df = (b117_data.groupby(['strain', 'date'])
.agg(num_nt_subs=('strain', 'count'))
.reset_index())
dists_df['num_nt_subs'] = dists_df['num_nt_subs'] / 29903
dists_df = dists_df[~dists_df['date'].isna()]
dists_df.loc[:, 'group'] = 'outgroup'
dists_df.loc[dists_df['strain'].isin(b117_meta['Strain'].unique()), 'group'] = 'B.1.1.7 (non-US)'
dists_df.loc[dists_df['strain'].isin(us_b117['strain'].unique()), 'group'] = 'B.1.1.7 (US)'
dists_df = dists_df.loc[~((dists_df['group']=='outgroup') & (dists_df['num_nt_subs']>=0.001))]
dists_df.loc[:, 'date'] = pd.to_datetime(dists_df['date'], errors='coerce')
dists_df['time'] = dists_df['date'].astype(str).apply(bv.decimal_date)
b117_model = ols('num_nt_subs ~ time', data=dists_df[dists_df['group']!='outgroup']).fit()
b117_model.params['time'] = clock_rate
b117_preds = dists_df[dists_df['group']!='outgroup'].copy()
b117_model.params['Intercept'] = np.mean(b117_preds['num_nt_subs'] - (clock_rate*b117_preds['time']))
b117_preds.loc[:, 'predictions'] = b117_model.predict(b117_preds['time'])
b117_n = int(b117_preds.shape[0] / 2)
outgrp_model = ols('num_nt_subs ~ time',
data=dists_df[dists_df['group']=='outgroup']).fit()
outgrp_model.params['time'] = clock_rate
outgrp_preds = dists_df[dists_df['group']=='outgroup'].copy()
outgrp_model.params['Intercept'] = np.mean(outgrp_preds['num_nt_subs'] - (clock_rate*outgrp_preds['time']))
outgrp_preds.loc[:, 'predictions'] = outgrp_model.predict(outgrp_preds['time'])
outgrp_n = int(outgrp_preds.shape[0] / 3)
fig = go.Figure(
data=go.Scatter(y=dists_df[dists_df['group']=='B.1.1.7 (US)']['num_nt_subs'],
x=dists_df[dists_df['group']=='B.1.1.7 (US)']['date'],
name='B.1.1.7 (US)', mode='markers',
text=dists_df[dists_df['group']=='B.1.1.7 (US)']['strain'],
hovertemplate =
'Sample: %{text}',
marker_color='rgba(220,20,60,.6)'))
fig.add_trace(
go.Scatter(y=dists_df[dists_df['group']=='B.1.1.7 (non-US)']['num_nt_subs'],
x=dists_df[dists_df['group']=='B.1.1.7 (non-US)']['date'],
mode='markers', marker_color='rgba(30,144,255,.6)',
name='B.1.1.7 (non-US)'
))
fig.add_trace(go.Scatter(y=b117_preds['predictions'],
x=b117_preds['date'], name='OLS (B.1.1.7)',
mode='lines', line_color='rgba(0,0,0,1.)'))
fig.add_annotation(x=b117_preds.iloc[b117_n]['date'],
y=b117_preds.iloc[b117_n]['predictions'],
text=f"B117 Lineage",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-80)
fig.add_trace(
go.Scatter(y=dists_df[dists_df['group']=='outgroup']['num_nt_subs'],
x=dists_df[dists_df['group']=='outgroup']['date'],
                   mode='markers', marker_color='rgba(211,211,211,.6)',
name='outgroup'
))
fig.add_trace(go.Scatter(y=outgrp_preds['predictions'],
x=outgrp_preds['date'], name='OLS (outgroup)',
mode='lines', line_color='rgba(0,0,0,1.)'))
fig.add_annotation(x=outgrp_preds.iloc[outgrp_n]['date'],
y=outgrp_preds.iloc[outgrp_n]['predictions'],
text=f"outgroup",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-80)
fig.update_layout(yaxis_title='Genetic Distance (root-to-tip)',
xaxis_title='Collection Date',
template='plotly_white', autosize=True)#, height=850,
return fig
def b117_aa_distance(gisaid_data, b117_meta, sample_sz=250):
croft_meta = pd.read_csv(b117_meta, sep='\t')
croft_meta = croft_meta[croft_meta['Country']!='USA'].copy()
# extract B117 samples from Emma Croft's build
b117_meta = croft_meta[croft_meta['Pangolin Lineage']=='B.1.1.7'].sample(sample_sz)
# extract outgroup samples from Emma Croft's build
outgrp_meta = croft_meta[croft_meta['Pangolin Lineage']!='B.1.1.7'].sample(sample_sz)
# extract B117 US samples from GISAID
us_b117 = gisaid_data[(gisaid_data['country']=='United States of America')
& (gisaid_data['pangolin_lineage']=='B.1.1.7')].copy()
# consolidate data and analyze
b117_data = gisaid_data[(gisaid_data['strain'].isin(b117_meta['Strain'].unique()))
|(gisaid_data['strain'].isin(outgrp_meta['Strain'].unique()))
|(gisaid_data['strain'].isin(us_b117['strain'].unique()))]
b117_data.loc[:, 'nonsyn'] = False
b117_data.loc[b117_data['ref_aa']!=b117_data['alt_aa'],
'nonsyn'] = True
b117_data.loc[:, 'S_nonsyn'] = False
b117_data.loc[(b117_data['gene']=='S') &
(b117_data['ref_aa']!=b117_data['alt_aa']),
'S_nonsyn'] = True
dists_df = (b117_data.groupby(['strain', 'date'])
.agg(num_nonsyn_muts=('nonsyn', 'sum'),
num_S_nonsyn_muts=('S_nonsyn', 'sum'))
.reset_index())
dists_df = dists_df[~dists_df['date'].isna()]
dists_df.loc[:, 'group'] = 'outgroup'
dists_df.loc[dists_df['strain'].isin(b117_meta['Strain'].unique()), 'group'] = 'B.1.1.7 (non-US)'
dists_df.loc[dists_df['strain'].isin(us_b117['strain'].unique()), 'group'] = 'B.1.1.7 (US)'
dists_df.loc[:, 'date'] = | pd.to_datetime(dists_df['date'], errors='coerce') | pandas.to_datetime |
# basics
import numpy as np
import pandas as pd
import os
# segnlp
from segnlp.utils.stat_sig import compare_dists
class RankItem(dict):
def __init__(self,
id:str,
v:float,
scores:np.ndarray,
metric:str,
random_seeds : np.ndarray,
):
self["id"] = id
self["v"] = v
self["max"] = np.max(scores)
self["min"] = np.min(scores)
self["mean"] = np.mean(scores)
self["std"] = np.std(scores)
self["metric"] = metric
self["top_random_seed"] = random_seeds[np.argmax(scores)]
class Ranking:
@property
def rankings(self):
return | pd.read_csv(self._path_to_rankings, index_col=0) | pandas.read_csv |
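# Minimal usage sketch for RankItem above (scores and seeds are made-up): each
# item summarises the score distribution of one ranked configuration.
def _demo_rank_item():
    scores = np.array([0.70, 0.73, 0.69])
    seeds = np.array([1, 2, 3])
    item = RankItem(id='hp_0', v=0.71, scores=scores,
                    metric='f1', random_seeds=seeds)
    return item['mean'], item['top_random_seed']  # (~0.7067, 2)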
"""
@author: <NAME>
file: main_queue.py
"""
from __future__ import print_function
from scoop import futures
import multiprocessing
import numpy as np
import pandas as pd
import timeit
import ZIPapliences as A_ZIP
class load_generation:
""" Class prepares the system for generating load
Attributes
----------
START_TIME_Q (pandas datetime): start time to generate load data
END_TIME_Q (pandas datetime): end time to generate load data
Queue_type (int): 0=inf; 1=C; 2=Ct
    P_U_B (int): percentage upper bound --> e.g. 2 = 200% from the reference
physical_machine (int): 1 = single node 2 = multiple nodes
NUM_WORKERS (int): number of workers used when generating load in a single node
NUM_HOMES (int): number of homes being generated
OUT_PUT_FILE_NAME_pre (str): file path to write output
OUT_PUT_FILE_NAME (str): prefix of file name to be writen
OUT_PUT_FILE_NAME_end (str): end of file name
OUT_PUT_FILE_NAME_summary_pre (str): file path to write output
OUT_PUT_FILE_NAME_summary (str): prefix of summary file name to be writen
TIME_DELT (pandas datetime): 1 minute
TIME_DELT_FH (pandas datetime): 1 hour
TIME_DELT_FD (pandas datetime): 1 day
    base_max (float): rescaling load reference upper bound
base_min (float): rescaling load reference lower bound
ref_load (pandas series): reference load
DF_A (pandas dataframe): appliances characteristics
DF_ZIP_summer (pandas dataframe): appliances participation during the summer
DF_ZIP_winter (pandas dataframe): appliances participation during the winter
DF_ZIP_spring (pandas dataframe): appliances participation during the spring
APP_parameter_list (list): input parameters
[(float) p.u. percentage of schedulable appliances 0.5=50%,
(int) appliance set size,
(int) average power rating in Watts,
(int) stander power rating in Watts,
(float) average duration in hours,
(float) stander duration in hours,
(float) average duration of the scheduling window in hours,
(float) stander duration of the scheduling window in hours]
Methods
-------
__init__ : create object with the parameters for the load generation
read_data : load input data
"""
def __init__(self,ST,ET,T,P,M,NW,NH):
""" Create load_generation object
Parameters
----------
ST (str): start time to generate load data e.g. '2014-01-01 00:00:00'
ET (str): end time to generate load data
T (int): 0=inf; 1=C; 2=Ct
P (int): percentage upper boud --> e.g. 2 = 200% from the reference
M (int): 1 = single node 2 = multiple nodes
NW (int): number of workers used when generating load in a single node
NH (int): number of homes being generated
"""
self.START_TIME_Q = pd.to_datetime(ST)
self.END_TIME_Q = pd.to_datetime(ET)
self.Queue_type = T
self.P_U_B = P
self.physical_machine = M
self.NUM_WORKERS = NW
self.NUM_HOMES = NH
self.OUT_PUT_FILE_NAME_pre = 'outputdata/multy/'
self.OUT_PUT_FILE_NAME = 'multHDF'
self.OUT_PUT_FILE_NAME_end = '.h5'
self.OUT_PUT_FILE_NAME_summary_pre = 'outputdata/summary/'
self.OUT_PUT_FILE_NAME_summary = 'summaryHDF'
#Auxiliary variables
self.TIME_DELT = pd.to_timedelta('0 days 00:01:00')  # api: pandas.to_timedelta
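# Sketch only (not from the original source): the hourly and daily deltas
# described in the class docstring would be built the same way as TIME_DELT
# above, e.g.
#   self.TIME_DELT_FH = pd.to_timedelta('0 days 01:00:00')  # 1 hour
#   self.TIME_DELT_FD = pd.to_timedelta('1 days 00:00:00')  # 1 day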
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
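# For reference, a typical call to the helper above (mirroring its use in the
# tests further down in this module):
#   assert_stat_op_calc('mean', np.mean, float_frame_with_na, check_dates=True)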
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
result = frame.cov(min_periods=len(frame) - 8)
expected = frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
tm.assert_frame_equal(result, expected)
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
from scipy.stats import skew, kurtosis  # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = pd.DataFrame({"A": [1, 1],
"B": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series([1.0], index=['A'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_excludeds_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Our long-term desired behavior is unclear, but the behavior in
# 0.24.0rc1 was buggy.
df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series()
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': to_datetime(['2000-1-2']),
'G': to_timedelta(['1 days'])}),
(False, {'A': [12],
'B': [10.0],
'C': [np.nan],
'D': np.array([np.nan], dtype=object),
'E': Categorical([np.nan], categories=['a']),
'F': [pd.NaT],
'G': to_timedelta([pd.NaT])}),
(True, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical(['a', np.nan, np.nan, np.nan],
categories=['a']),
'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
'N': [0, 1, 2, 3]}),
(False, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical([np.nan, 'a', np.nan, np.nan],
categories=['a']),
'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
df = DataFrame({"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, 'a', np.nan],
"E": Categorical([np.nan, np.nan, 'a', np.nan]),
"F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
"G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(['a', np.nan, 'a', np.nan]),
"L": to_datetime(['2000-1-2', '2000-1-2',
'NaT', 'NaT']),
"M": to_timedelta(['1 days', 'nan',
'1 days', 'nan']),
"N": np.arange(4, dtype='int64')})
result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
expected = DataFrame({'A': ['a', np.nan]})
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = df.mode(dropna=False)
result = result.sort_values(by='A').reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
B=date_range('2012-1-2', periods=3, freq='D'),
C=Timestamp('20120101') -
timedelta(minutes=5, seconds=5)))
diffs = DataFrame(dict(A=df['A'] - df['C'],
B=df['A'] - df['B']))
# min
result = diffs.min()
assert result[0] == diffs.loc[0, 'A']
assert result[1] == diffs.loc[0, 'B']
result = diffs.min(axis=1)
assert (result == diffs.loc[0, 'B']).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, 'A']
assert result[1] == diffs.loc[2, 'B']
result = diffs.max(axis=1)
assert (result == diffs['A']).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df['A'] - df['C'],
B=df['B'] - df['A']))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
result = mixed.min()
expected = Series([pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
pd.Timedelta(timedelta(days=-1)),
'foo', 1, 1.0,
Timestamp('20130101')],
index=mixed.columns)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[['A', 'B']].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[['A', 'B']].min()
expected = Series([timedelta(seconds=5 * 60 + 5),
timedelta(days=-1)], index=['A', 'B'])
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame({'time': date_range('20130102', periods=5),
'time2': date_range('20130105', periods=5)})
df['off1'] = df['time2'] - df['time']
assert df['off1'].dtype == 'timedelta64[ns]'
df['off2'] = df['time'] - df['time2']
df._consolidate_inplace()
assert df['off1'].dtype == 'timedelta64[ns]'
assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
empty_frame = DataFrame()
axis0 = empty_frame.sum(0)
axis1 = empty_frame.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize('method, unit', [
('sum', 0),
('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [unit, unit],
"b": [unit, np.nan],
"c": [np.nan, np.nan]})
# The default
result = getattr(df, method)()
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
# min_count=1
result = getattr(df, method)(min_count=1)
expected = pd.Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(df, method)(min_count=0)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)  # api: pandas.util.testing.assert_series_equal
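# Illustrative aside (not part of the original test): the min_count semantics
# exercised above, shown on a plain Series --
#   pd.Series([np.nan]).sum()               # -> 0.0  (empty/all-NaN sum)
#   pd.Series([np.nan]).sum(min_count=1)    # -> nan
#   pd.Series([np.nan]).prod()              # -> 1.0  (empty/all-NaN product)
#   pd.Series([np.nan]).prod(min_count=1)   # -> nan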
"""
.. _serp:
Import Search Engine Results Pages (SERPs) for Google and YouTube
=================================================================
"""
__all__ = ['SERP_GOOG_VALID_VALS', 'YOUTUBE_TOPIC_IDS',
'YOUTUBE_VID_CATEGORY_IDS', 'serp_goog', 'serp_youtube',
'set_logging_level', 'youtube_channel_details',
'youtube_video_details']
import datetime
import logging
from itertools import product
import pandas as pd
if int(pd.__version__[0]) >= 1:
from pandas import json_normalize
else:
from pandas.io.json import json_normalize
import requests
SERP_GOOG_LOG_FMT = ('%(asctime)s | %(levelname)s | %(filename)s:%(lineno)d '
'| %(funcName)s | %(message)s')
logging.basicConfig(format=SERP_GOOG_LOG_FMT)
##############################################################################
# Google variables
##############################################################################
SERP_GOOG_VALID_VALS = dict(
fileType={
'bas', 'c', 'cc', 'cpp', 'cs', 'cxx', 'doc', 'docx', 'dwf', 'gpx',
'h', 'hpp', 'htm', 'html', 'hwp', 'java', 'kml', 'kmz', 'odp', 'ods',
'odt', 'pdf', 'pl', 'ppt', 'pptx', 'ps', 'py', 'rtf', 'svg', 'swf',
'tex', 'text', 'txt', 'wap', 'wml', 'xls', 'xlsx', 'xml',
},
c2coff={0, 1},
cr={
'countryAF', 'countryAL', 'countryDZ', 'countryAS', 'countryAD',
'countryAO', 'countryAI', 'countryAQ', 'countryAG', 'countryAR',
'countryAM', 'countryAW', 'countryAU', 'countryAT', 'countryAZ',
'countryBS', 'countryBH', 'countryBD', 'countryBB', 'countryBY',
'countryBE', 'countryBZ', 'countryBJ', 'countryBM', 'countryBT',
'countryBO', 'countryBA', 'countryBW', 'countryBV', 'countryBR',
'countryIO', 'countryBN', 'countryBG', 'countryBF', 'countryBI',
'countryKH', 'countryCM', 'countryCA', 'countryCV', 'countryKY',
'countryCF', 'countryTD', 'countryCL', 'countryCN', 'countryCX',
'countryCC', 'countryCO', 'countryKM', 'countryCG', 'countryCD',
'countryCK', 'countryCR', 'countryCI', 'countryHR', 'countryCU',
'countryCY', 'countryCZ', 'countryDK', 'countryDJ', 'countryDM',
'countryDO', 'countryTP', 'countryEC', 'countryEG', 'countrySV',
'countryGQ', 'countryER', 'countryEE', 'countryET', 'countryEU',
'countryFK', 'countryFO', 'countryFJ', 'countryFI', 'countryFR',
'countryFX', 'countryGF', 'countryPF', 'countryTF', 'countryGA',
'countryGM', 'countryGE', 'countryDE', 'countryGH', 'countryGI',
'countryGR', 'countryGL', 'countryGD', 'countryGP', 'countryGU',
'countryGT', 'countryGN', 'countryGW', 'countryGY', 'countryHT',
'countryHM', 'countryVA', 'countryHN', 'countryHK', 'countryHU',
'countryIS', 'countryIN', 'countryID', 'countryIR', 'countryIQ',
'countryIE', 'countryIL', 'countryIT', 'countryJM', 'countryJP',
'countryJO', 'countryKZ', 'countryKE', 'countryKI', 'countryKP',
'countryKR', 'countryKW', 'countryKG', 'countryLA', 'countryLV',
'countryLB', 'countryLS', 'countryLR', 'countryLY', 'countryLI',
'countryLT', 'countryLU', 'countryMO', 'countryMK', 'countryMG',
'countryMW', 'countryMY', 'countryMV', 'countryML', 'countryMT',
'countryMH', 'countryMQ', 'countryMR', 'countryMU', 'countryYT',
'countryMX', 'countryFM', 'countryMD', 'countryMC', 'countryMN',
'countryMS', 'countryMA', 'countryMZ', 'countryMM', 'countryNA',
'countryNR', 'countryNP', 'countryNL', 'countryAN', 'countryNC',
'countryNZ', 'countryNI', 'countryNE', 'countryNG', 'countryNU',
'countryNF', 'countryMP', 'countryNO', 'countryOM', 'countryPK',
'countryPW', 'countryPS', 'countryPA', 'countryPG', 'countryPY',
'countryPE', 'countryPH', 'countryPN', 'countryPL', 'countryPT',
'countryPR', 'countryQA', 'countryRE', 'countryRO', 'countryRU',
'countryRW', 'countrySH', 'countryKN', 'countryLC', 'countryPM',
'countryVC', 'countryWS', 'countrySM', 'countryST', 'countrySA',
'countrySN', 'countryCS', 'countrySC', 'countrySL', 'countrySG',
'countrySK', 'countrySI', 'countrySB', 'countrySO', 'countryZA',
'countryGS', 'countryES', 'countryLK', 'countrySD', 'countrySR',
'countrySJ', 'countrySZ', 'countrySE', 'countryCH', 'countrySY',
'countryTW', 'countryTJ', 'countryTZ', 'countryTH', 'countryTG',
'countryTK', 'countryTO', 'countryTT', 'countryTN', 'countryTR',
'countryTM', 'countryTC', 'countryTV', 'countryUG', 'countryUA',
'countryAE', 'countryUK', 'countryUS', 'countryUM', 'countryUY',
'countryUZ', 'countryVU', 'countryVE', 'countryVN', 'countryVG',
'countryVI', 'countryWF', 'countryEH', 'countryYE', 'countryYU',
'countryZM', 'countryZW'
},
gl={
'ad', 'ae', 'af', 'ag', 'ai', 'al', 'am', 'an', 'ao', 'aq', 'ar',
'as', 'at', 'au', 'aw', 'az', 'ba', 'bb', 'bd', 'be', 'bf', 'bg',
'bh', 'bi', 'bj', 'bm', 'bn', 'bo', 'br', 'bs', 'bt', 'bv', 'bw',
'by', 'bz', 'ca', 'cc', 'cd', 'cf', 'cg', 'ch', 'ci', 'ck', 'cl',
'cm', 'cn', 'co', 'cr', 'cs', 'cu', 'cv', 'cx', 'cy', 'cz', 'de',
'dj', 'dk', 'dm', 'do', 'dz', 'ec', 'ee', 'eg', 'eh', 'er', 'es',
'et', 'fi', 'fj', 'fk', 'fm', 'fo', 'fr', 'ga', 'gd', 'ge', 'gf',
'gh', 'gi', 'gl', 'gm', 'gn', 'gp', 'gq', 'gr', 'gs', 'gt', 'gu',
'gw', 'gy', 'hk', 'hm', 'hn', 'hr', 'ht', 'hu', 'id', 'ie', 'il',
'in', 'io', 'iq', 'ir', 'is', 'it', 'jm', 'jo', 'jp', 'ke', 'kg',
'kh', 'ki', 'km', 'kn', 'kp', 'kr', 'kw', 'ky', 'kz', 'la', 'lb',
'lc', 'li', 'lk', 'lr', 'ls', 'lt', 'lu', 'lv', 'ly', 'ma', 'mc',
'md', 'mg', 'mh', 'mk', 'ml', 'mm', 'mn', 'mo', 'mp', 'mq', 'mr',
'ms', 'mt', 'mu', 'mv', 'mw', 'mx', 'my', 'mz', 'na', 'nc', 'ne',
'nf', 'ng', 'ni', 'nl', 'no', 'np', 'nr', 'nu', 'nz', 'om', 'pa',
'pe', 'pf', 'pg', 'ph', 'pk', 'pl', 'pm', 'pn', 'pr', 'ps', 'pt',
'pw', 'py', 'qa', 're', 'ro', 'ru', 'rw', 'sa', 'sb', 'sc', 'sd',
'se', 'sg', 'sh', 'si', 'sj', 'sk', 'sl', 'sm', 'sn', 'so', 'sr',
'st', 'sv', 'sy', 'sz', 'tc', 'td', 'tf', 'tg', 'th', 'tj', 'tk',
'tl', 'tm', 'tn', 'to', 'tr', 'tt', 'tv', 'tw', 'tz', 'ua', 'ug',
'uk', 'um', 'us', 'uy', 'uz', 'va', 'vc', 've', 'vg', 'vi', 'vn',
'vu', 'wf', 'ws', 'ye', 'yt', 'za', 'zm', 'zw',
},
filter={0, 1},
hl={
'af', 'sq', 'sm', 'ar', 'az', 'eu', 'be', 'bn', 'bh', 'bs', 'bg',
'ca', 'zh-CN', 'zh-TW', 'hr', 'cs', 'da', 'nl', 'en', 'eo', 'et',
'fo', 'fi', 'fr', 'fy', 'gl', 'ka', 'de', 'el', 'gu', 'iw', 'hi',
'hu', 'is', 'id', 'ia', 'ga', 'it', 'ja', 'jw', 'kn', 'ko', 'la',
'lv', 'lt', 'mk', 'ms', 'ml', 'mt', 'mr', 'ne', 'no', 'nn', 'oc',
'fa', 'pl', 'pt-BR', 'pt-PT', 'pa', 'ro', 'ru', 'gd', 'sr', 'si',
'sk', 'sl', 'es', 'su', 'sw', 'sv', 'tl', 'ta', 'te', 'th', 'ti',
'tr', 'uk', 'ur', 'uz', 'vi', 'cy', 'xh', 'zu'
},
imgColorType={
'color', 'gray', 'mono', 'trans'
},
imgDominantColor={
'black',
'blue',
'brown',
'gray',
'green',
'orange',
'pink',
'purple',
'red',
'teal',
'white',
'yellow',
},
imgSize={
'huge',
'icon',
'large',
'medium',
'small',
'xlarge',
'xxlarge',
},
imgType={
'clipart',
'face',
'lineart',
'stock',
'photo',
'animated'
},
lr={
'lang_ar', 'lang_bg', 'lang_ca', 'lang_zh-CN', 'lang_zh-TW',
'lang_hr', 'lang_cs', 'lang_da', 'lang_nl', 'lang_en', 'lang_et',
'lang_fi', 'lang_fr', 'lang_de', 'lang_el', 'lang_iw', 'lang_hu',
'lang_is', 'lang_id', 'lang_it', 'lang_ja', 'lang_ko', 'lang_lv',
'lang_lt', 'lang_no', 'lang_pl', 'lang_pt', 'lang_ro', 'lang_ru',
'lang_sr', 'lang_sk', 'lang_sl', 'lang_es', 'lang_sv', 'lang_tr',
},
num={1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
rights={
'cc_publicdomain', 'cc_attribute', 'cc_sharealike',
'cc_noncommercial', 'cc_nonderived'
},
safe={'active', 'off'},
searchType={None, 'image'},
siteSearchFilter={'e', 'i'},
start=range(1, 92)
)
##############################################################################
# YouTube variables
##############################################################################
YOUTUBE_TOPIC_IDS = {
'Entertainment topics': {'Entertainment (parent topic)': '/m/02jjt',
'Humor': '/m/09kqc',
'Movies': '/m/02vxn',
'Performing arts': '/m/05qjc',
'Professional wrestling': '/m/066wd',
'TV shows': '/m/0f2f9'},
'Gaming topics': {'Action game': '/m/025zzc',
'Action-adventure game': '/m/02ntfj',
'Casual game': '/m/0b1vjn',
'Gaming (parent topic)': '/m/0bzvm2',
'Music video game': '/m/02hygl',
'Puzzle video game': '/m/04q1x3q',
'Racing video game': '/m/01sjng',
'Role-playing video game': '/m/0403l3g',
'Simulation video game': '/m/021bp2',
'Sports game': '/m/022dc6',
'Strategy video game': '/m/03hf_rm'},
'Lifestyle topics': {'Fashion': '/m/032tl',
'Fitness': '/m/027x7n',
'Food': '/m/02wbm',
'Hobby': '/m/03glg',
'Lifestyle (parent topic)': '/m/019_rr',
'Pets': '/m/068hy',
'Physical attractiveness [Beauty]': '/m/041xxh',
'Technology': '/m/07c1v',
'Tourism': '/m/07bxq',
'Vehicles': '/m/07yv9'},
'Music topics': {'Christian music': '/m/02mscn',
'Classical music': '/m/0ggq0m',
'Country': '/m/01lyv',
'Electronic music': '/m/02lkt',
'Hip hop music': '/m/0glt670',
'Independent music': '/m/05rwpb',
'Jazz': '/m/03_d0',
'Music (parent topic)': '/m/04rlf',
'Music of Asia': '/m/028sqc',
'Music of Latin America': '/m/0g293',
'Pop music': '/m/064t9',
'Reggae': '/m/06cqb',
'Rhythm and blues': '/m/06j6l',
'Rock music': '/m/06by7',
'Soul music': '/m/0gywn'},
'Other topics': {'Knowledge': '/m/01k8wb'},
'Society topics': {'Business': '/m/09s1f',
'Health': '/m/0kt51',
'Military': '/m/01h6rj',
'Politics': '/m/05qt0',
'Religion': '/m/06bvp',
'Society (parent topic)': '/m/098wr'},
'Sports topics': {'American football': '/m/0jm_',
'Baseball': '/m/018jz',
'Basketball': '/m/018w8',
'Boxing': '/m/01cgz',
'Cricket': '/m/09xp_',
'Football': '/m/02vx4',
'Golf': '/m/037hz',
'Ice hockey': '/m/03tmr',
'Mixed martial arts': '/m/01h7lh',
'Motorsport': '/m/0410tth',
'Sports (parent topic)': '/m/06ntj',
'Tennis': '/m/07bs0',
'Volleyball': '/m/07_53'}
}
YOUTUBE_VID_CATEGORY_IDS = {
'Action/Adventure': '32',
'Anime/Animation': '31',
'Autos & Vehicles': '2',
'Classics': '33',
'Comedy': '34',
'Documentary': '35',
'Drama': '36',
'Education': '27',
'Entertainment': '24',
'Family': '37',
'Film & Animation': '1',
'Foreign': '38',
'Gaming': '20',
'Horror': '39',
'Howto & Style': '26',
'Movies': '30',
'Music': '10',
'News & Politics': '25',
'Nonprofits & Activism': '29',
'People & Blogs': '22',
'Pets & Animals': '15',
'Sci-Fi/Fantasy': '40',
'Science & Technology': '28',
'Short Movies': '18',
'Shorts': '42',
'Shows': '43',
'Sports': '17',
'Thriller': '41',
'Trailers': '44',
'Travel & Events': '19',
'Videoblogging': '21'
}
SERP_YTUBE_VALID_VALS = dict(
channelType={'any', 'show'},
eventType={'completed', 'live', 'upcoming'},
forContentOwner={True, False, 'true', 'false'},
forDeveloper={True, False, 'true', 'false'},
forMine={True, False, 'true', 'false'},
maxResults=range(51),
order={'date', 'rating', 'relevance', 'title',
'videoCount', 'viewCount'},
regionCode={
'ad', 'ae', 'af', 'ag', 'ai', 'al', 'am', 'an', 'ao', 'aq', 'ar',
'as', 'at', 'au', 'aw', 'az', 'ba', 'bb', 'bd', 'be', 'bf', 'bg',
'bh', 'bi', 'bj', 'bm', 'bn', 'bo', 'br', 'bs', 'bt', 'bv', 'bw',
'by', 'bz', 'ca', 'cc', 'cd', 'cf', 'cg', 'ch', 'ci', 'ck', 'cl',
'cm', 'cn', 'co', 'cr', 'cs', 'cu', 'cv', 'cx', 'cy', 'cz', 'de',
'dj', 'dk', 'dm', 'do', 'dz', 'ec', 'ee', 'eg', 'eh', 'er', 'es',
'et', 'fi', 'fj', 'fk', 'fm', 'fo', 'fr', 'ga', 'gd', 'ge', 'gf',
'gh', 'gi', 'gl', 'gm', 'gn', 'gp', 'gq', 'gr', 'gs', 'gt', 'gu',
'gw', 'gy', 'hk', 'hm', 'hn', 'hr', 'ht', 'hu', 'id', 'ie', 'il',
'in', 'io', 'iq', 'ir', 'is', 'it', 'jm', 'jo', 'jp', 'ke', 'kg',
'kh', 'ki', 'km', 'kn', 'kp', 'kr', 'kw', 'ky', 'kz', 'la', 'lb',
'lc', 'li', 'lk', 'lr', 'ls', 'lt', 'lu', 'lv', 'ly', 'ma', 'mc',
'md', 'mg', 'mh', 'mk', 'ml', 'mm', 'mn', 'mo', 'mp', 'mq', 'mr',
'ms', 'mt', 'mu', 'mv', 'mw', 'mx', 'my', 'mz', 'na', 'nc', 'ne',
'nf', 'ng', 'ni', 'nl', 'no', 'np', 'nr', 'nu', 'nz', 'om', 'pa',
'pe', 'pf', 'pg', 'ph', 'pk', 'pl', 'pm', 'pn', 'pr', 'ps', 'pt',
'pw', 'py', 'qa', 're', 'ro', 'ru', 'rw', 'sa', 'sb', 'sc', 'sd',
'se', 'sg', 'sh', 'si', 'sj', 'sk', 'sl', 'sm', 'sn', 'so', 'sr',
'st', 'sv', 'sy', 'sz', 'tc', 'td', 'tf', 'tg', 'th', 'tj', 'tk',
'tl', 'tm', 'tn', 'to', 'tr', 'tt', 'tv', 'tw', 'tz', 'ua', 'ug',
'uk', 'um', 'us', 'uy', 'uz', 'va', 'vc', 've', 'vg', 'vi', 'vn',
'vu', 'wf', 'ws', 'ye', 'yt', 'za', 'zm', 'zw',
},
relevanceLanguage={
'af', 'sq', 'sm', 'ar', 'az', 'eu', 'be', 'bn', 'bh', 'bs', 'bg',
'ca', 'zh-CN', 'zh-TW', 'zh-Hans', 'zh-Hant', 'hr', 'cs', 'da',
'nl', 'en', 'eo', 'et', 'fo', 'fi', 'fr', 'fy', 'gl', 'ka', 'de',
'el', 'gu', 'iw', 'hi', 'hu', 'is', 'id', 'ia', 'ga', 'it', 'ja',
'jw', 'kn', 'ko', 'la', 'lv', 'lt', 'mk', 'ms', 'ml', 'mt', 'mr',
'ne', 'no', 'nn', 'oc', 'fa', 'pl', 'pt-BR', 'pt-PT', 'pa', 'ro',
'ru', 'gd', 'sr', 'si', 'sk', 'sl', 'es', 'su', 'sw', 'sv', 'tl',
'ta', 'te', 'th', 'ti', 'tr', 'uk', 'ur', 'uz', 'vi', 'cy', 'xh',
'zu'
},
safeSearch={'moderate', 'none', 'strict'},
topicId={
'/m/04rlf', '/m/02mscn', '/m/0ggq0m', '/m/01lyv', '/m/02lkt',
'/m/0glt670', '/m/05rwpb', '/m/03_d0', '/m/028sqc', '/m/0g293',
'/m/064t9', '/m/06cqb', '/m/06j6l', '/m/06by7', '/m/0gywn',
'/m/0bzvm2', '/m/025zzc', '/m/02ntfj', '/m/0b1vjn', '/m/02hygl',
'/m/04q1x3q', '/m/01sjng', '/m/0403l3g', '/m/021bp2', '/m/022dc6',
'/m/03hf_rm', '/m/06ntj', '/m/0jm_', '/m/018jz', '/m/018w8',
'/m/01cgz', '/m/09xp_', '/m/02vx4', '/m/037hz', '/m/03tmr',
'/m/01h7lh', '/m/0410tth', '/m/07bs0', '/m/07_53', '/m/02jjt',
'/m/09kqc', '/m/02vxn', '/m/05qjc', '/m/066wd', '/m/0f2f9',
'/m/019_rr', '/m/032tl', '/m/027x7n', '/m/02wbm', '/m/03glg',
'/m/068hy', '/m/041xxh', '/m/07c1v', '/m/07bxq', '/m/07yv9',
'/m/098wr', '/m/09s1f', '/m/0kt51', '/m/01h6rj', '/m/05qt0',
'/m/06bvp', '/m/01k8wb'
},
type={'channel', 'playlist', 'video'},
videoCaption={'any', 'closedCaption', 'none'},
videoCategoryId={
'1', '2', '10', '15', '17', '18', '19', '20', '21', '22', '23',
'24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34',
'35', '36', '37', '38', '39', '40', '41', '42', '43', '44'
},
videoDefinition={'any', 'high', 'standard'},
videoDimension={'2d', '3d', 'any'},
videoDuration={'any', 'long', 'medium', 'short'},
videoEmbeddable={'any', True, 'true'},
videoLicense={'any', 'creativeCommon', 'youtube'},
videoSyndicated={'any', True, 'true'},
videoType={'any', 'episode', 'movie'},
)
def _split_by_comma(s, length=50):
"""Group a comma-separated string into a list of at-most
``length``-length words each."""
str_split = s.split(',')
str_list = []
for i in range(0, len(str_split) + length, length):
temp_str = ','.join(str_split[i:i+length])
if temp_str:
str_list.append(temp_str)
return str_list
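# Illustrative behaviour of the helper above (values chosen for the example):
#   _split_by_comma('a,b,c,d,e', length=2)  # -> ['a,b', 'c,d', 'e']
# A long comma-separated id string is therefore chunked into at most ``length``
# ids per request, matching the 50-id limit used by the YouTube calls below.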
def youtube_video_details(key, vid_ids):
"""Return details of videos for which the ids are given.
Assumes ``vid_ids`` is a comma-separated list of video ids with
no spaces."""
base_url = ('https://www.googleapis.com/youtube/v3/videos?part='
'contentDetails,id,liveStreamingDetails,localizations,player,'
'recordingDetails,snippet,statistics,status,topicDetails')
vid_ids = _split_by_comma(vid_ids, length=50)
final_df = pd.DataFrame()
for vid_id in vid_ids:
params = {'id': vid_id, 'key': key}
logging.info(msg='Requesting: ' + 'video details')
video_resp = requests.get(base_url, params=params)
if video_resp.status_code >= 400:
raise Exception(video_resp.json())
items_df = pd.DataFrame(video_resp.json()['items'])
details = ['snippet', 'topicDetails', 'statistics',
'status', 'contentDetails']
detail_df = pd.DataFrame()
for detail in details:
try:
detail_df = pd.concat([
detail_df,
pd.DataFrame([x[detail] for x in
video_resp.json()['items']])
], axis=1)
except KeyError:
continue
temp_df = pd.concat([items_df, detail_df], axis=1)
final_df = final_df.append(temp_df, sort=False, ignore_index=True)
return final_df
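# Hedged usage sketch (the key and video ids below are placeholders only):
#   details_df = youtube_video_details(key='YOUR_KEY',
#                                      vid_ids='VIDEO_ID_1,VIDEO_ID_2')
# This returns one row per video, with the snippet/statistics/contentDetails
# fields flattened alongside the raw item columns.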
def youtube_channel_details(key, channel_ids):
"""Return details of channels for which the ids are given.
Assumes ``channel_ids`` is a comma-separated list of channel ids with
no spaces."""
base_url = ('https://www.googleapis.com/youtube/v3/channels?part='
'snippet,contentDetails,statistics')
channel_ids = _split_by_comma(channel_ids, length=50)
final_df = pd.DataFrame()
for channel_id in channel_ids:
params = {'id': channel_id, 'key': key}
logging.info(msg='Requesting: ' + 'channel details')
channel_resp = requests.get(base_url, params=params)
if channel_resp.status_code >= 400:
raise Exception(channel_resp.json())
items_df = pd.DataFrame(channel_resp.json()['items'])
details = ['snippet', 'statistics', 'contentDetails']
detail_df = pd.DataFrame()
for detail in details:
try:
detail_df = pd.concat([
detail_df,
pd.DataFrame([x[detail] for x in
channel_resp.json()['items']])
], axis=1)
except KeyError:
continue
temp_df = pd.concat([items_df, detail_df], axis=1)
final_df = final_df.append(temp_df, sort=False, ignore_index=True)
return final_df
def _dict_product(d):
"""Return the product of all values of a dict, while
coupling each value with its key.
This is used to generate multiple queries out of
possibly multiple arguments in serp_goog.
>>> d = {'a': [1], 'b': [2, 3, 4], 'c': [5, 6]}
>>> _dict_product(d)
>>> [{'a': 1, 'b': 2, 'c': 5},
{'a': 1, 'b': 2, 'c': 6},
{'a': 1, 'b': 3, 'c': 5},
{'a': 1, 'b': 3, 'c': 6},
{'a': 1, 'b': 4, 'c': 5},
{'a': 1, 'b': 4, 'c': 6}]
"""
items = list(d.items())
keys = [x[0] for x in items]
values = [x[1] for x in items]
dicts = []
for prod in product(*values):
tempdict = dict(zip(keys, prod))
dicts.append(tempdict)
return dicts
def serp_goog(q, cx, key, c2coff=None, cr=None,
dateRestrict=None, exactTerms=None, excludeTerms=None,
fileType=None, filter=None, gl=None, highRange=None,
hl=None, hq=None, imgColorType=None, imgDominantColor=None,
imgSize=None, imgType=None, linkSite=None, lowRange=None,
lr=None, num=None, orTerms=None, relatedSite=None,
rights=None, safe=None, searchType=None, siteSearch=None,
siteSearchFilter=None, sort=None, start=None):
"""Query Google and get search results in a DataFrame.
For each parameter, you can supply single or multiple values / arguments.
If you pass multiple arguments, all the possible combinations of
arguments (the product) will be requested, and you will get one
DataFrame combining all queries. See examples below.
:param q: The search expression.
:param cx: The custom search engine ID to use for this
request.
:param key: The API key of your custom search engine.
:param c2coff: Enables or disables Simplified and
Traditional Chinese Search. The default value for this
parameter is 0 (zero), meaning that the feature is enabled.
Supported values are: 1: Disabled, 0: Enabled (default).
:param cr: Restricts search results to documents
originating in a particular country. You may use Boolean
operators in the cr parameter's value. Google Search
determines the country of a document by analyzing: the
top-level domain (TLD) of the document's URL and the
geographic location of the Web server's IP address. See the Country
Parameter Values page for a list of valid values for this
parameter.
:param dateRestrict: Restricts results to URLs based on
date. Supported values include:
- d[number]: requests results from the specified number of past days.
- w[number]: requests results from the specified number of past weeks.
- m[number]: requests results from the specified number of past months.
- y[number]: requests results from the specified number of past years.
:param exactTerms: Identifies a phrase that all
documents in the search results must contain.
:param excludeTerms: Identifies a word or phrase that
should not appear in any documents in the search results.
:param fileType: Restricts results to files of a
specified extension. A list of file types indexable by
Google can be found in Search Console Help Center.
:param filter: Controls turning on or off the duplicate
content filter. See Automatic Filtering for more information
about Google's search results filters. Note that host
crowding filtering applies only to multi-site searches. By
default, Google applies filtering to all search results to
improve the quality of those results. Acceptable values
are: "0": Turns off duplicate content filter. "1": Turns
on duplicate content filter.
:param gl: Geolocation of end user. The gl parameter
value is a two-letter country code. The gl parameter boosts
search results whose country of origin matches the parameter
value. See the Country Codes page for a list of valid
values. Specifying a gl parameter value should lead to more
relevant results. This is particularly true for
international customers and, even more specifically, for
customers in English- speaking countries other than the
United States.
:param highRange: Specifies the ending value for a
search range. Use lowRange and highRange to append an
inclusive search range of lowRange...highRange to the query.
:param hl: Sets the user interface language. Explicitly
setting this parameter improves the performance and the
quality of your search results. See the Interface
Languages section of Internationalizing Queries and Results
Presentation for more information, and Supported Interface
Languages for a list of supported languages.
:param hq: Appends the specified query terms to the
query, as if they were combined with a logical AND operator.
:param imgColorType: Returns black and white, grayscale,
or color images: mono, gray, and color. Acceptable values
are: "color": color "gray": gray "mono": mono
:param imgDominantColor: Returns images of a specific
dominant color. Acceptable values are: "black": black
"blue": blue "brown": brown "gray": gray "green": green
"orange": orange "pink": pink "purple": purple "red": red
"teal": teal "white": white "yellow": yellow
:param imgSize: Returns images of a specified size.
Acceptable values are: "huge": huge "icon": icon "large":
large "medium": medium "small": small "xlarge": xlarge
"xxlarge": xxlarge
:param imgType: Returns images of a type. Acceptable
values are: "clipart": clipart "face": face "lineart":
lineart "news": news "photo": photo
:param linkSite: Specifies that all search results
should contain a link to a particular URL
:param lowRange: Specifies the starting value for a
search range. Use lowRange and highRange to append an
inclusive search range of lowRange...highRange to the query.
:param lr: Restricts the search to documents written in
a particular language (e.g., lr=lang_ja). Acceptable values
are: "lang_ar": Arabic "lang_bg": Bulgarian "lang_ca":
Catalan "lang_cs": Czech "lang_da": Danish "lang_de":
German "lang_el": Greek "lang_en": English "lang_es":
Spanish "lang_et": Estonian "lang_fi": Finnish "lang_fr":
French "lang_hr": Croatian "lang_hu": Hungarian
"lang_id": Indonesian "lang_is": Icelandic "lang_it":
Italian "lang_iw": Hebrew "lang_ja": Japanese "lang_ko":
Korean "lang_lt": Lithuanian "lang_lv": Latvian
"lang_nl": Dutch "lang_no": Norwegian "lang_pl": Polish
"lang_pt": Portuguese "lang_ro": Romanian "lang_ru":
Russian "lang_sk": Slovak "lang_sl": Slovenian "lang_sr":
Serbian "lang_sv": Swedish "lang_tr": Turkish "lang_zh-
CN": Chinese (Simplified) "lang_zh-TW": Chinese
(Traditional)
:param num: Number of search results to return. Valid
values are integers between 1 and 10, inclusive.
:param orTerms: Provides additional search terms to
check for in a document, where each document in the search
results must contain at least one of the additional search
terms.
:param relatedSite: Specifies that all search results
should be pages that are related to the specified URL.
:param rights: Filters based on licensing. Supported
values include: cc_publicdomain, cc_attribute,
cc_sharealike, cc_noncommercial, cc_nonderived, and
combinations of these.
:param safe: Search safety level. Acceptable values
are: "active": Enables SafeSearch filtering. "off":
Disables SafeSearch filtering. (default)
:param searchType: Specifies the search type: image. If
unspecified, results are limited to webpages. Acceptable
values are: "image": custom image search.
:param siteSearch: Specifies all search results should
be pages from a given site.
:param siteSearchFilter: Controls whether to include or
exclude results from the site named in the siteSearch
parameter. Acceptable values are: "e": exclude "i":
include
:param sort: The sort expression to apply to the
results.
:param start: The index of the first result to
return. Valid values are integers starting at 1 (default);
the second result is 2, and so forth. For example &start=11
gives the second page of results with the default "num"
value of 10 results per page. Note: No more than 100 results
will ever be returned for any query with JSON API, even if
more than 100 documents match the query, so setting
(start + num) to more than 100 will produce an error. Note
that the maximum value for num is 10.
The following function call will produce two queries:
"hotel" in the USA, and "hotel" in France
>>> serp_goog(q='hotel', gl=['us', 'fr'], cx='YOUR_CX', key='YOUR_KEY')
The function call below will produce four queries and make four requests:
"flights" in UK
"flights" in Australia
"tickets" in UK
"tickets" in Australia
'cr' here refers to 'country restrict', which focuses on content
originating from the specified country.
>>> serp_goog(q=['flights', 'tickets'], cr=['countryUK', 'countryAU'],
cx='YOUR_CX', key='YOUR_KEY')
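An illustrative image-search call (the parameter values here are just
examples drawn from the acceptable values documented above):
>>> serp_goog(q='hotel', searchType='image', imgSize='large',
imgType='photo', cx='YOUR_CX', key='YOUR_KEY')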
"""
params = locals()
supplied_params = {k: v for k, v in params.items() if params[k] is not None}
for p in supplied_params:
if isinstance(supplied_params[p], (str, int)):
supplied_params[p] = [supplied_params[p]]
for p in supplied_params:
if p in SERP_GOOG_VALID_VALS:
if not set(supplied_params[p]).issubset(SERP_GOOG_VALID_VALS[p]):
raise ValueError('Please make sure you provide a'
' valid value for "{}", valid values:\n'
'{}'.format(p,
sorted(SERP_GOOG_VALID_VALS[p])))
params_list = _dict_product(supplied_params)
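# _dict_product expands the supplied parameters into their cartesian
# product, e.g. (illustrative) {'q': ['flights'], 'cr': ['countryUK',
# 'countryAU']} becomes two dicts, one per request, mirroring the
# docstring examples above.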
base_url = 'https://www.googleapis.com/customsearch/v1?'
specified_cols = ['searchTerms', 'rank', 'title', 'snippet',
'displayLink', 'link', 'queryTime', 'totalResults']
responses = []
for param in params_list:
param_log = ', '.join([k + '=' + str(v) for k, v in param.items()])
logging.info(msg='Requesting: ' + param_log)
resp = requests.get(base_url, params=param)
if resp.status_code >= 400:
raise Exception(resp.json())
responses.append(resp)
result_df = pd.DataFrame()
for i, resp in enumerate(responses):
request_metadata = resp.json()['queries']['request'][0]
del request_metadata['title']
search_info = resp.json()['searchInformation']
if int(search_info['totalResults']) == 0:
df = pd.DataFrame(columns=specified_cols, index=range(1))
df['searchTerms'] = request_metadata['searchTerms']
# These keys don't appear in the response so they have to be
# added manually
for missing in ['lr', 'num', 'start', 'c2coff']:
if missing in params_list[i]:
df[missing] = params_list[i][missing]
else:
df = pd.DataFrame(resp.json()['items'])
df['cseName'] = resp.json()['context']['title']
start_idx = request_metadata['startIndex']
df['rank'] = range(start_idx, start_idx + len(df))
for missing in ['lr', 'num', 'start', 'c2coff']:
if missing in params_list[i]:
df[missing] = params_list[i][missing]
meta_columns = {**request_metadata, **search_info}
df = df.assign(**meta_columns)
df['queryTime'] = datetime.datetime.now(tz=datetime.timezone.utc)
df['queryTime'] = pd.to_datetime(df['queryTime'])
if 'image' in df:
img_df = | json_normalize(df['image']) | pandas.io.json.json_normalize |
# python regression_randombag_sample.py MSA_NAME LEN_SEEDS NUM_SAMPLE SAMPLE_FRAC
# python regression_randombag_sample.py Atlanta 3 NUM_SAMPLE SAMPLE_FRAC
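# e.g. (illustrative values): python regression_randombag_sample.py Atlanta 3 100 0.5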
import setproctitle
setproctitle.setproctitle("covid-19-vac@chenlin")
import sys
import os
import constants
import functions
import numpy as np
import pandas as pd
import pickle
from sklearn import preprocessing
import statsmodels.api as sm
from statsmodels.stats.outliers_influence import summary_table
from sklearn.preprocessing import StandardScaler
from sklearn import linear_model
from scipy import stats
import math
import pdb
###############################################################################
# Main variables
root = '/data/chenlin/COVID-19/Data'
timestring = '20210206'
MSA_NAME = sys.argv[1]
MSA_NAME_FULL = constants.MSA_NAME_FULL_DICT[MSA_NAME]
print('\nMSA_NAME: ',MSA_NAME)
RANDOM_SEED_LIST = [66,42,5]
LEN_SEEDS = int(sys.argv[2])
print('Num of random seeds: ', LEN_SEEDS)
print('Random seeds: ',RANDOM_SEED_LIST[:LEN_SEEDS])
LASSO_alpha = 0.1 #float(sys.argv[2])
#print('LASSO_alpha:',LASSO_alpha)
NUM_SAMPLE = int(sys.argv[3]); print('Num of samples: ', NUM_SAMPLE)
SAMPLE_FRAC = float(sys.argv[4]); print('Sample fraction: ', SAMPLE_FRAC)
###############################################################################
# Load Common Data: No need for reloading when switching among different MSAs.
# Load POI-CBG visiting matrices
f = open(os.path.join(root, MSA_NAME, '%s_2020-03-01_to_2020-05-02.pkl'%MSA_NAME_FULL), 'rb')
poi_cbg_visits_list = pickle.load(f)
f.close()
# Load ACS Data for matching with NYT Data
acs_data = pd.read_csv(os.path.join(root,'list1.csv'),header=2)
acs_msas = [msa for msa in acs_data['CBSA Title'].unique() if type(msa) == str]
# Load NYT Data
nyt_data = pd.read_csv(os.path.join(root, 'us-counties.csv'))
# Load Demographic Data
filepath = os.path.join(root,"safegraph_open_census_data/data/cbg_b01.csv")
cbg_agesex = | pd.read_csv(filepath) | pandas.read_csv |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-11-06 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
self.assertTrue(isnull(result))
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
self.assertRaises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
assert_series_equal(s.astype(np.int64), expected, )
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0, 'b', 1, 'd', 'e', 'f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
self.assertRaises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a', 'b', 'c', 0, 1, 'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(2))
self.assertRaises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)), 'b', 'c'])
assert_series_equal(s, expected)
def test_where_broadcast(self):
# Test a variety of differently sized series
for size in range(2, 6):
# Test a variety of boolean indices
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
# Test a variety of different numbers as content
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
# Test numpy arrays, lists and tuples as the input to be
# broadcast
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
s[selection] = arr
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, arr)
assert_series_equal(result, expected)
def test_where_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan],
index=[0, 1, 2, 0, 1, 2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
def test_where_datetime(self):
s = Series(date_range('20130102', periods=2))
expected = Series([10, 10], dtype='datetime64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='datetime64[ns]')
assert_series_equal(rs, expected)
def test_where_timedelta(self):
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10], dtype='timedelta64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='timedelta64[ns]')
assert_series_equal(rs, expected)
def test_mask(self):
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
assert_series_equal(rs, rs2)
self.assertRaises(ValueError, s.mask, 1)
self.assertRaises(ValueError, s.mask, cond[:3].values, -s)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.mask(s > 2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
assert_series_equal(result, expected)
def test_mask_broadcast(self):
# GH 8801
# copied from test_where_broadcast
for size in range(2, 6):
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
result = s.mask(selection, arr)
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(result, expected)
def test_mask_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.mask(cond, inplace=True)
assert_series_equal(rs.dropna(), s[~cond])
assert_series_equal(rs, s.mask(cond))
rs = s.copy()
rs.mask(cond, -s, inplace=True)
assert_series_equal(rs, s.mask(cond, -s))
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEqual(self.series[d1], 4)
self.assertEqual(self.series[d2], 6)
def test_where_numeric_with_string(self):
# GH 9280
s = pd.Series([1, 2, 3])
w = s.where(s > 1, 'X')
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, ['X', 'Y', 'Z'])
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, np.array(['X', 'Y', 'Z']))
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
def test_setitem_boolean(self):
mask = self.series > self.series.median()
# similarly indexed series
result = self.series.copy()
result[mask] = self.series * 2
expected = self.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = self.series.copy()
result[mask] = (self.series * 2)[0:5]
expected = (self.series * 2)[0:5].reindex_like(self.series)
expected[-mask] = self.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.sort_values()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
# gets coerced to float, right?
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8,
9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_basic_indexing(self):
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
self.assertRaises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
def test_int_indexing(self):
s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
# not monotonic
s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
def test_datetime_indexing(self):
from pandas import date_range
index = date_range('1/1/2000', '1/7/2000')
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp('1/8/2000')
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta('1 days'), index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s, Series(
[np.nan, Timedelta('1 days')], index=['A', 'B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'), index=['A', 'B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
# GH 14155
s = Series(10 * [np.timedelta64(10, 'm')])
s.loc[[1, 2, 3]] = np.timedelta64(20, 'm')
expected = pd.Series(10 * [np.timedelta64(10, 'm')])
expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, 'm'))
tm.assert_series_equal(s, expected)
def test_underlying_data_conversion(self):
# GH 4080
df = DataFrame(dict((c, [1, 2, 3]) for c in ['a', 'b', 'c']))
df.set_index(['a', 'b', 'c'], inplace=True)
s = Series([1], index=[(2, 2, 2)])
df['val'] = 0
df
df['val'].update(s)
expected = DataFrame(
dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0]))
expected.set_index(['a', 'b', 'c'], inplace=True)
tm.assert_frame_equal(df, expected)
# GH 3970
# these are chained assignments as well
pd.set_option('chained_assignment', None)
df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
df["cc"] = 0.0
ck = [True] * len(df)
df["bb"].iloc[0] = .13
# TODO: unused
df_tmp = df.iloc[ck] # noqa
df["bb"].iloc[0] = .15
self.assertEqual(df['bb'].iloc[0], 0.15)
pd.set_option('chained_assignment', 'raise')
# GH 3217
df = DataFrame(dict(a=[1, 3], b=[np.nan, 2]))
df['c'] = np.nan
df['c'].update(pd.Series(['foo'], index=[0]))
expected = DataFrame(dict(a=[1, 3], b=[np.nan, 2], c=['foo', np.nan]))
tm.assert_frame_equal(df, expected)
def test_preserveRefs(self):
seq = self.ts[[5, 10, 15]]
seq[1] = np.NaN
self.assertFalse(np.isnan(self.ts[10]))
def test_drop(self):
# unique
s = Series([1, 2], index=['one', 'two'])
expected = Series([1], index=['one'])
result = s.drop(['two'])
assert_series_equal(result, expected)
result = s.drop('two', axis='rows')
assert_series_equal(result, expected)
# non-unique
# GH 5248
s = Series([1, 1, 2], index=['one', 'two', 'one'])
expected = Series([1, 2], index=['one', 'one'])
result = s.drop(['two'], axis=0)
assert_series_equal(result, expected)
result = s.drop('two')
assert_series_equal(result, expected)
expected = Series([1], index=['two'])
result = s.drop(['one'])
assert_series_equal(result, expected)
result = s.drop('one')
assert_series_equal(result, expected)
# single string/tuple-like
s = Series(range(3), index=list('abc'))
self.assertRaises(ValueError, s.drop, 'bc')
self.assertRaises(ValueError, s.drop, ('a', ))
# errors='ignore'
s = Series(range(3), index=list('abc'))
result = s.drop('bc', errors='ignore')
assert_series_equal(result, s)
result = s.drop(['a', 'd'], errors='ignore')
expected = s.ix[1:]
assert_series_equal(result, expected)
# bad axis
self.assertRaises(ValueError, s.drop, 'one', axis='columns')
# GH 8522
s = Series([2, 3], index=[True, False])
self.assertTrue(s.index.is_object())
result = s.drop(True)
expected = Series([3], index=[False])
assert_series_equal(result, expected)
def test_align(self):
def _check_align(a, b, how='left', fill=None):
aa, ab = a.align(b, join=how, fill_value=fill)
join_index = a.index.join(b.index, how=how)
if fill is not None:
diff_a = aa.index.difference(join_index)
diff_b = ab.index.difference(join_index)
if len(diff_a) > 0:
self.assertTrue((aa.reindex(diff_a) == fill).all())
if len(diff_b) > 0:
self.assertTrue((ab.reindex(diff_b) == fill).all())
ea = a.reindex(join_index)
eb = b.reindex(join_index)
if fill is not None:
ea = ea.fillna(fill)
eb = eb.fillna(fill)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
self.assertEqual(aa.name, 'ts')
self.assertEqual(ea.name, 'ts')
self.assertEqual(ab.name, 'ts')
self.assertEqual(eb.name, 'ts')
for kind in JOIN_TYPES:
_check_align(self.ts[2:], self.ts[:-5], how=kind)
_check_align(self.ts[2:], self.ts[:-5], how=kind, fill=-1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind)
_check_align(self.ts[:0], self.ts[:-5], how=kind, fill=-1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind)
_check_align(self.ts[:-5], self.ts[:0], how=kind, fill=-1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind)
_check_align(self.ts[:0], self.ts[:0], how=kind, fill=-1)
def test_align_fill_method(self):
def _check_align(a, b, how='left', method='pad', limit=None):
aa, ab = a.align(b, join=how, method=method, limit=limit)
join_index = a.index.join(b.index, how=how)
ea = a.reindex(join_index)
eb = b.reindex(join_index)
ea = ea.fillna(method=method, limit=limit)
eb = eb.fillna(method=method, limit=limit)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
for kind in JOIN_TYPES:
for meth in ['pad', 'bfill']:
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth,
limit=1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth,
limit=1)
def test_align_nocopy(self):
b = self.ts[:5].copy()
# do copy
a = self.ts.copy()
ra, _ = a.align(b, join='left')
ra[:5] = 5
self.assertFalse((a[:5] == 5).any())
# do not copy
a = self.ts.copy()
ra, _ = a.align(b, join='left', copy=False)
ra[:5] = 5
self.assertTrue((a[:5] == 5).all())
# do copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right')
rb[:3] = 5
self.assertFalse((b[:3] == 5).any())
# do not copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right', copy=False)
rb[:2] = 5
self.assertTrue((b[:2] == 5).all())
def test_align_sameindex(self):
a, b = self.ts.align(self.ts, copy=False)
self.assertIs(a.index, self.ts.index)
self.assertIs(b.index, self.ts.index)
# a, b = self.ts.align(self.ts, copy=True)
# self.assertIsNot(a.index, self.ts.index)
# self.assertIsNot(b.index, self.ts.index)
def test_align_multiindex(self):
# GH 10665
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
s1 = pd.Series(np.arange(12, dtype='int64'), index=midx)
s2 = pd.Series(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = s1.align(s2, join='left')
res2l, res2r = s2.align(s1, join='right')
expl = s1
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
res1l, res1r = s1.align(s2, join='right')
res2l, res2r = s2.align(s1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1] * 2, index=exp_idx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
def test_reindex(self):
identity = self.series.reindex(self.series.index)
# __array_interface__ is not defined for older numpies
# and on some pythons
try:
self.assertTrue(np.may_share_memory(self.series.index,
identity.index))
except (AttributeError):
pass
self.assertTrue(identity.index.is_(self.series.index))
self.assertTrue(identity.index.identical(self.series.index))
subIndex = self.series.index[10:20]
subSeries = self.series.reindex(subIndex)
for idx, val in compat.iteritems(subSeries):
self.assertEqual(val, self.series[idx])
subIndex2 = self.ts.index[10:20]
subTS = self.ts.reindex(subIndex2)
for idx, val in compat.iteritems(subTS):
self.assertEqual(val, self.ts[idx])
stuffSeries = self.ts.reindex(subIndex)
self.assertTrue(np.isnan(stuffSeries).all())
# This is extremely important for the Cython code to not screw up
nonContigIndex = self.ts.index[::2]
subNonContig = self.ts.reindex(nonContigIndex)
for idx, val in compat.iteritems(subNonContig):
self.assertEqual(val, self.ts[idx])
# return a copy the same index here
result = self.ts.reindex()
self.assertFalse((result is self.ts))
def test_reindex_nan(self):
ts = Series([2, 3, 5, 7], index=[1, 4, nan, 8])
i, j = [nan, 1, nan, 8, 4, nan], [2, 0, 2, 3, 1, 2]
assert_series_equal(ts.reindex(i), ts.iloc[j])
ts.index = ts.index.astype('object')
# reindex coerces index.dtype to float, loc/iloc doesn't
assert_series_equal(ts.reindex(i), ts.iloc[j], check_index_type=False)
def test_reindex_corner(self):
# (don't forget to fix this) I think it's fixed
self.empty.reindex(self.ts.index, method='pad') # it works
# corner case: pad empty series
reindexed = self.empty.reindex(self.ts.index, method='pad')
# pass non-Index
reindexed = self.ts.reindex(list(self.ts.index))
assert_series_equal(self.ts, reindexed)
# bad fill method
ts = self.ts[::2]
self.assertRaises(Exception, ts.reindex, self.ts.index, method='foo')
def test_reindex_pad(self):
s = Series(np.arange(10), dtype='int64')
s2 = s[::2]
reindexed = s2.reindex(s.index, method='pad')
reindexed2 = s2.reindex(s.index, method='ffill')
assert_series_equal(reindexed, reindexed2)
expected = Series([0, 0, 2, 2, 4, 4, 6, 6, 8, 8], index=np.arange(10))
assert_series_equal(reindexed, expected)
# GH4604
s = Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 'd', 'e'])
new_index = ['a', 'g', 'c', 'f']
expected = Series([1, 1, 3, 3], index=new_index)
# this changes dtype because the ffill happens after
result = s.reindex(new_index).ffill()
assert_series_equal(result, expected.astype('float64'))
result = s.reindex(new_index).ffill(downcast='infer')
assert_series_equal(result, expected)
expected = Series([1, 5, 3, 5], index=new_index)
result = s.reindex(new_index, method='ffill')
assert_series_equal(result, expected)
# inference of new dtype
s = Series([True, False, False, True], index=list('abcd'))
new_index = 'agc'
result = s.reindex(list(new_index)).ffill()
expected = Series([True, True, False], index=list(new_index))
assert_series_equal(result, expected)
# GH4618 shifted series downcasting
s = Series(False, index=lrange(0, 5))
result = s.shift(1).fillna(method='bfill')
expected = Series(False, index=lrange(0, 5))
assert_series_equal(result, expected)
def test_reindex_nearest(self):
s = Series(np.arange(10, dtype='int64'))
target = [0.1, 0.9, 1.5, 2.0]
actual = s.reindex(target, method='nearest')
expected = Series(np.around(target).astype('int64'), target)
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest')
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest', tolerance=1)
assert_series_equal(expected, actual)
actual = s.reindex(target, method='nearest', tolerance=0.2)
expected = Series([0, 1, np.nan, 2], target)
assert_series_equal(expected, actual)
def test_reindex_backfill(self):
pass
def test_reindex_int(self):
ts = self.ts[::2]
int_ts = Series(np.zeros(len(ts), dtype=int), index=ts.index)
# this should work fine
reindexed_int = int_ts.reindex(self.ts.index)
# if NaNs introduced
self.assertEqual(reindexed_int.dtype, np.float_)
# NO NaNs introduced
reindexed_int = int_ts.reindex(int_ts.index[::2])
self.assertEqual(reindexed_int.dtype, np.int_)
def test_reindex_bool(self):
# A series other than float, int, string, or object
ts = self.ts[::2]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
# this should work fine
reindexed_bool = bool_ts.reindex(self.ts.index)
# if NaNs introduced
self.assertEqual(reindexed_bool.dtype, np.object_)
# NO NaNs introduced
reindexed_bool = bool_ts.reindex(bool_ts.index[::2])
self.assertEqual(reindexed_bool.dtype, np.bool_)
def test_reindex_bool_pad(self):
# fail
ts = self.ts[5:]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
filled_bool = bool_ts.reindex(self.ts.index, method='pad')
self.assertTrue(isnull(filled_bool[:5]).all())
def test_reindex_like(self):
other = self.ts[::2]
assert_series_equal(self.ts.reindex(other.index),
self.ts.reindex_like(other))
# GH 7179
day1 = datetime(2013, 3, 5)
day2 = datetime(2013, 5, 5)
day3 = datetime(2014, 3, 5)
series1 = Series([5, None, None], [day1, day2, day3])
series2 = Series([None, None], [day1, day3])
result = series1.reindex_like(series2, method='pad')
expected = Series([5, np.nan], index=[day1, day3])
assert_series_equal(result, expected)
def test_reindex_fill_value(self):
# -----------------------------------------------------------
# floats
floats = Series([1., 2., 3.])
result = floats.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
result = floats.reindex([1, 2, 3], fill_value=0)
expected = Series([2., 3., 0], index=[1, 2, 3])
assert_series_equal(result, expected)
# -----------------------------------------------------------
# ints
ints = Series([1, 2, 3])
result = ints.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
# don't upcast
result = ints.reindex([1, 2, 3], fill_value=0)
expected = Series([2, 3, 0], index=[1, 2, 3])
self.assertTrue(issubclass(result.dtype.type, np.integer))
assert_series_equal(result, expected)
# -----------------------------------------------------------
# objects
objects = Series([1, 2, 3], dtype=object)
result = objects.reindex([1, 2, 3])
expected = | Series([2, 3, np.nan], index=[1, 2, 3], dtype=object) | pandas.Series |
from mimic3benchmark.readers import DecompensationReader, InHospitalMortalityReader
import pandas as pd
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.debug("hello")
# reader = DecompensationReader(dataset_dir='data/decompensation/train',
# listfile='data/decompensation/train/listfile.csv')
reader = InHospitalMortalityReader(dataset_dir='data/in-hospital-mortality/train',
listfile='data/in-hospital-mortality/train/listfile.csv')
print("we have 100k indices, and they get split between train and test. ")
print("we also have different episodes split as well")
# print("Contains all the pertinent info for rejoining everything")
print(reader.read_example(10))
print("so we have this 10th example. Now, what do we do to it?")
print(reader.read_example(10)["name"])
patient_id = reader.read_example(10)["name"].split("_")[0]
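# The example name is assumed to look like '<subject_id>_episode<k>_timeseries.csv',
# so the token before the first underscore is the subject (patient) id.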
MIMIC_ROOT = "data/root/train/"
MIMIC_og_data_ROOT = "data/physionet.org/files/mimiciii/1.4/"
notes_table = "NOTEEVENTS.csv"
import os
with open(os.path.join(MIMIC_ROOT, patient_id, "stays.csv"), "r") as file:
print("finding relevant info for {}".format(patient_id))
entries = []
for line in file:
stuff = line.split(",")
print(stuff)
entries.append(stuff[0:3])
entries = entries[1:]
print("why are there two things in here? Because there are two episodes")
print("the reason is that. the mortality prediction only uses 1 episode. The reason is that they may remove or invalidate parts of it for the mortality prediction"
"For instance! the stay may have been < 48 hours"
"but the decompensation prediction episode for examples uses 2 episodes")
# subj_id =
# hadm_id = bb
# icustay_id = cc
print("now, we will do some interesting joining!")
df = pd.read_csv(os.path.join(MIMIC_og_data_ROOT, notes_table))
df.CHARTDATE = pd.to_datetime(df.CHARTDATE)
df.CHARTTIME = | pd.to_datetime(df.CHARTTIME) | pandas.to_datetime |
import pickle
import os
import numpy as np
import torch.nn.functional as F
import torch
import pandas as pd
import anndata as ad
from scETM import scETM
from multiprocessing import Pool
from itertools import repeat
def simulate_mean_diff_once(data):
half = len(data) // 2
ind = np.arange(len(data))
np.random.shuffle(ind)
md = data[ind[:half]].mean(0) - data[ind[half:half * 2]].mean(0)
return md
def simulate_mean_diff(data, repeats):
mds = []
for _ in range(repeats):
mds.append(simulate_mean_diff_once(data))
return mds
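# Illustrative usage (variable names are assumptions): build a null
# distribution of per-topic mean differences from random half-splits of a
# cells-by-topics matrix, e.g.
# null_mds = simulate_mean_diff(delta_kept, repeats=1000)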
def calc_md_pval(series, delta_kept, mds_simulated):
mds = []
unique = series.unique()
for t in unique:
test = delta_kept[series == t] # (cells_test, topics)
ctrl = delta_kept[series != t] # (cells_ctrl, topics)
md = test.mean(0) - ctrl.mean(0) # (topics)
mds.append(md)
mds = np.array(mds) # (cell_types, topics)
mds_simulated = np.array(mds_simulated)
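# Assuming the intended statistic is the usual add-one permutation p-value,
# p = (1 + #{simulated md > observed md}) / (1 + n_repeats); `reps` and
# `topic_kept` are assumed to be defined elsewhere at module level.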
pvals = ((mds_simulated.T[None, ...] > mds[..., None]).sum(-1) + 1) / (reps + 1)  # (cell_types, topics)
pvals = | pd.DataFrame(pvals, index=unique, columns=topic_kept) | pandas.DataFrame |
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
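Each tuple is (sid, estimate value, knowledge date); values are pivoted
into one column per sid and forward-filled from the knowledge date
through `end_date`.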
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_load_one_day()
Tests that a Pipeline requesting multiple columns for a single day
returns the expected values.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests the single-day, multiple-column pipeline with the previous-quarter
estimates loader.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests the single-day, multiple-column pipeline with the next-quarter
estimates loader.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
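For example, one valid interleaving assigned to a sid is
(q1e1, q1e2, q2e1, q2e2) = (2015-01-01, 2015-01-04, 2015-01-14, 2015-01-17):
both Q1 estimates arrive before the Q1 release on 2015-01-13.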
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that requests estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
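In the events below, sid 0's Q1 event date is revised later
(2015-01-12 -> 2015-01-13) while sid 1's is revised earlier
(2015-01-13 -> 2015-01-12), exercising both directions described above.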
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
class WithEstimateWindows(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
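The factor's window_length is chosen so that it reaches back from the
parametrized start date to window_test_start_date: for the first critical
date, 2015-01-09, that is the five trading sessions from 2015-01-05
through 2015-01-09 inclusive.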
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-15"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-02-05"),
pd.Timestamp("2015-02-05"),
],
"estimate": [110.0, 111.0] + [310.0, 311.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10,
}
)
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-07"),
cls.window_test_start_date,
pd.Timestamp("2015-01-17"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
],
"estimate": [120.0, 121.0] + [220.0, 221.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20,
}
)
concatted = pd.concat(
[sid_0_timeline, sid_10_timeline, sid_20_timeline]
).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [
sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i + 1])
] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids(),
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(
self, start_date, num_announcements_out
):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date)
- self.trading_days.get_loc(self.window_test_start_date)
+ 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = (
timelines[num_announcements_out]
.loc[today]
.reindex(trading_days[: today_idx + 1])
.values
)
timeline_start_idx = len(today_timeline) - window_len
assert_almost_equal(estimate, today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp("2015-02-10", tz="utc"),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-21"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111, pd.Timestamp("2015-01-22")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 221, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-02-09")
]
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-09"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-20"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-01-22")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 310, pd.Timestamp("2015-01-09")),
(10, 311, pd.Timestamp("2015-01-15")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-11")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-16")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-01-20"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-02-10")
]
)
return {1: oneq_next, 2: twoq_next}
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
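split_adjusted_asof_date is the date as of which the estimate values are
assumed to be already split-adjusted; depending on the simulation date,
the loader has to un-apply or re-apply splits around it (see the per-sid
comments in make_events and make_splits_data below).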
"""
split_adjusted_asof_date = pd.Timestamp("2015-01-14")
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-09"),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp("2015-01-20"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
],
"estimate": [130.0, 131.0, 230.0, 231.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30,
}
)
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
# split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-15")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [140.0, 240.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40,
}
)
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-12")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [150.0, 250.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50,
}
)
return pd.concat(
[
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
]
)
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
# split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame(
{
SID_FIELD_NAME: 0,
"ratio": (-1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100),
"effective_date": (
pd.Timestamp("2014-01-01"), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp("2015-01-07"),
# Split before Q1 event
pd.Timestamp("2015-01-09"),
# Split before Q1 event
pd.Timestamp("2015-01-13"),
# Split before Q1 event
pd.Timestamp("2015-01-15"),
# Split before Q1 event
pd.Timestamp("2015-01-18"),
# Split after Q1 event and before Q2 event
pd.Timestamp("2015-01-30"),
# Filter out - this is after our date index
pd.Timestamp("2016-01-01"),
),
}
)
sid_10_splits = pd.DataFrame(
{
SID_FIELD_NAME: 10,
"ratio": (0.2, 0.3),
"effective_date": (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp("2015-01-07"),
# Apply a single split before Q1 event.
pd.Timestamp("2015-01-20"),
),
}
)
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame(
{
SID_FIELD_NAME: 20,
"ratio": (
0.4,
0.5,
0.6,
0.7,
0.8,
0.9,
),
"effective_date": (
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
pd.Timestamp("2015-01-30"),
),
}
)
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame(
{
SID_FIELD_NAME: 30,
"ratio": (8, 9, 10, 11, 12),
"effective_date": (
# Split before the event and before the
# split-asof-date.
pd.Timestamp("2015-01-07"),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp("2015-01-09"),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
),
}
)
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame(
{
SID_FIELD_NAME: 40,
"ratio": (13, 14),
"effective_date": (
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-22"),
),
}
)
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame(
{
SID_FIELD_NAME: 50,
"ratio": (15, 16),
"effective_date": (
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
),
}
)
return pd.concat(
[
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
]
)
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-12")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0 * 1 / 16, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-13"),
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-14"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131 * 11, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13, pd.Timestamp("2015-01-09")),
(50, 150.0, | pd.Timestamp("2015-01-09") | pandas.Timestamp |
from datetime import datetime, timedelta
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import iNaT
import pandas.compat as compat
from pandas import (
DatetimeIndex, Index, NaT, Period, Series, Timedelta, TimedeltaIndex,
Timestamp, isna)
from pandas.core.arrays import PeriodArray
from pandas.util import testing as tm
@pytest.mark.parametrize("nat,idx", [(Timestamp("NaT"), DatetimeIndex),
(Timedelta("NaT"), TimedeltaIndex),
(Period("NaT", freq="M"), PeriodArray)])
def test_nat_fields(nat, idx):
for field in idx._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == "weekday":
continue
result = getattr(NaT, field)
assert np.isnan(result)
result = getattr(nat, field)
assert np.isnan(result)
for field in idx._bool_ops:
result = getattr(NaT, field)
assert result is False
result = getattr(nat, field)
assert result is False
def test_nat_vector_field_access():
idx = DatetimeIndex(["1/1/2000", None, None, "1/4/2000"])
for field in DatetimeIndex._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == "weekday":
continue
result = getattr(idx, field)
expected = Index([getattr(x, field) for x in idx])
tm.assert_index_equal(result, expected)
ser = Series(idx)
for field in DatetimeIndex._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == "weekday":
continue
result = getattr(ser.dt, field)
expected = [getattr(x, field) for x in idx]
tm.assert_series_equal(result, Series(expected))
for field in DatetimeIndex._bool_ops:
result = getattr(ser.dt, field)
expected = [getattr(x, field) for x in idx]
tm.assert_series_equal(result, Series(expected))
@pytest.mark.parametrize("klass", [Timestamp, Timedelta, Period])
@pytest.mark.parametrize("value", [None, np.nan, iNaT, float("nan"),
NaT, "NaT", "nat"])
def test_identity(klass, value):
assert klass(value) is NaT
@pytest.mark.parametrize("klass", [Timestamp, Timedelta, Period])
@pytest.mark.parametrize("value", ["", "nat", "NAT", None, np.nan])
def test_equality(klass, value):
if klass is Period and value == "":
pytest.skip("Period cannot parse empty string")
assert klass(value).value == iNaT
@pytest.mark.parametrize("klass", [Timestamp, Timedelta])
@pytest.mark.parametrize("method", ["round", "floor", "ceil"])
@pytest.mark.parametrize("freq", ["s", "5s", "min", "5min", "h", "5h"])
def test_round_nat(klass, method, freq):
# see gh-14940
ts = klass("nat")
round_method = getattr(ts, method)
assert round_method(freq) is ts
@pytest.mark.parametrize("method", [
"astimezone", "combine", "ctime", "dst", "fromordinal",
"fromtimestamp", "isocalendar", "strftime", "strptime",
"time", "timestamp", "timetuple", "timetz", "toordinal",
"tzname", "utcfromtimestamp", "utcnow", "utcoffset",
"utctimetuple", "timestamp"
])
def test_nat_methods_raise(method):
# see gh-9513, gh-17329
msg = "NaTType does not support {method}".format(method=method)
with pytest.raises(ValueError, match=msg):
getattr(NaT, method)()
@pytest.mark.parametrize("method", [
"weekday", "isoweekday"
])
def test_nat_methods_nan(method):
# see gh-9513, gh-17329
assert np.isnan(getattr(NaT, method)())
@pytest.mark.parametrize("method", [
"date", "now", "replace", "today",
"tz_convert", "tz_localize"
])
def test_nat_methods_nat(method):
# see gh-8254, gh-9513, gh-17329
assert getattr(NaT, method)() is NaT
@pytest.mark.parametrize("get_nat", [
lambda x: NaT,
lambda x: Timedelta(x),
lambda x: Timestamp(x)
])
def test_nat_iso_format(get_nat):
# see gh-12300
assert get_nat("NaT").isoformat() == "NaT"
@pytest.mark.parametrize("klass,expected", [
(Timestamp, ["freqstr", "normalize", "to_julian_date", "to_period", "tz"]),
(Timedelta, ["components", "delta", "is_populated", "to_pytimedelta",
"to_timedelta64", "view"])
])
def test_missing_public_nat_methods(klass, expected):
# see gh-17327
#
# NaT should have *most* of the Timestamp and Timedelta methods.
# Here, we check which public methods NaT does not have. We
# ignore any missing private methods.
nat_names = dir(NaT)
klass_names = dir(klass)
missing = [x for x in klass_names if x not in nat_names and
not x.startswith("_")]
missing.sort()
assert missing == expected
def _get_overlap_public_nat_methods(klass, as_tuple=False):
"""
Get overlapping public methods between NaT and another class.
Parameters
----------
klass : type
The class to compare with NaT
as_tuple : bool, default False
Whether to return a list of tuples of the form (klass, method).
Returns
-------
overlap : list
"""
nat_names = dir(NaT)
klass_names = dir(klass)
overlap = [x for x in nat_names if x in klass_names and
not x.startswith("_") and
callable(getattr(klass, x))]
# Timestamp takes precedence over Timedelta in terms of overlap.
if klass is Timedelta:
ts_names = dir(Timestamp)
overlap = [x for x in overlap if x not in ts_names]
if as_tuple:
overlap = [(klass, method) for method in overlap]
overlap.sort()
return overlap
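# Per the parametrized expectations below, e.g.
# _get_overlap_public_nat_methods(Timedelta) should return just ["total_seconds"].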
@pytest.mark.parametrize("klass,expected", [
(Timestamp, ["astimezone", "ceil", "combine", "ctime", "date", "day_name",
"dst", "floor", "fromisoformat", "fromordinal",
"fromtimestamp", "isocalendar", "isoformat", "isoweekday",
"month_name", "now", "replace", "round", "strftime",
"strptime", "time", "timestamp", "timetuple", "timetz",
"to_datetime64", "to_numpy", "to_pydatetime", "today",
"toordinal", "tz_convert", "tz_localize", "tzname",
"utcfromtimestamp", "utcnow", "utcoffset", "utctimetuple",
"weekday"]),
(Timedelta, ["total_seconds"])
])
def test_overlap_public_nat_methods(klass, expected):
# see gh-17327
#
# NaT should have *most* of the Timestamp and Timedelta methods.
# When Timestamp, Timedelta, and NaT overlap, the overlap is considered
# to be with Timestamp and NaT, not Timedelta.
# "fromisoformat" was introduced in 3.7
if klass is Timestamp and not compat.PY37:
expected.remove("fromisoformat")
assert _get_overlap_public_nat_methods(klass) == expected
@pytest.mark.parametrize("compare", (
_get_overlap_public_nat_methods(Timestamp, True) +
_get_overlap_public_nat_methods(Timedelta, True))
)
def test_nat_doc_strings(compare):
# see gh-17327
#
# The docstrings for overlapping methods should match.
klass, method = compare
klass_doc = getattr(klass, method).__doc__
nat_doc = getattr(NaT, method).__doc__
assert klass_doc == nat_doc
_ops = {
"left_plus_right": lambda a, b: a + b,
"right_plus_left": lambda a, b: b + a,
"left_minus_right": lambda a, b: a - b,
"right_minus_left": lambda a, b: b - a,
"left_times_right": lambda a, b: a * b,
"right_times_left": lambda a, b: b * a,
"left_div_right": lambda a, b: a / b,
"right_div_left": lambda a, b: b / a,
}
@pytest.mark.parametrize("op_name", list(_ops.keys()))
@pytest.mark.parametrize("value,val_type", [
(2, "scalar"),
(1.5, "scalar"),
(np.nan, "scalar"),
(timedelta(3600), "timedelta"),
(Timedelta("5s"), "timedelta"),
(datetime(2014, 1, 1), "timestamp"),
(Timestamp("2014-01-01"), "timestamp"),
(Timestamp("2014-01-01", tz="UTC"), "timestamp"),
(Timestamp("2014-01-01", tz="US/Eastern"), "timestamp"),
(pytz.timezone("Asia/Tokyo").localize(datetime(2014, 1, 1)), "timestamp"),
])
def test_nat_arithmetic_scalar(op_name, value, val_type):
# see gh-6873
invalid_ops = {
"scalar": {"right_div_left"},
"timedelta": {"left_times_right", "right_times_left"},
"timestamp": {"left_times_right", "right_times_left",
"left_div_right", "right_div_left"}
}
op = _ops[op_name]
if op_name in invalid_ops.get(val_type, set()):
if (val_type == "timedelta" and "times" in op_name and
isinstance(value, Timedelta)):
msg = "Cannot multiply"
else:
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
op(NaT, value)
else:
if val_type == "timedelta" and "div" in op_name:
expected = np.nan
else:
expected = NaT
assert op(NaT, value) is expected
@pytest.mark.parametrize("val,expected", [
(np.nan, NaT),
(NaT, np.nan),
(np.timedelta64("NaT"), np.nan)
])
def test_nat_rfloordiv_timedelta(val, expected):
# see gh-18846
#
# See also test_timedelta.TestTimedeltaArithmetic.test_floordiv
td = Timedelta(hours=3, minutes=4)
assert td // val is expected
@pytest.mark.parametrize("op_name", [
"left_plus_right", "right_plus_left",
"left_minus_right", "right_minus_left"
])
@pytest.mark.parametrize("value", [
DatetimeIndex(["2011-01-01", "2011-01-02"], name="x"),
DatetimeIndex(["2011-01-01", "2011-01-02"], name="x"),
TimedeltaIndex(["1 day", "2 day"], name="x"),
])
def test_nat_arithmetic_index(op_name, value):
# see gh-11718
exp_name = "x"
exp_data = [NaT] * 2
if isinstance(value, DatetimeIndex) and "plus" in op_name:
expected = DatetimeIndex(exp_data, name=exp_name, tz=value.tz)
else:
expected = TimedeltaIndex(exp_data, name=exp_name)
tm.assert_index_equal(_ops[op_name](NaT, value), expected)
@pytest.mark.parametrize("op_name", [
"left_plus_right", "right_plus_left",
"left_minus_right", "right_minus_left"
])
@pytest.mark.parametrize("box", [TimedeltaIndex, Series])
def test_nat_arithmetic_td64_vector(op_name, box):
# see gh-19124
vec = box(["1 day", "2 day"], dtype="timedelta64[ns]")
box_nat = box([NaT, NaT], dtype="timedelta64[ns]")
tm.assert_equal(_ops[op_name](vec, NaT), box_nat)
def test_nat_pinned_docstrings():
# see gh-17327
assert NaT.ctime.__doc__ == datetime.ctime.__doc__
def test_to_numpy_alias():
# GH 24653: alias .to_numpy() for scalars
expected = NaT.to_datetime64()
result = NaT.to_numpy()
assert isna(expected) and isna(result)
@pytest.mark.parametrize("other", [
| Timedelta(0) | pandas.Timedelta |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Covid statistics Netherlands - functions related to data loading.
Exports:
- `init_data()`: load data, store into global DFS, download updates if necessary.
- `DFS`: dict with initialized data (DataFrames).
Created on Sat Oct 23 16:06:32 2021
@author: @hk_nien on Twitter
"""
import re
import io
import urllib
import urllib.request
from pathlib import Path
import time
import pandas as pd
import nl_regions
try:
DATA_PATH = Path(__file__).parent / 'data'
except NameError:
DATA_PATH = Path('data')
# this will contain dataframes, initialized by init_data().
# - mun: municipality demographics
# - cases: cases by municipality
# - events: Dutch events by date
# - Rt_rivm: RIVM Rt estimates
# - anomalies: anomaly data
DFS = {}
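# Sketch of typical use (module name hypothetical):
#   import nlcovidstats_data as ncd
#   ncd.init_data()           # fills DFS, downloading updates when needed
#   cases = ncd.DFS['cases']  # cases by municipality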
def _str_datetime(t):
return t.strftime('%a %d %b %H:%M')
def load_events():
"""Return events DataFrame.
- index: DateTime start.
- 'Date_stop': DateTime
- 'Description': Description string.
- 'Flags': None or string (to indicate that it is only shown in a
particular type of graph).
"""
df = pd.read_csv(DATA_PATH / 'events_nl.csv', comment='#')
df['Date'] = pd.to_datetime(df['Date']) + | pd.Timedelta('12:00:00') | pandas.Timedelta |
import pendulum as pdl
import sys
sys.path.append(".")
# the memoization-related library
import loguru
import itertools
import pandas as pd
import portion
import klepto.keymaps
import CacheIntervals as ci
from CacheIntervals.utils import flatten
from CacheIntervals.utils import pdl2pd, pd2pdl
from CacheIntervals.utils import Timer
from CacheIntervals.Intervals import pd2po, po2pd
from CacheIntervals.RecordInterval import RecordIntervals, RecordIntervalsPandas
class QueryRecorder:
    '''
    Sentinel helper: an instance passed in place of an interval argument
    tells the cached function to return interval recorders rather than data.
    '''
pass
class MemoizationWithIntervals(object):
    '''
    Optimise the number of calls to a function retrieving
    possibly disjoint intervals:
    - do standard caching for a given function;
    - a call for a date range extending one already cached yields a pandas
      frame obtained by concatenating the cached result with a -- hopefully
      much -- smaller additional query.
    Maintains, per combination of non-interval arguments, a record of the
    intervals that have already been requested; each new interval is split
    against that record.
    '''
keymapper = klepto.keymaps.stringmap(typed=False, flat=False)
def __init__(self,
pos_args=None,
names_kwarg=None,
classrecorder=RecordIntervalsPandas,
aggregation=lambda listdfs: pd.concat(listdfs, axis=0),
debug=False,
# memoization=klepto.lru_cache(
# cache=klepto.archives.hdf_archive(
# f'{pdl.today().to_date_string()}_memoization.hdf5'),
# keymap=keymapper),
memoization=klepto.lru_cache(
cache=klepto.archives.dict_archive(),
keymap=keymapper),
**kwargs):
'''
:param pos_args: the indices of the positional
arguments that will be handled as intervals
:param names_kwarg: the name of the named parameters
that will be handled as intervals
:param classrecorder: the interval recorder type
we want to use
:param memoization: a memoization algorithm
'''
# A dictionary of positional arguments indices
# that are intervals
self.argsi = {}
self.kwargsi = {}
# if pos_args is not None:
# for posarg in pos_args:
# self.argsi[posarg] = classrecorder(**kwargs)
self.pos_args_itvl = pos_args if pos_args is not None else []
#print(self.args)
# if names_kwarg is not None:
# for namedarg in names_kwarg:
# self.kwargsi[namedarg] = classrecorder(**kwargs)
self.names_kwargs_itvl = names_kwarg if names_kwarg is not None else {}
#print(self.kwargs)
self.memoization = memoization
self.aggregation = aggregation
self.debugQ = debug
self.argsdflt = None
self.kwargsdflt = None
self.time_last_call = pdl.today()
self.classrecorder = classrecorder
self.kwargsrecorder = kwargs
self.argssolver = None
self.query_recorder = QueryRecorder()
def __call__(self, f):
'''
The interval memoization leads to several calls to the
standard memoised function and generates a list of return values.
        The aggregation is needed for the doubly lazy
        function to have the same signature as the original function.
        To access the underlying memoized function, pass
        get_function_cachedQ=True to the kwargs of the
        overloaded call (not of this function).
:param f: the function to memoize
:return: the wrapper to the memoized function
'''
if self.argssolver is None:
self.argssolver = ci.Functions.ArgsSolver(f, split_args_kwargsQ=True)
@self.memoization
def f_cached(*args, **kwargs):
'''
The cached function is used for a double purpose:
            1. For standard calls, it will act as the memoised function in a traditional way.
            2. Additionally, when passed parameters of type QueryRecorder, it will create
or retrieve the interval recorders associated with the values of
non-interval parameters.
In this context, we use the cached function as we would a dictionary.
'''
QueryRecorderQ = False
args_new = []
kwargs_new = {}
'''
check whether this is a standard call to the user function
or a request for the interval recorders
'''
for i,arg in enumerate(args):
if isinstance(arg, QueryRecorder):
args_new.append(self.classrecorder(**self.kwargsrecorder))
QueryRecorderQ = True
else:
args_new.append(args[i])
for name in kwargs:
if isinstance(kwargs[name], QueryRecorder):
kwargs_new[name] = self.classrecorder(**self.kwargsrecorder)
QueryRecorderQ = True
else:
kwargs_new[name] = kwargs[name]
if QueryRecorderQ:
return args_new, kwargs_new
return f(*args, **kwargs)
def wrapper(*args, **kwargs):
if kwargs.get('get_function_cachedQ', False):
return f_cached
#loguru.logger.debug(f'function passed: {f_cached}')
loguru.logger.debug(f'args passed: {args}')
loguru.logger.debug(f'kwargs passed: {kwargs}')
# First pass: resolve the recorders
dargs_exp, kwargs_exp = self.argssolver(*args, **kwargs)
# Intervals are identified by position and keyword name
# 1. First get the interval recorders
args_exp = list(dargs_exp.values())
args_exp_copy = args_exp.copy()
kwargs_exp_copy = kwargs_exp.copy()
for i in self.pos_args_itvl:
args_exp_copy[i] = self.query_recorder
for name in self.names_kwargs_itvl:
kwargs_exp_copy[name] = self.query_recorder
args_with_ri, kwargs_with_ri = f_cached(*args_exp_copy, **kwargs_exp_copy)
# 2. Now get the the actual list of intervals
for i in self.pos_args_itvl:
# reuse args_exp_copy to store the list
args_exp_copy[i] = args_with_ri[i](args_exp[i])
for name in self.names_kwargs_itvl:
# reuse kwargs_exp_copy to store the list
kwargs_exp_copy[name] = kwargs_with_ri[name](kwargs_exp[name])
            '''3. Then generate all combinations of parameters
3.a - args'''
ns_args = range(len(args_exp))
lists_possible_args = [[args_exp[i]] if i not in self.pos_args_itvl else args_exp_copy[i] for i in ns_args]
# Take the cartesian product of these
calls_args = list( map(list,itertools.product(*lists_possible_args)))
'''3.b kwargs'''
#kwargs_exp_vals = kwargs_exp_copy.values()
names_kwargs = list(kwargs_exp_copy.keys())
lists_possible_kwargs = [[kwargs_exp[name]] if name not in self.names_kwargs_itvl
else kwargs_exp_copy[name] for name in names_kwargs]
calls_kwargs = list(map(lambda l: dict(zip(names_kwargs,l)), itertools.product(*lists_possible_kwargs)))
calls = list(itertools.product(calls_args, calls_kwargs))
if self.debugQ:
results = []
for call in calls:
with Timer() as timer:
results.append(f_cached(*call[0], **call[1]) )
print('Timer to demonstrate caching:')
timer.display(printQ=True)
else:
results = [f_cached(*call[0], **call[1]) for call in calls]
result = self.aggregation(results)
return result
return wrapper
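# Hedged usage sketch (added for illustration, not part of the original
# module). The decorated function below is an assumption: any callable whose
# first positional argument is a pandas Interval of timestamps could be
# wrapped the same way.
def _example_memoization_with_intervals():
    import pandas as pd

    @MemoizationWithIntervals(pos_args=[0])
    def load_frame(period):
        # Stand-in for an expensive query over [period.left, period.right].
        return pd.DataFrame(
            {'value': 1.0},
            index=pd.date_range(period.left, period.right, freq='D'))

    start = pd.Timestamp('2021-01-01', tz='UTC')
    first = load_frame(pd.Interval(start, start + pd.Timedelta(days=5)))
    # Extending the interval should only query the missing tail; the cached
    # head is concatenated by the aggregation function.
    second = load_frame(pd.Interval(start, start + pd.Timedelta(days=9)))
    return first, second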
if __name__ == "__main__":
import logging
import daiquiri
import pandas as pd
import time
daiquiri.setup(logging.DEBUG)
logging.getLogger('OneTick64').setLevel(logging.WARNING)
logging.getLogger('databnpp.ODCB').setLevel(logging.WARNING)
logging.getLogger('requests_kerberos').setLevel(logging.WARNING)
pd.set_option('display.max_rows', 200)
pd.set_option('display.width', 600)
pd.set_option('display.max_columns', 200)
tssixdaysago = pdl2pd(pdl.yesterday('UTC').add(days=-5))
tsfivedaysago = pdl2pd(pdl.yesterday('UTC').add(days=-4))
tsfourdaysago = pdl2pd(pdl.yesterday('UTC').add(days=-3))
tsthreedaysago = pdl2pd(pdl.yesterday('UTC').add(days=-2))
tstwodaysago = pdl2pd(pdl.yesterday('UTC').add(days=-1))
tsyesterday = pdl2pd(pdl.yesterday('UTC'))
tstoday = pdl2pd(pdl.today('UTC'))
tstomorrow = pdl2pd(pdl.tomorrow('UTC'))
tsintwodays = pdl2pd(pdl.tomorrow('UTC').add(days=1))
tsinthreedays = pdl2pd(pdl.tomorrow('UTC').add(days=2))
def print_calls(calls):
print( list( map( lambda i: (i.left, i.right), calls)))
def print_calls_dates(calls):
print( list( map( lambda i:
(pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()),
calls)))
def display_calls(calls):
loguru.logger.info( list( map( lambda i:
(pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()),
calls)))
# Testing record intervals -> ok
if True:
itvals = RecordIntervals()
calls = itvals(portion.closed(pdl.yesterday(), pdl.today()))
print(list(map( lambda i: (i.lower.to_date_string(), i.upper.to_date_string()), calls)))
print(list(map(lambda i: type(i), calls)))
calls = itvals( portion.closed(pdl.yesterday().add(days=-1), pdl.today().add(days=1)))
#print(calls)
print( list( map( lambda i: (i.lower.to_date_string(), i.upper.to_date_string()),
calls)))
# Testing record intervals pandas -> ok
if True:
itvals = RecordIntervalsPandas()
# yesterday -> today
calls = itvals(pd.Interval(pdl2pd(pdl.yesterday()), pdl2pd(pdl.today()), closed='left'))
print( list( map( lambda i: (pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()), calls)))
# day before yesterday -> tomorrow: should yield 3 intervals
calls = itvals(pd.Interval(pdl2pd(pdl.yesterday().add(days=-1)), pdl2pd(pdl.today().add(days=1))))
print( list( map( lambda i: (pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()), calls)))
# day before yesterday -> day after tomorrow: should yield 4 intervals
calls = itvals(
pd.Interval(pdl2pd(pdl.yesterday().add(days=-1)),
pdl2pd(pdl.tomorrow().add(days=1))))
print(
list(
map(
lambda i:
(pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()),
calls)))
# 2 days before yesterday -> 2day after tomorrow: should yield 6 intervals
calls = itvals(
pd.Interval(pdl2pd(pdl.yesterday().add(days=-2)),
pdl2pd(pdl.tomorrow().add(days=2))))
print(list(map( lambda i:
(pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()),
calls)))
# Further tests on record intervals pandas
if False:
itvals = RecordIntervalsPandas()
calls = itvals(pd.Interval(tstwodaysago, tstomorrow, closed='left'))
display_calls(calls)
calls = itvals( | pd.Interval(tstwodaysago, tsyesterday) | pandas.Interval |
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
import glob
import math
import os
import numpy as np
import pandas as pd
import pytest
from packaging.version import parse as parse_version
import dask
from dask import dataframe as dd
from dask.utils import natural_sort_key
import cudf
import dask_cudf
# Check if create_metadata_file is supported by
# the current dask.dataframe version
need_create_meta = pytest.mark.skipif(
dask_cudf.io.parquet.create_metadata_file is None,
reason="Need create_metadata_file support in dask.dataframe.",
)
nrows = 40
npartitions = 15
df = pd.DataFrame(
{
"x": [i * 7 % 5 for i in range(nrows)], # Not sorted
"y": [i * 2.5 for i in range(nrows)],
},
index=pd.Index(range(nrows), name="index"),
) # Sorted
ddf = dd.from_pandas(df, npartitions=npartitions)
@pytest.mark.parametrize("stats", [True, False])
def test_roundtrip_from_dask(tmpdir, stats):
tmpdir = str(tmpdir)
ddf.to_parquet(tmpdir, engine="pyarrow")
files = sorted(
(os.path.join(tmpdir, f) for f in os.listdir(tmpdir)),
key=natural_sort_key,
)
# Read list of parquet files
ddf2 = dask_cudf.read_parquet(files, gather_statistics=stats)
dd.assert_eq(ddf, ddf2, check_divisions=stats)
# Specify columns=['x']
ddf2 = dask_cudf.read_parquet(
files, columns=["x"], gather_statistics=stats
)
dd.assert_eq(ddf[["x"]], ddf2, check_divisions=stats)
# Specify columns='y'
ddf2 = dask_cudf.read_parquet(files, columns="y", gather_statistics=stats)
dd.assert_eq(ddf[["y"]], ddf2, check_divisions=stats)
# Now include metadata
ddf2 = dask_cudf.read_parquet(tmpdir, gather_statistics=stats)
dd.assert_eq(ddf, ddf2, check_divisions=stats)
# Specify columns=['x'] (with metadata)
ddf2 = dask_cudf.read_parquet(
tmpdir, columns=["x"], gather_statistics=stats
)
dd.assert_eq(ddf[["x"]], ddf2, check_divisions=stats)
# Specify columns='y' (with metadata)
ddf2 = dask_cudf.read_parquet(tmpdir, columns="y", gather_statistics=stats)
dd.assert_eq(ddf[["y"]], ddf2, check_divisions=stats)
def test_roundtrip_from_dask_index_false(tmpdir):
tmpdir = str(tmpdir)
ddf.to_parquet(tmpdir, engine="pyarrow")
ddf2 = dask_cudf.read_parquet(tmpdir, index=False)
dd.assert_eq(ddf.reset_index(drop=False), ddf2)
def test_roundtrip_from_dask_none_index_false(tmpdir):
tmpdir = str(tmpdir)
path = os.path.join(tmpdir, "test.parquet")
df2 = ddf.reset_index(drop=True).compute()
df2.to_parquet(path, engine="pyarrow")
ddf3 = dask_cudf.read_parquet(path, index=False)
dd.assert_eq(df2, ddf3)
@pytest.mark.parametrize("write_meta", [True, False])
def test_roundtrip_from_dask_cudf(tmpdir, write_meta):
tmpdir = str(tmpdir)
gddf = dask_cudf.from_dask_dataframe(ddf)
gddf.to_parquet(tmpdir, write_metadata_file=write_meta)
gddf2 = dask_cudf.read_parquet(tmpdir)
dd.assert_eq(gddf, gddf2, check_divisions=write_meta)
def test_roundtrip_none_rangeindex(tmpdir):
fn = str(tmpdir.join("test.parquet"))
gdf = cudf.DataFrame(
{"id": [0, 1, 2, 3], "val": [None, None, 0, 1]},
index=pd.RangeIndex(start=5, stop=9),
)
dask_cudf.from_cudf(gdf, npartitions=2).to_parquet(fn)
ddf2 = dask_cudf.read_parquet(fn)
dd.assert_eq(gdf, ddf2, check_index=True)
def test_roundtrip_from_pandas(tmpdir):
fn = str(tmpdir.join("test.parquet"))
# First without specifying an index
dfp = df.copy()
dfp.to_parquet(fn, engine="pyarrow", index=False)
dfp = dfp.reset_index(drop=True)
ddf2 = dask_cudf.read_parquet(fn)
dd.assert_eq(dfp, ddf2, check_index=True)
# Now, specifying an index
dfp = df.copy()
dfp.to_parquet(fn, engine="pyarrow", index=True)
ddf2 = dask_cudf.read_parquet(fn, index=["index"])
dd.assert_eq(dfp, ddf2, check_index=True)
def test_strings(tmpdir):
fn = str(tmpdir)
dfp = pd.DataFrame(
{"a": ["aa", "bbb", "cccc"], "b": ["hello", "dog", "man"]}
)
dfp.set_index("a", inplace=True, drop=True)
ddf2 = dd.from_pandas(dfp, npartitions=2)
ddf2.to_parquet(fn, engine="pyarrow")
read_df = dask_cudf.read_parquet(fn, index=["a"])
dd.assert_eq(ddf2, read_df.compute().to_pandas())
read_df_cats = dask_cudf.read_parquet(
fn, index=["a"], strings_to_categorical=True
)
dd.assert_eq(read_df_cats.dtypes, read_df_cats.compute().dtypes)
dd.assert_eq(read_df_cats.dtypes[0], "int32")
def test_dask_timeseries_from_pandas(tmpdir):
fn = str(tmpdir.join("test.parquet"))
ddf2 = dask.datasets.timeseries(freq="D")
pdf = ddf2.compute()
pdf.to_parquet(fn, engine="pyarrow")
read_df = dask_cudf.read_parquet(fn)
dd.assert_eq(ddf2, read_df.compute())
@pytest.mark.parametrize("index", [False, None])
@pytest.mark.parametrize("stats", [False, True])
def test_dask_timeseries_from_dask(tmpdir, index, stats):
fn = str(tmpdir)
ddf2 = dask.datasets.timeseries(freq="D")
ddf2.to_parquet(fn, engine="pyarrow", write_index=index)
read_df = dask_cudf.read_parquet(fn, index=index, gather_statistics=stats)
dd.assert_eq(
ddf2, read_df, check_divisions=(stats and index), check_index=index
)
@pytest.mark.parametrize("index", [False, None])
@pytest.mark.parametrize("stats", [False, True])
def test_dask_timeseries_from_daskcudf(tmpdir, index, stats):
fn = str(tmpdir)
ddf2 = dask_cudf.from_cudf(
cudf.datasets.timeseries(freq="D"), npartitions=4
)
ddf2.name = ddf2.name.astype("object")
ddf2.to_parquet(fn, write_index=index)
read_df = dask_cudf.read_parquet(fn, index=index, gather_statistics=stats)
dd.assert_eq(
ddf2, read_df, check_divisions=(stats and index), check_index=index
)
@pytest.mark.parametrize("index", [False, True])
def test_empty(tmpdir, index):
fn = str(tmpdir)
dfp = pd.DataFrame({"a": [11.0, 12.0, 12.0], "b": [4, 5, 6]})[:0]
if index:
dfp.set_index("a", inplace=True, drop=True)
ddf2 = dd.from_pandas(dfp, npartitions=2)
ddf2.to_parquet(fn, write_index=index, engine="pyarrow")
read_df = dask_cudf.read_parquet(fn)
dd.assert_eq(ddf2, read_df.compute())
def test_filters(tmpdir):
tmp_path = str(tmpdir)
df = pd.DataFrame({"x": range(10), "y": list("aabbccddee")})
ddf = dd.from_pandas(df, npartitions=5)
assert ddf.npartitions == 5
ddf.to_parquet(tmp_path, engine="pyarrow")
a = dask_cudf.read_parquet(tmp_path, filters=[("x", ">", 4)])
assert a.npartitions == 3
assert (a.x > 3).all().compute()
b = dask_cudf.read_parquet(tmp_path, filters=[("y", "==", "c")])
assert b.npartitions == 1
b = b.compute().to_pandas()
assert (b.y == "c").all()
c = dask_cudf.read_parquet(
tmp_path, filters=[("y", "==", "c"), ("x", ">", 6)]
)
assert c.npartitions <= 1
assert not len(c)
def test_filters_at_row_group_level(tmpdir):
tmp_path = str(tmpdir)
df = pd.DataFrame({"x": range(10), "y": list("aabbccddee")})
ddf = dd.from_pandas(df, npartitions=5)
assert ddf.npartitions == 5
ddf.to_parquet(tmp_path, engine="pyarrow", row_group_size=10 / 5)
a = dask_cudf.read_parquet(tmp_path, filters=[("x", "==", 1)])
assert a.npartitions == 1
assert (a.shape[0] == 2).compute()
ddf.to_parquet(tmp_path, engine="pyarrow", row_group_size=1)
b = dask_cudf.read_parquet(tmp_path, filters=[("x", "==", 1)])
assert b.npartitions == 1
assert (b.shape[0] == 1).compute()
@pytest.mark.parametrize("metadata", [True, False])
@pytest.mark.parametrize("daskcudf", [True, False])
@pytest.mark.parametrize(
"parts", [["year", "month", "day"], ["year", "month"], ["year"]]
)
def test_roundtrip_from_dask_partitioned(tmpdir, parts, daskcudf, metadata):
tmpdir = str(tmpdir)
df = pd.DataFrame()
df["year"] = [2018, 2019, 2019, 2019, 2020, 2021]
df["month"] = [1, 2, 3, 3, 3, 2]
df["day"] = [1, 1, 1, 2, 2, 1]
df["data"] = [0, 0, 0, 0, 0, 0]
df.index.name = "index"
if daskcudf:
ddf2 = dask_cudf.from_cudf(cudf.from_pandas(df), npartitions=2)
ddf2.to_parquet(
tmpdir, write_metadata_file=metadata, partition_on=parts
)
else:
ddf2 = dd.from_pandas(df, npartitions=2)
ddf2.to_parquet(
tmpdir,
engine="pyarrow",
write_metadata_file=metadata,
partition_on=parts,
)
df_read = dd.read_parquet(tmpdir, engine="pyarrow")
gdf_read = dask_cudf.read_parquet(tmpdir)
# TODO: Avoid column selection after `CudfEngine`
# can be aligned with dask/dask#6534
columns = list(df_read.columns)
assert set(df_read.columns) == set(gdf_read.columns)
dd.assert_eq(
df_read.compute(scheduler=dask.get)[columns],
gdf_read.compute(scheduler=dask.get)[columns],
)
assert gdf_read.index.name == "index"
# Check that we don't have uuid4 file names
for _, _, files in os.walk(tmpdir):
for fn in files:
if not fn.startswith("_"):
assert "part" in fn
if parse_version(dask.__version__) > parse_version("2021.07.0"):
# This version of Dask supports `aggregate_files=True`.
# Check that we can aggregate by a partition name.
df_read = dd.read_parquet(
tmpdir, engine="pyarrow", aggregate_files="year"
)
gdf_read = dask_cudf.read_parquet(tmpdir, aggregate_files="year")
dd.assert_eq(df_read, gdf_read)
@pytest.mark.parametrize("metadata", [True, False])
@pytest.mark.parametrize("chunksize", [None, 1024, 4096, "1MiB"])
def test_chunksize(tmpdir, chunksize, metadata):
nparts = 2
df_size = 100
row_group_size = 5
df = pd.DataFrame(
{
"a": np.random.choice(["apple", "banana", "carrot"], size=df_size),
"b": np.random.random(size=df_size),
"c": np.random.randint(1, 5, size=df_size),
"index": np.arange(0, df_size),
}
).set_index("index")
ddf1 = dd.from_pandas(df, npartitions=nparts)
ddf1.to_parquet(
str(tmpdir),
engine="pyarrow",
row_group_size=row_group_size,
write_metadata_file=metadata,
)
if metadata:
path = str(tmpdir)
else:
dirname = str(tmpdir)
files = os.listdir(dirname)
assert "_metadata" not in files
path = os.path.join(dirname, "*.parquet")
ddf2 = dask_cudf.read_parquet(
path,
chunksize=chunksize,
split_row_groups=True,
gather_statistics=True,
)
ddf2.compute(scheduler="synchronous")
dd.assert_eq(ddf1, ddf2, check_divisions=False)
num_row_groups = df_size // row_group_size
if not chunksize:
assert ddf2.npartitions == num_row_groups
else:
assert ddf2.npartitions < num_row_groups
if parse_version(dask.__version__) > parse_version("2021.07.0"):
# This version of Dask supports `aggregate_files=True`.
# Test that it works as expected.
ddf3 = dask_cudf.read_parquet(
path,
chunksize=chunksize,
split_row_groups=True,
gather_statistics=True,
aggregate_files=True,
)
dd.assert_eq(ddf1, ddf3, check_divisions=False)
if not chunksize:
# Files should not be aggregated
assert ddf3.npartitions == num_row_groups
elif chunksize == "1MiB":
# All files should be aggregated into
# one output partition
assert ddf3.npartitions == 1
else:
# Files can be aggregated together, but
# chunksize is not large enough to produce
# a single output partition
assert ddf3.npartitions < num_row_groups
@pytest.mark.parametrize("row_groups", [1, 3, 10, 12])
@pytest.mark.parametrize("index", [False, True])
def test_row_groups_per_part(tmpdir, row_groups, index):
nparts = 2
df_size = 100
row_group_size = 5
file_row_groups = 10 # Known apriori
npartitions_expected = math.ceil(file_row_groups / row_groups) * 2
df = pd.DataFrame(
{
"a": np.random.choice(["apple", "banana", "carrot"], size=df_size),
"b": np.random.random(size=df_size),
"c": np.random.randint(1, 5, size=df_size),
"index": np.arange(0, df_size),
}
)
if index:
df = df.set_index("index")
ddf1 = dd.from_pandas(df, npartitions=nparts)
ddf1.to_parquet(
str(tmpdir),
engine="pyarrow",
row_group_size=row_group_size,
write_metadata_file=True,
)
ddf2 = dask_cudf.read_parquet(
str(tmpdir),
row_groups_per_part=row_groups,
)
dd.assert_eq(ddf1, ddf2, check_divisions=False)
assert ddf2.npartitions == npartitions_expected
@need_create_meta
@pytest.mark.parametrize("partition_on", [None, "a"])
def test_create_metadata_file(tmpdir, partition_on):
tmpdir = str(tmpdir)
# Write ddf without a _metadata file
df1 = cudf.DataFrame({"b": range(100), "a": ["A", "B", "C", "D"] * 25})
df1.index.name = "myindex"
ddf1 = dask_cudf.from_cudf(df1, npartitions=10)
ddf1.to_parquet(
tmpdir,
write_metadata_file=False,
partition_on=partition_on,
)
# Add global _metadata file
if partition_on:
fns = glob.glob(os.path.join(tmpdir, partition_on + "=*/*.parquet"))
else:
fns = glob.glob(os.path.join(tmpdir, "*.parquet"))
dask_cudf.io.parquet.create_metadata_file(
fns,
split_every=3, # Force tree reduction
)
# Check that we can now read the ddf
# with the _metadata file present
ddf2 = dask_cudf.read_parquet(
tmpdir,
gather_statistics=True,
split_row_groups=False,
index="myindex",
)
if partition_on:
ddf1 = df1.sort_values("b")
ddf2 = ddf2.compute().sort_values("b")
ddf2.a = ddf2.a.astype("object")
dd.assert_eq(ddf1, ddf2)
@need_create_meta
def test_create_metadata_file_inconsistent_schema(tmpdir):
# NOTE: This test demonstrates that the CudfEngine
# can be used to generate a global `_metadata` file
# even if there are inconsistent schemas in the dataset.
# Write file 0
df0 = pd.DataFrame({"a": [None] * 10, "b": range(10)})
p0 = os.path.join(tmpdir, "part.0.parquet")
df0.to_parquet(p0, engine="pyarrow")
# Write file 1
b = list(range(10))
b[1] = None
df1 = pd.DataFrame({"a": range(10), "b": b})
p1 = os.path.join(tmpdir, "part.1.parquet")
df1.to_parquet(p1, engine="pyarrow")
# New pyarrow-dataset base can handle an inconsistent
# schema (even without a _metadata file), but computing
# and dtype validation may fail
ddf1 = dask_cudf.read_parquet(str(tmpdir), gather_statistics=True)
# Add global metadata file.
# Dask-CuDF can do this without requiring schema
# consistency.
dask_cudf.io.parquet.create_metadata_file([p0, p1])
# Check that we can still read the ddf
# with the _metadata file present
ddf2 = dask_cudf.read_parquet(str(tmpdir), gather_statistics=True)
# Check that the result is the same with and
# without the _metadata file. Note that we must
# call `compute` on `ddf1`, because the dtype of
# the inconsistent column ("a") may be "object"
# before computing, and "int" after
dd.assert_eq(ddf1.compute(), ddf2)
dd.assert_eq(ddf1.compute(), ddf2.compute())
@pytest.mark.parametrize(
"data",
[
["dog", "cat", "fish"],
[[0], [1, 2], [3]],
[None, [1, 2], [3]],
[{"f1": 1}, {"f1": 0, "f2": "dog"}, {"f2": "cat"}],
[None, {"f1": 0, "f2": "dog"}, {"f2": "cat"}],
],
)
def test_cudf_dtypes_from_pandas(tmpdir, data):
# Simple test that we can read in list and struct types
fn = str(tmpdir.join("test.parquet"))
dfp = | pd.DataFrame({"data": data}) | pandas.DataFrame |
from pathlib import Path
from shutil import copyfile
import pandas as pd
import numpy as np
import unicodedata
from haversine import haversine
import time
import ast
from sklearn.metrics import average_precision_score
import statistics
"""
Evaluate ranking for MAP
"""
def find_closest_distance(altname, gscoords):
"""
This method returns the distance (in kilometers) between the
candidate location and the gold standard coordinates. In the
case that a candidate name in the gazetteer can refer to more
than one entity, we select the entity closest to the gold
standard coordinates.
"""
tCoords = [list(k) for k in altname.values]
distance = 100000 # we instantiate "distance" with an impossibly large distance
for candCoord in tCoords:
candDistance = haversine(candCoord, gscoords)
if candDistance <= distance:
distance = candDistance
return distance
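# Added note: haversine() returns the great-circle distance in kilometres by
# default, e.g. haversine((52.52, 13.41), (48.86, 2.35)) is roughly 880 km
# (Berlin to Paris); the coordinates here are illustrative only.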
def mapeval_candidates(cand_distance, gazetteer, coords, km, maxCands, metrics, lowercase):
if type(cand_distance) == list:
cand_distance = cand_distance[0]
candidates_fd = sorted(cand_distance.items(), key=lambda kv: kv[1])[:maxCands]
highest = 0.0
try:
highest = candidates_fd[-1][1]
except IndexError:
highest = 0.0
candidates = []
for c in candidates_fd:
candidates.append(c[0])
closest_candidates = []
for cand in candidates:
if lowercase:
candcoords = gazetteer[gazetteer["altname"] == unicodedata.normalize('NFKD', str(cand.lower()))][["lat", "lon"]]
else:
candcoords = gazetteer[gazetteer["altname"] == unicodedata.normalize('NFKD', str(cand))][["lat", "lon"]]
closest_candidates.append(find_closest_distance(candcoords, coords))
y_truearray = []
y_scorearray = []
for i in range(len(closest_candidates)):
if closest_candidates[i] <= km:
y_truearray.append(1)
else:
y_truearray.append(0)
if metrics == "faiss":
if highest == 0.0:
y_scorearray.append(0.0)
else:
y_scorearray.append(1.0 - cand_distance[candidates[i]]/highest)
else:
y_scorearray.append(1.0 - cand_distance[candidates[i]])
return y_truearray, y_scorearray
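# Hedged usage note (added for illustration): the two arrays returned above
# are meant to be scored with scikit-learn's average precision, e.g.
#     y_true, y_score = mapeval_candidates(cand_distance, gazetteer,
#                                          (51.51, -0.13), km=10, maxCands=20,
#                                          metrics="faiss", lowercase=True)
#     ap = average_precision_score(y_true, y_score) if sum(y_true) > 0 else 0.0
# The coordinates and thresholds are assumed example values.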
def evaluate_ranking(gazetteer_name, candrank_dataset, deezymatch_model):
maxCands = 20 # Candidates cutoff for MAP
# if not Path("mapped_results/DeezyMapEval_" + candrank_dataset + "_" + gazetteer_name + "_" + deezymatch_model + ".txt", "w").is_file() and not Path("mapped_results/LevDamMapEval_" + candrank_dataset + "_" + gazetteer_name + ".txt").is_file() and not Path("mapped_results/ExactMapEval_" + candrank_dataset + "_" + gazetteer_name + ".txt").is_file():
# Load gazetteer (for DeezyMatch)
gazetteer = pd.read_pickle("../datasets/gazetteers/" + gazetteer_name + ".pkl")
gazetteer = gazetteer[gazetteer['lat'].notna()]
gazetteer = gazetteer[gazetteer['lon'].notna()]
gazetteer["altname"] = gazetteer["altname"].str.normalize("NFKD")
# Load gazetteer and lower-case it (for LevDam)
gazetteer_lc = pd.read_pickle("../datasets/gazetteers/" + gazetteer_name + ".pkl")
gazetteer_lc = gazetteer_lc[gazetteer_lc['lat'].notna()]
gazetteer_lc = gazetteer_lc[gazetteer_lc['lon'].notna()]
gazetteer_lc["altname"] = gazetteer_lc["altname"].str.lower().str.normalize("NFKD")
# Load gold standard dataset
datasetdf = | pd.read_pickle("../datasets/candidate_ranking_datasets/" + candrank_dataset + ".pkl") | pandas.read_pickle |
"""
Importing necessary libraires.
"""
import tweepy
import json
import re
import string
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
import pandas as pd
from tensorflow.python.keras.preprocessing.text import Tokenizer
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from tensorflow.python.keras.models import model_from_json
import random
from flask import Flask,render_template,url_for,request
import numpy as np
import emoji
app = Flask(__name__)
"""
Function to render page http://127.0.0.1:5000/
"""
@app.route('/')
def hello(st=''):
print("HOME")
return render_template('home.html',title='home')
"""
Function to render page http://127.0.0.1:5000/analysis
"""
@app.route('/analysis',methods=['POST','GET','OPTIONS'])
def analysis():
"""
Taking search query into the variable 'key'.
"""
key=request.form['InputText']
"""
    Performing authentication to access Twitter's data.
    (Use Twitter developer credentials below and uncomment the following piece of commented code.)
"""
"""
consumer_key = ''
consumer_secret = ''
access_token = ''
access_token_secret = ''
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
"""
"""
Creating an api object using tweepy.
"""
    api = tweepy.API(auth)
"""
Fetching tweets and storing them in results array. 'num' variable denotes the number of tweets to be fetched.
"""
results = []
num = 50
    for tweet in tweepy.Cursor(api.search, q=key, lang="en").items(num):
results.append(tweet)
"""
Creating a pandas dataframe to capture tweet information.
"""
dataset=pd.DataFrame()
dataset["tweet_id"]=pd.Series([tweet.id for tweet in results])
dataset["username"]=pd.Series([tweet.author.screen_name for tweet in results])
dataset["text"]=pd.Series([tweet.text for tweet in results])
dataset["followers"]=pd.Series([tweet.author.followers_count for tweet in results])
dataset["hashtags"]=pd.Series([tweet.entities.get('hashtags') for tweet in results])
dataset["emojis"]=pd.Series([','.join(c for c in tweet.text if c in emoji.UNICODE_EMOJI) for tweet in results])
"""
    The following piece of code generates a word cloud of the hashtags used in the fetched tweets.
"""
Hashtag_df = pd.DataFrame(columns=["Hashtag"])
j = 0
for tweet in range(0,len(results)):
hashtag = results[tweet].entities.get('hashtags')
for i in range(0,len(hashtag)):
Htag = hashtag[i]['text']
Hashtag_df.at[j,'Hashtag']=Htag
j = j+1
Hashtag_Combined = " ".join(Hashtag_df['Hashtag'].values.astype(str))
text=" ".join(dataset['text'].values.astype(str))
cleaned_text = " ".join([word for word in text.split()
if word !="https"
and word !="RT"
and word !="co"
])
wc = WordCloud(width=500,height=500,background_color="white", stopwords=STOPWORDS).generate(Hashtag_Combined)
plt.imshow(wc)
plt.axis("off")
r =random.randint(1,101)
st = 'static\hashtag'+ str(r) +'.png'
plt.savefig(st, dpi=300)
"""
    The following piece of code gets a list of the top 5 hashtags.
"""
hashtag=Hashtag_Combined.split(" ")
df=pd.DataFrame()
df['hashtags']= | pd.Series([i for i in hashtag]) | pandas.Series |
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 1000)
pd.set_option('display.colheader_justify', 'center')
| pd.set_option('display.precision', 3) | pandas.set_option |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
description: Qualify Tidepool Datasets
version: 0.0.1
created: 2018-02-21
author: <NAME>
dependencies:
* this requires the tidals package, which is automatically downloaded when
you load in the tidepool data analytics development (tda-dev) environment
license: BSD-2-Clause
TODO:
* [] make saving the metadata optional, and by default to no
"""
# %% REQUIRED LIBRARIES
import os
import sys
import argparse
import json
import pandas as pd
import datetime as dt
import importlib
# load tidals package locally if it does not exist globally
if importlib.util.find_spec("tidals") is None:
tidalsPath = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
"..", "..", "tidepool-analysis-tools"))
if tidalsPath not in sys.path:
sys.path.insert(0, tidalsPath)
import tidals as td
# %% USER INPUTS
codeDescription = "Qualify Tidepool datasets"
parser = argparse.ArgumentParser(description=codeDescription)
parser.add_argument("-d",
"--date-stamp",
dest="dateStamp",
default=dt.datetime.now().strftime("%Y-%m-%d"),
help="date in '%Y-%m-%d' format needed to call unique " +
"donor list (e.g., PHI-2018-03-02-uniqueDonorList)")
parser.add_argument("-o",
"--output-data-path",
dest="dataPath",
default=os.path.abspath(
os.path.join(
os.path.dirname(__file__), "..", "data")),
help="the output path where the data is stored")
parser.add_argument("-s",
"--start-index",
dest="startIndex",
default=0,
help="donor index (integer) to start at")
parser.add_argument("-e",
"--end-index",
dest="endIndex",
default=-1,
help="donor index (integer) to end at," +
"-1 will result in 1 file if startIndex != 0," +
"and will default to number of unique donors" +
"if startIndex = 0, or endIndex = -2")
parser.add_argument("-q",
"--qualification-criteria",
dest="qualificationCriteria",
default=os.path.abspath(
os.path.join(
os.path.dirname(__file__),
"tidepool-qualification-criteria.json")),
type=argparse.FileType('r'),
help="JSON file to be processed, see " +
"tidepool-qualification-critier.json " +
"for a list of required fields")
args = parser.parse_args()
# %% FUNCTIONS
def defineStartAndEndIndex(args, nDonors):
startIndex = int(args.startIndex)
endIndex = int(args.endIndex)
if endIndex == -1:
if startIndex == 0:
endIndex = nDonors
else:
endIndex = startIndex + 1
if endIndex == -2:
endIndex = nDonors
return startIndex, endIndex
def removeNegativeDurations(df):
if "duration" in list(df):
nNegativeDurations = sum(df.duration < 0)
if nNegativeDurations > 0:
df = df[~(df.duration < 0)]
return df, nNegativeDurations
def addUploadDate(df):
uploadTimes = pd.DataFrame(df[df.type == "upload"].groupby("uploadId").time.describe()["top"])
uploadTimes.reset_index(inplace=True)
uploadTimes.rename(columns={"top": "uploadTime"}, inplace=True)
df = pd.merge(df, uploadTimes, how='left', on='uploadId')
return df
def filterAndSort(groupedDF, filterByField, sortByField):
filterDF = groupedDF.get_group(filterByField).dropna(axis=1, how="all")
filterDF = filterDF.sort_values(sortByField)
return filterDF
def getClosedLoopDays(groupedData, qualCriteria, metadata):
# filter by basal data and sort by time
if "basal" in groupedData.type.unique():
basalData = filterAndSort(groupedData, "basal", "time")
# get closed loop days
nTB = qualCriteria["nTempBasalsPerDayIsClosedLoop"]
tbDataFrame = basalData.loc[basalData.deliveryType == "temp", ["time"]]
tbDataFrame.index = pd.to_datetime(tbDataFrame["time"])
tbDataFrame = tbDataFrame.drop(["time"], axis=1)
tbDataFrame["basal.temp.count"] = 1
nTempBasalsPerDay = tbDataFrame.resample("D").sum()
closedLoopDF = pd.DataFrame(nTempBasalsPerDay,
index=nTempBasalsPerDay.index.date)
closedLoopDF["date"] = nTempBasalsPerDay.index.date
closedLoopDF["basal.closedLoopDays"] = \
closedLoopDF["basal.temp.count"] >= nTB
nClosedLoopDays = closedLoopDF["basal.closedLoopDays"].sum()
# get the number of days with 670g
basalData["date"] = pd.to_datetime(basalData.time).dt.date
bdGroup = basalData.groupby("date")
topPump = bdGroup.deviceId.describe()["top"]
med670g = pd.DataFrame(topPump.str.contains("1780")).rename(columns={"top":"670g"})
med670g.reset_index(inplace=True)
n670gDays = med670g["670g"].sum()
else:
closedLoopDF = pd.DataFrame(columns=["basal.closedLoopDays", "date"])
med670g = pd.DataFrame(columns=["670g", "date"])
nClosedLoopDays = 0
n670gDays = 0
metadata["basal.closedLoopDays.count"] = nClosedLoopDays
metadata["med670gDays.count"] = n670gDays
return closedLoopDF, med670g, metadata
def removeInvalidCgmValues(df):
nBefore = len(df)
    # remove values < 38 mg/dL and > 402 mg/dL (the thresholds below are the
    # same limits expressed in mmol/L)
df = df.query("(value >= 2.109284236597303) and" +
"(value <= 22.314006924003046)")
nRemoved = nBefore - len(df)
return df, nRemoved
def removeDuplicates(df, criteriaDF):
nBefore = len(df)
df = df.loc[~(df[criteriaDF].duplicated())]
df = df.reset_index(drop=True)
nDuplicatesRemoved = nBefore - len(df)
return df, nDuplicatesRemoved
def removeCgmDuplicates(df, timeCriterion):
if timeCriterion in df:
df.sort_values(by=[timeCriterion, "uploadTime"],
ascending=[False, False],
inplace=True)
dfIsNull = df[df[timeCriterion].isnull()]
dfNotNull = df[df[timeCriterion].notnull()]
dfNotNull, nDuplicatesRemoved = removeDuplicates(dfNotNull, [timeCriterion, "value"])
df = pd.concat([dfIsNull, dfNotNull])
df.sort_values(by=[timeCriterion, "uploadTime"],
ascending=[False, False],
inplace=True)
else:
nDuplicatesRemoved = 0
return df, nDuplicatesRemoved
def getStartAndEndTimes(df, dateTimeField):
dfBeginDate = df[dateTimeField].min()
dfEndDate = df[dateTimeField].max()
return dfBeginDate, dfEndDate
def getCalculatorCounts(groupedData, metadata):
if "wizard" in groupedData.type.unique():
# filter by calculator data and sort by time
calculatorData = filterAndSort(groupedData, "wizard", "time")
# add dayIndex
calculatorData["dayIndex"] = pd.DatetimeIndex(calculatorData["time"]).date
# get rid of duplicates
calculatorData, nDuplicatesRemoved = \
removeDuplicates(calculatorData, ["time", "bolus"])
metadata["calculator.duplicatesRemoved.count"] = nDuplicatesRemoved
# get start and end times
calculatorBeginDate, calculatorEndDate = getStartAndEndTimes(calculatorData, "dayIndex")
metadata["calculator.beginDate"] = calculatorBeginDate
metadata["calculator.endDate"] = calculatorEndDate
# group by day and get number of calculator boluses
catDF = calculatorData.groupby(calculatorData["dayIndex"])
calculatorPerDay = pd.DataFrame()
calculatorPerDay["calculator.count"] = catDF.bolus.count()
calculatorPerDay["date"] = calculatorPerDay.index
else:
calculatorPerDay = pd.DataFrame(columns=["calculator.count", "date"])
return calculatorPerDay, metadata
def getListOfDexcomCGMDays(df):
# search for dexcom cgms
searchfor = ["Dex", "tan", "IR", "unk"]
# create dexcom boolean field
if "deviceId" in df.columns.values:
totalCgms = len(df.deviceId.notnull())
df["dexcomCGM"] = df.deviceId.str.contains("|".join(searchfor))
percentDexcomCGM = df.dexcomCGM.sum() / totalCgms * 100
return df, percentDexcomCGM
def dexcomCriteria(df):
# if day is closed loop or non-dexcom set to 0
isClosedLoop = (df["basal.closedLoopDays"].fillna(False))
isNonDexcom = ~(df["cgm.dexcomOnly"].fillna(False))
df.loc[(isClosedLoop | isNonDexcom),
["bolus.count", "cgm.count"]] = 0
return df
def isQualifyingDay(df, bolusCriteria, percentCgmCriteria, cgmPoints1Day):
df["cgm.percentage"] = df["cgm.count"] / cgmPoints1Day
df["qualifyingDay"] = ((df["bolus.count"] >= bolusCriteria) &
(df["cgm.percentage"] >= percentCgmCriteria))
# calculate the gaps in the data and group them
df["nonQualifyingDay"] = ~df["qualifyingDay"]
df["qualifyAndNotQualifyGroups"] = \
((df.qualifyingDay != df.qualifyingDay.shift()).cumsum())
df["gapGroup"] = df["qualifyAndNotQualifyGroups"] * df["nonQualifyingDay"]
return df
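# Worked example (added; the criteria values are assumptions -- they actually
# come from the qualification-criteria JSON): with bolusCriteria = 3,
# percentCgmCriteria = 0.7 and cgmPoints1Day = 288 (5-minute CGM data), a day
# with 4 boluses and 230 CGM points has cgm.percentage = 230/288 ~= 0.80,
# so it qualifies; a day with only 2 boluses fails regardless of CGM coverage.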
def getSummaryStats(df, dayStatsDF):
df["contiguous.maxCgmPercentage"] = dayStatsDF["cgm.percentage"].max()
numberQualifyingDays = dayStatsDF.qualifyingDay.sum()
df["qualifyingDays.count"] = numberQualifyingDays
numberContiguousDays = len(dayStatsDF)
df["contiguous.count"] = numberContiguousDays
percentQualifyingDays = round((numberQualifyingDays /
numberContiguousDays) * 100, 1)
df["qualifyingDays.percent"] = percentQualifyingDays
avgBolusCalculations = \
round(dayStatsDF.loc[dayStatsDF.qualifyingDay == 1,
"calculator.count"].mean(), 1)
df["qualfiyingDays.avgBolusCalculatorCount"] = avgBolusCalculations
return df
def getQualifyingTier(df, criteriaName, contDayCriteria,
avgBolusCalculationsCriteria, percQualDayCriteria,
maxGapToContRatioCriteria):
tempDF = pd.DataFrame(columns=["avgBolusCalculationsPerDay",
"numberContiguousDays",
"percentQualifyingDays",
"maxGapToContiguousRatio",
"tier"])
for i in range(0, len(df)-(contDayCriteria-1)):
tempIndex = min(i+contDayCriteria, len(df))
numberContiguousDays = df["date"].iloc[i:tempIndex].count()
tempDF.loc[i, "numberContiguousDays"] = numberContiguousDays
avgBolusCalculationsPerDay = \
df["calculator.count"].iloc[i:tempIndex].mean()
tempDF.loc[i, "avgBolusCalculationsPerDay"] = \
avgBolusCalculationsPerDay
percentQualifyingDays = \
df.qualifyingDay.iloc[i:tempIndex].sum() / contDayCriteria * 100
tempDF.loc[i, "percentQualifyingDays"] = percentQualifyingDays
gapGroups = \
df.gapGroup.iloc[i:tempIndex].loc[df.gapGroup > 0].astype(str)
if len(gapGroups) > 0:
maxGapToContiguousRatio = \
gapGroups.describe()["freq"] / contDayCriteria * 100
else:
maxGapToContiguousRatio = 0
tempDF.loc[i, "maxGapToContiguousRatio"] = maxGapToContiguousRatio
tier = (numberContiguousDays == contDayCriteria
and avgBolusCalculationsPerDay >= avgBolusCalculationsCriteria
and percentQualifyingDays >= percQualDayCriteria
and maxGapToContiguousRatio <= maxGapToContRatioCriteria)
tempDF.loc[i, "tier"] = tier
df = pd.concat([df, tempDF.add_prefix(criteriaName + ".")], axis=1)
# if the dataset qualified
tierName = criteriaName + "." + "tier"
if sum(df[tierName].fillna(0) * 1) > 0:
tierGroupName = criteriaName + ".group"
tierGapGroupName = criteriaName + ".gapGroup"
df[tierGroupName] = ((df[tierName] != df[tierName].shift()).cumsum())
df[tierGapGroupName] = df[tierGroupName] * df[tierName]
groupObj = df[df[tierGapGroupName] > 0].groupby(tierGapGroupName)
biggestGroup = groupObj[tierGroupName].count().idxmax()
qualifiedBeginDate = groupObj.get_group(biggestGroup).date.min()
qualifiedEndDate = \
groupObj.get_group(biggestGroup).date.max() + \
pd.Timedelta(days=contDayCriteria)
nDaysToDeliever = (qualifiedEndDate - qualifiedBeginDate).days
qualifyingResults = {"qualified": True,
"qualified.beginDate": qualifiedBeginDate,
"qualified.endDate": qualifiedEndDate,
"qualified.nDaysToDeliever": nDaysToDeliever}
else:
qualifyingResults = {"qualified": False}
return df, qualifyingResults
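# Added summary comment: getQualifyingTier slides a window of contDayCriteria
# days over the day-level stats; a window passes the tier when it is fully
# contiguous, averages enough bolus-calculator events per day, has a high
# enough share of qualifying days, and its largest run of non-qualifying days
# is small relative to the window. If any window passes, the longest passing
# stretch defines the qualified begin and end dates.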
def qualify(df, metaDF, q, idx):
q["maxGapToContigRatio"]
metaDF[q["tierAbbr"] + ".topTier"] = q["tierAbbr"] + "0"
for j in range(0, len(q["tierNames"])):
df, qualifyingResults = \
getQualifyingTier(df,
q["tierNames"][j],
q["minContiguousDays"][j],
q["avgBolusCalcsPerDay"][j],
q["percentDaysQualifying"][j],
q["maxGapToContigRatio"][j])
qrDF = pd.DataFrame(qualifyingResults, index=[idx]). \
add_prefix(q["tierNames"][j] + ".")
metaDF = pd.concat([metaDF, qrDF], axis=1)
if qualifyingResults["qualified"]:
metaDF[q["tierAbbr"] + ".topTier"] = \
q["tierNames"][j]
return df, metaDF
# %% GLOBAL VARIABLES
qualifiedOn = dt.datetime.now().strftime("%Y-%m-%d")
phiDateStamp = "PHI-" + args.dateStamp
qualCriteria = json.load(args.qualificationCriteria)
criteriaMaxCgmPointsPerDay = \
1440 / qualCriteria["timeFreqMin"]
# input folder(s)
donorFolder = os.path.join(args.dataPath, phiDateStamp + "-donor-data")
if not os.path.isdir(donorFolder):
sys.exit("{0} is not a directory".format(donorFolder))
donorJsonData = os.path.join(donorFolder,
phiDateStamp + "-donorJsonData", "")
if not os.path.isdir(donorJsonData):
sys.exit("{0} is not a directory".format(donorJsonData))
# create output folder(s)
donorQualifyFolder = os.path.join(donorFolder,
args.dateStamp + "-qualified", "")
if not os.path.exists(donorQualifyFolder):
os.makedirs(donorQualifyFolder)
# load in list of unique donors
donorPath = os.path.join(donorFolder, phiDateStamp + "-uniqueDonorList.csv")
uniqueDonors = pd.read_csv(donorPath, index_col="dIndex")
nUniqueDonors = len(uniqueDonors)
allMetaData = pd.DataFrame()
# define start and end index
startIndex, endIndex = defineStartAndEndIndex(args, nUniqueDonors)
# %% START OF CODE
# loop through each donor
for dIndex in range(startIndex, endIndex):
# load in all data for user
metadata = pd.DataFrame(index=[dIndex])
userID = uniqueDonors.loc[dIndex, "userID"]
phiUserID = "PHI-" + userID
jsonFileName = os.path.join(donorJsonData, phiUserID + ".json")
fileSize = os.stat(jsonFileName).st_size
if os.path.exists(jsonFileName):
metadata["fileSize"] = fileSize
if fileSize > 1000:
data = td.load.load_json(jsonFileName)
# attach upload time to each record, for resolving duplicates
if "upload" in data.type.unique():
data = addUploadDate(data)
# filter by only hybridClosedLoop data
if "hClosedLoop" in qualCriteria["name"]:
if "basal" in data.type.unique():
data["date"] = pd.to_datetime(data.time).dt.date
bd = data[(data.type == "basal") & (data.deliveryType == "temp")]
tempBasalCounts = pd.DataFrame(bd.groupby("date").deliveryType.count()).reset_index()
tempBasalCounts.rename({"deliveryType": "tempBasalCounts"}, axis=1, inplace=True)
data = pd.merge(data, tempBasalCounts, on="date")
data = data[data.tempBasalCounts >= qualCriteria["nTempBasalsPerDayIsClosedLoop"]]
else:
data = pd.DataFrame(columns=list(data))
# filter by only 670g data
if "m670g" in qualCriteria["name"]:
data = data[data.deviceId.str.contains("1780")]
# flatten json
data = td.clean.flatten_json(data)
if (("cbg" in data.type.unique()) and ("bolus" in data.type.unique())):
# get rid of all negative durations
data, numberOfNegativeDurations = removeNegativeDurations(data)
metadata["all.negativeDurationsRemoved.count"] = numberOfNegativeDurations
# group data by type
groupedData = data.groupby(by="type")
# %% CGM
# filter by cgm and sort by time
cgmData = filterAndSort(groupedData, "cbg", "time")
# get rid of cbg values too low/high (< 38 & > 402 mg/dL)
cgmData, numberOfInvalidCgmValues = removeInvalidCgmValues(cgmData)
metadata["cgm.invalidValues.count"] = numberOfInvalidCgmValues
# get rid of duplicates that have the same ["deviceTime", "value"]
cgmData, nDuplicatesRemovedDeviceTime = removeCgmDuplicates(cgmData, "deviceTime")
metadata["cgm.nDuplicatesRemovedDeviceTime.count"] = nDuplicatesRemovedDeviceTime
# get rid of duplicates that have the same ["time", "value"]
cgmData, nDuplicatesRemovedUtcTime = removeCgmDuplicates(cgmData, "time")
metadata["cgm.nDuplicatesRemovedUtcTime.count"] = \
nDuplicatesRemovedUtcTime
# round time to the nearest 5 minutes
cgmData = td.clean.round_time(cgmData, timeIntervalMinutes=5, timeField="time",
roundedTimeFieldName="roundedTime", verbose=False)
# get rid of duplicates that have the same "roundedTime"
cgmData, nDuplicatesRemovedRoundedTime = removeDuplicates(cgmData, "roundedTime")
metadata["cgm.nDuplicatesRemovedRoundedTime.count"] = nDuplicatesRemovedRoundedTime
# calculate day or date of data
cgmData["dayIndex"] = cgmData.roundedTime.dt.date
# get start and end times
cgmBeginDate, cgmEndDate = getStartAndEndTimes(cgmData, "dayIndex")
metadata["cgm.beginDate"] = cgmBeginDate
metadata["cgm.endDate"] = cgmEndDate
# get a list of dexcom cgms
cgmData, percentDexcom = getListOfDexcomCGMDays(cgmData)
metadata["cgm.percentDexcomCGM"] = percentDexcom
# group by date (day) and get stats
catDF = cgmData.groupby(cgmData["dayIndex"])
cgmRecordsPerDay = \
pd.DataFrame(catDF.value.count()). \
rename(columns={"value": "cgm.count"})
dayDate = catDF.dayIndex.describe()["top"]
dexcomCGM = catDF.dexcomCGM.describe()["top"]
nTypesCGM = catDF.dexcomCGM.describe()["unique"]
cgmRecordsPerDay["cgm.dexcomOnly"] = \
(dexcomCGM & (nTypesCGM == 1))
cgmRecordsPerDay["date"] = cgmRecordsPerDay.index
# %% BOLUS
# filter by bolus and sort by time
bolusData = filterAndSort(groupedData, "bolus", "time")
# get rid of duplicates
bolusData, nDuplicatesRemoved = removeDuplicates(bolusData, ["time", "normal"])
metadata["bolus.duplicatesRemoved.count"] = nDuplicatesRemoved
# calculate day or date of data
bolusData["dayIndex"] = pd.DatetimeIndex(bolusData.time).date
# get start and end times
bolusBeginDate, bolusEndDate = getStartAndEndTimes(bolusData,
"dayIndex")
metadata["bolus.beginDate"] = bolusBeginDate
metadata["bolus.endDate"] = bolusEndDate
# group by date and get bolusRecordsPerDay
catDF = bolusData.groupby(bolusData["dayIndex"])
bolusRecordsPerDay = \
pd.DataFrame(catDF.subType.count()). \
rename(columns={"subType": "bolus.count"})
bolusRecordsPerDay["date"] = bolusRecordsPerDay.index
# %% GET CALCULATOR DATA (AKA WIZARD DATA)
calculatorRecordsPerDay, metadata = getCalculatorCounts(groupedData, metadata)
# %% GET CLOSED LOOP DAYS WITH TEMP BASAL DATA
isClosedLoopDay, is670g, metadata = \
getClosedLoopDays(groupedData, qualCriteria, metadata)
# %% CONTIGUOUS DATA
# calculate the start and end of contiguous data
contiguousBeginDate = max(cgmBeginDate, bolusBeginDate)
contiguousEndDate = min(cgmEndDate, bolusEndDate)
metadata["contiguous.beginDate"] = contiguousBeginDate
metadata["contiguous.endDate"] = contiguousEndDate
# create a dataframe over the contiguous time series
rng = pd.date_range(contiguousBeginDate, contiguousEndDate).date
contiguousData = pd.DataFrame(rng, columns=["date"])
# merge data
contiguousData = pd.merge(contiguousData, bolusRecordsPerDay,
on="date", how="left")
contiguousData = pd.merge(contiguousData, cgmRecordsPerDay,
on="date", how="left")
contiguousData = pd.merge(contiguousData, calculatorRecordsPerDay,
on="date", how="left")
contiguousData = pd.merge(contiguousData, isClosedLoopDay,
on="date", how="left")
contiguousData = pd.merge(contiguousData, is670g,
on="date", how="left")
# fill in nan's with 0s
for dataType in ["bolus", "cgm", "calculator", "basal.temp"]:
contiguousData[dataType + ".count"] = \
contiguousData[dataType + ".count"].fillna(0)
if ((len(contiguousData) > 0) &
(sum(contiguousData["cgm.count"] > 0) > 0) &
(sum(contiguousData["bolus.count"] > 0) > 0)):
# create an output folder
userQualifyFolder = os.path.join(donorQualifyFolder, userID)
if not os.path.exists(userQualifyFolder):
os.makedirs(userQualifyFolder)
# %% QUALIFICATION AT DAY LEVEL
# dexcom specific qualification criteria
if qualCriteria["name"] == "dexcom":
contiguousData = dexcomCriteria(contiguousData)
# determine if each day qualifies
contiguousData = \
isQualifyingDay(contiguousData,
qualCriteria["bolusesPerDay"],
qualCriteria["cgmPercentPerDay"],
criteriaMaxCgmPointsPerDay)
# calcuate summary stats
metadata = getSummaryStats(metadata, contiguousData)
# %% QUALIFICATION OF DATASET
contiguousData, metadata = qualify(contiguousData, metadata,
qualCriteria, dIndex)
# %% SAVE RESULTS
contiguousData.index.name = "dayIndex"
dSFileName = os.path.join(
userQualifyFolder, userID + "-qualified-as-" +
metadata[qualCriteria["tierAbbr"] + ".topTier"].values[0] +
"-on-" + qualifiedOn + "-for-" + qualCriteria["name"] +
"-dayStats.csv")
contiguousData.to_csv(dSFileName)
# append meta data to the user results
allMetaData = | pd.concat([allMetaData, metadata], axis=0, sort=False) | pandas.concat |
import pandas as pd
import numpy as np
from uszipcode import SearchEngine
from uszipcode.model import ZipcodeTypeEnum
from multiprocessing import Pool
import itertools
import csv
import os
def get_location_demographics_from_dict(address, result_limit=None, savepath='', by_city_state=True):
"""
    Takes in an address dictionary and returns the matching zipcode record(s) based on the maximum amount of detail provided.
    If an address cannot be found, the city and state are appended to 'value_errors.csv' inside the given savepath directory.
Parameters
----------
address: dict
{
'city': str or None,
'state': str or None,
'zip': str or None,
}
        Uses the most detailed address available to return the matching zipcode entries.
result_limit: int
Limits the number of zip codes returned. Use None for all zip codes.
savepath: str
The location to save the city-state pairs that are invalid. This should be a directory and not a file name.
by_city_state: bool
Dictates whether the uszipcodes search function should use a city-state pair or a zipcode.
Both options are set inside the address: dict.
Returns
-------
    result: list of SimpleZipcode(s)
All potential matches for the provided input.
Examples
--------
address = {
'city': 'New York',
'state': 'NY',
'zip': None,
'country': 'US'
}
print(len(get_location_demographics_from_dict(address, result_limit=None))) # There are 101 zipcodes for NYC in the Database.
>>> 101
"""
# optimize input address for lookup
address['city'] = address['city'].title().lstrip().rstrip()
address['state'] = address['state'].upper().lstrip().rstrip()
address['zip'] = str(address['zip'])[:5]
engine = SearchEngine(
simple_or_comprehensive=SearchEngine.SimpleOrComprehensiveArgEnum.simple
)
try:
if by_city_state:
result = engine.by_city_and_state(city=address['city'], state=address['state'], zipcode_type=ZipcodeTypeEnum.Standard, returns=result_limit)
else:
result = engine.by_zipcode(zipcode=address['zip'])
except ValueError as v:
# print('Could not find value for City:', address['city'], 'State:',address['state'], 'skipping instead.')
# trigger function to drop these records in a CSV function for later analysis
with open(os.path.abspath(f'{savepath}/value_errors.csv'), 'a', encoding='UTF8', newline='') as f:
writer = csv.writer(f)
row = address['row'].to_frame().T.values.flatten().tolist()
if f.tell()==0:
header = ['Zip', 'City', 'State', 'Value Error']
header += [i for i in range(0, len(row))]
writer.writerow(header)
data = [address['zip'], address['city'], address['state'], v]
data += row
writer.writerow(data)
return []
return result
def construct_municipal_area(address, result_limit=None, savepath='', by_city_state=True):
"""
Takes in an address dictionary and returns a pandas dataframe of all zipcodes that correspond to that municipal area.
This dataframe can be used to create flexible geographic generalizations.
Parameters
----------
address: dict
{
'city': str or None,
'state': str or None,
'zip': str or None,
}
result_limit: int or None
Set to None for the best result. Otherwise limits the number of zipcodes.
savepath: str
The location to save the data while run is in-progress. Allows for results to be inspected prior to completion. This should be a directory and not a file name.
by_city_state: bool
Dictates whether the uszipcodes search function should use a city-state pair or a zipcode.
Both options are set inside the address: dict.
Returns
-------
pd.DataFrame
A pandas dataframe with a variety of metrics by zipcode in accordance to the inputed address.
Example
-------
df = construct_municipal_area(address={
'city': 'New York',
'state': 'NY'
}, result_limit=None)
print(df.shape)
>>> (101, 12)
"""
simple_zipcodes = get_location_demographics_from_dict(address, result_limit=result_limit, savepath=savepath, by_city_state=by_city_state)
if by_city_state and len(simple_zipcodes) == 0:
return | pd.DataFrame() | pandas.DataFrame |
from sklearn import datasets
from sklearn.datasets import load_breast_cancer
from tensorflow import keras
import pandas as pd
import numpy as np
from src.auxiliary_functions.auxiliary_functions import fd
def fetch_data_set(name: str, samples_per_class_synthetic: int = 100, noise_synthetic: float = 0.1):
"""
Loads the data sets.
    Args:
name: str
Name of the data set.
data set name: # samples / # features / # classes
- 'abalone': 4067 / 10 / 16
- 'banknote': 1372 / 4 / 2
- 'cancer': 569 / 30 / 2
- 'digits': 1797 / 64 / 10
- 'htru2': 17898 / 8 / 2
- 'iris': 150 / 4 / 3
- 'madelon': 2600 / 500 / 2
- 'seeds': 210 / 7 / 3
- 'sonar': 208 / 60 / 2
- 'spam': 4601 / 57 / 2
- 'synthetic': 2 x samples_per_class_synthetic / 3 / 2
- 'voice': 126 / 310 / 2
- 'wine': 178 / 13 / 3
samples_per_class_synthetic: int, Optional
(Default is 100.)
noise_synthetic: int, Optional
(Default is 0.1.)
Returns:
X: np.ndarray
Data.
y: np.ndarray
Labels.
"""
if name == "abalone":
X, y = download_abalone()
elif name == 'banknote':
X, y = download_banknote()
elif name == 'cancer':
X, y = download_cancer()
elif name == 'digits':
X, y = download_digits()
elif name == 'htru2':
X, y = download_htru2()
elif name == 'iris':
X, y = download_iris()
elif name == 'madelon':
X, y = download_madelon()
elif name == 'sonar':
X, y = download_sonar()
elif name == 'spam':
X, y = download_spam()
elif name == 'synthetic':
X, y = create_synthetic_data(samples_per_class_synthetic, noise_synthetic)
elif name == 'seeds':
X, y = download_seeds()
elif name == 'voice':
X, y = download_voice()
elif name == 'wine':
X, y = download_wine()
else:
X, y = None, None
print("No valid data set was selected.")
return fd(X), fd(y)
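# Hedged usage sketch (added for illustration; the chosen data set name is an
# arbitrary example):
def _example_fetch_data_set():
    X, y = fetch_data_set('banknote')
    # Per the docstring above, X should have shape (1372, 4); y is returned
    # as a 2-D column via fd().
    return X.shape, y.shape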
def download_abalone():
"""
    Downloads the 'abalone' data set, turns the 'Sex' category into three numerical features: 'Male', 'Female', and
    'Infant', and then deletes all classes except the ones with {5, 6, ..., 20} 'Rings', ultimately culminating in a data
set of 4067 samples with 10 features 'Male', 'Female', 'Infant', 'Length', 'Diameter', 'Height', 'Whole weight',
'Shucked weight', 'Viscera weight', and 'Shell weight' and the label 'Rings'.
Returns:
X: np.array
Data.
y: np.array
Labels.
Data set information:
Predicting the age of abalone from physical measurements. The age of abalone is determined by cutting the shell
through the cone, staining it, and counting the number of rings through a microscope -- a boring and time-
consuming task. Other measurements, which are easier to obtain, are used to predict the age. Further
information, such as weather patterns and location (hence food availability) may be required to solve the
problem. From the original data examples with missing values were removed (the majority having the predicted
value missing), and the ranges of the continuous values have been scaled for use with an ANN (by dividing by
200).
Attribute information:
Given is the attribute name, attribute type, the measurement unit and a brief description. The number of rings
is the value to predict: either as a continuous value or as a classification problem.
Name / Data Type / Measurement Unit / Description
-----------------------------
Sex / nominal / -- / M, F, and I (infant)
Length / continuous / mm / Longest shell measurement
Diameter / continuous / mm / perpendicular to length
Height / continuous / mm / with meat in shell
Whole weight / continuous / grams / whole abalone
Shucked weight / continuous / grams / weight of meat
Viscera weight / continuous / grams / gut weight (after bleeding)
Shell weight / continuous / grams / after being dried
Rings / integer / -- / +1.5 gives the age in years
Class distribution:
Class Examples
----- --------
1 1
2 1
3 15
4 57
5 115
6 259
7 391
8 568
9 689
10 634
11 487
12 267
13 203
14 126
15 103
16 67
17 58
18 42
19 32
20 26
21 14
22 6
23 9
24 2
25 1
26 1
27 2
29 1
----- ----
Total 4177
References:
<NAME>. and <NAME>. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml].
Irvine, CA: University of California, School of Information and Computer Science.
"""
dataset_path = keras.utils.get_file("abalone", "https://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data")
column_names = ['Sex', 'Length', 'Diameter', 'Height', 'Whole weight',
'Shucked weight', 'Viscera weight', 'Shell weight', 'Rings']
dataset = pd.read_csv(dataset_path, names=column_names)
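# encode the 'Sex' column numerically, rename the codes, and one-hot encode them into 'Male', 'Female', 'Infant'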
cleanup_nums = {"Sex": {"M": 1, "F": 2, "I": 3}}
dataset = dataset.replace(cleanup_nums)
dataset['Sex'] = dataset['Sex'].map({1: 'Male', 2: 'Female', 3: 'Infant'})
dataset = pd.get_dummies(dataset, prefix='', prefix_sep='')
dataset = dataset[['Male', 'Female', 'Infant', 'Length', 'Diameter', 'Height', 'Whole weight',
'Shucked weight', 'Viscera weight', 'Shell weight', 'Rings']]
dataset = dataset.to_numpy()
X = dataset[:, :-1]
y = fd(dataset[:, -1])
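# keep only the samples with 5 to 20 rings; the rarer ring counts are dropped, leaving 4067 samples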
smaller = (y <= 20).flatten()
X = X[smaller, :]
y = y[smaller, :]
larger = (y >= 5).flatten()
X = X[larger, :]
y = y[larger, :]
assert X.shape[0] == y.shape[0], "Number of data points does not coincide with the number of labels."
assert X.shape[0] == 4067, "Wrong number of samples."
assert X.shape[1] == 10, "Wrong number of features."
return X, y
def download_banknote():
"""
Downloads the 'banknote' data set, a data set of 1372 samples with 4 features 'Variance of wavelet transformed
image', 'Skewness of wavelet transformed image ','Curtosis of wavelet transformed image', and 'Entropy of image'.
The labels indicate whether a banknote is fake or not.
Returns:
X: np.array
Data.
y: np.array
Labels.
References:
<NAME>. and <NAME>. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml].
Irvine, CA: University of California, School of Information and Computer Science.
"""
dataset_path = keras.utils.get_file("banknote", "https://archive.ics.uci.edu/ml/machine-learning-databases/00267/data_banknote_authentication.txt")
column_names = ['Variance of wavelet transformed image', 'Skewness of wavelet transformed image ',
'Curtosis of wavelet transformed image', 'Entropy of image', 'Class']
dataset = pd.read_csv(dataset_path, names=column_names)
dataset = dataset.to_numpy()
X = dataset[:, :-1]
y = fd(dataset[:, -1])
assert X.shape[0] == y.shape[0], "Number of data points does not coincide with the number of labels."
assert X.shape[0] == 1372, "Wrong number of samples."
assert X.shape[1] == 4, "Wrong number of features."
return X, y
def download_cancer():
"""Downloads the 'cancer' data set. It consists of 569 samples of 30 features, which are used to predict whether a
tumor is benign or malignant.
Returns:
X: np.array
Data.
y: np.array
Labels.
References:
<NAME>. and <NAME>. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml].
Irvine, CA: University of California, School of Information and Computer Science.
"""
X, y = load_breast_cancer(return_X_y=True)
assert X.shape[0] == y.shape[0], "Number of data points does not coincide with the number of labels."
assert X.shape[0] == 569, "Wrong number of samples."
assert X.shape[1] == 30, "Wrong number of features."
return X, y
def download_digits():
"""
Downloads the 'digits' data set, a data set of 1797 samples with 64 features and 10 classes. The goal is to determine
the hand-written number corresponding to each sample.
Returns:
X: np.array
Data.
y: np.array
Labels.
References:
<NAME>. and <NAME>. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml].
Irvine, CA: University of California, School of Information and Computer Science.
"""
digits = datasets.load_digits()
X = digits.data
y = digits.target
assert X.shape[0] == y.shape[0], "Number of data points does not coincide with the number of labels."
assert X.shape[0] == 1797, "Wrong number of samples."
assert X.shape[1] == 64, "Wrong number of features."
return X, y
def download_htru2():
"""
Downloads the 'htru2' data set, a data set of 17898 samples with 8 features and 2 classes. Candidates must be
classified in to pulsar and non-pulsar classes to aid discovery.
htru2 is a data set which describes a sample of pulsar candidates collected during the High Time Resolution Universe
Survey (South) [1].
Returns:
X: np.array
Data.
y: np.array
Labels.
References:
[1] <NAME> et al., 'The High Time Resolution Universe Pulsar Survey - I. System Configuration and Initial
Discoveries',2010, Monthly Notices of the Royal Astronomical Society, vol. 409, pp. 619-627.
DOI: 10.1111/j.1365-2966.2010.17325.x
[2] <NAME>. and <NAME>. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml].
Irvine, CA: University of California, School of Information and Computer Science.
"""
try:
dataset = pd.read_csv("data_sets/HTRU_2/HTRU_2.csv", header=None, engine='python')
except FileNotFoundError:
dataset = pd.read_csv("../data_sets/HTRU_2/HTRU_2.csv", header=None, engine='python')
dataset = dataset.to_numpy()
X = dataset[:, :-1]
y = fd(dataset[:, -1])
assert X.shape[0] == y.shape[0], "Number of data points does not coincide with the number of labels."
assert X.shape[0] == 17898, "Wrong number of samples."
assert X.shape[1] == 8, "Wrong number of features."
return X, y
def download_iris():
"""
Downloads the 'iris' data set, a data set of 150 samples with 4 features 'sepal length in cm', 'sepal width in cm',
'petal length in cm', and 'petal width in cm'. The goal is to determine to which of the three classes each sample
belongs.
Returns:
X: np.array
Data.
y: np.array
Labels.
References:
<NAME>. and <NAME>. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml].
Irvine, CA: University of California, School of Information and Computer Science.
"""
iris = datasets.load_iris()
X = iris.data
y = iris.target
assert X.shape[0] == y.shape[0], "Number of data points does not coincide with the number of labels."
assert X.shape[0] == 150, "Wrong number of samples."
assert X.shape[1] == 4, "Wrong number of features."
return X, y
def download_madelon():
"""
Downloads the training and validation samples of the 'madelon' data set, a binary classification data set
totalling 2600 samples with 500 features.
References:
[1] <NAME>, <NAME>, <NAME>, <NAME>, 2004. Result analysis of the NIPS 2003 feature
selection challenge. In: NIPS. [Web Link].
[2] <NAME>. and <NAME>. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml].
Irvine, CA: University of California, School of Information and Computer Science.
"""
path_X_train = keras.utils.get_file("madelon_train_data", "https://archive.ics.uci.edu/ml/machine-learning-databases/madelon/MADELON/madelon_train.data")
X_train = pd.read_csv(path_X_train, sep=" ", header=None).to_numpy()[:, :-1]
path_y_train = keras.utils.get_file("madelon_train_labels", "https://archive.ics.uci.edu/ml/machine-learning-databases/madelon/MADELON/madelon_train.labels")
y_train = pd.read_csv(path_y_train, sep=" ", header=None).to_numpy()
path_X_valid = keras.utils.get_file("madelon_valid_data", "https://archive.ics.uci.edu/ml/machine-learning-databases/madelon/MADELON/madelon_valid.data")
X_valid = pd.read_csv(path_X_valid, sep=" ", header=None).to_numpy()[:, :-1]
path_y_valid = keras.utils.get_file("madelon_valid_labels", "https://archive.ics.uci.edu/ml/machine-learning-databases/madelon/madelon_valid.labels")
y_valid = pd.read_csv(path_y_valid, sep=" ", header=None).to_numpy()
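# assumed completion (the remainder of this loader is not shown above): stack the train and
# validation splits and return them, matching the docstring's total of 2600 samples
X = np.vstack((X_train, X_valid))
y = np.vstack((y_train, y_valid))
return X, y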
from datetime import datetime, timedelta
import warnings
import operator
from textwrap import dedent
import numpy as np
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timedelta)
from pandas._libs.lib import is_datetime_array
from pandas.compat import range, u, set_function_name
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.generic import (
ABCSeries, ABCDataFrame,
ABCMultiIndex,
ABCPeriodIndex, ABCTimedeltaIndex,
ABCDateOffset)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_platform_int,
is_integer,
is_float,
is_dtype_equal,
is_dtype_union_equal,
is_object_dtype,
is_categorical,
is_categorical_dtype,
is_interval_dtype,
is_period_dtype,
is_bool,
is_bool_dtype,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
is_integer_dtype, is_float_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_hashable,
needs_i8_conversion,
is_iterator, is_list_like,
is_scalar)
from pandas.core.base import PandasObject, IndexOpsMixin
import pandas.core.common as com
from pandas.core import ops
from pandas.util._decorators import (
Appender, Substitution, cache_readonly, deprecate_kwarg)
from pandas.core.indexes.frozen import FrozenList
import pandas.core.dtypes.concat as _concat
import pandas.core.missing as missing
import pandas.core.algorithms as algos
import pandas.core.sorting as sorting
from pandas.io.formats.printing import (
pprint_thing, default_pprint, format_object_summary, format_object_attrs)
from pandas.core.ops import make_invalid_op
from pandas.core.strings import StringMethods
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
target_klass='Index',
unique='Index', duplicated='np.ndarray')
_index_shared_docs = dict()
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
def _make_comparison_op(op, cls):
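# Factory that builds a rich-comparison method (e.g. __eq__) for the given Index subclass;
# the inner function is renamed via set_function_name below.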
def cmp_method(self, other):
if isinstance(other, (np.ndarray, Index, ABCSeries)):
if other.ndim > 0 and len(self) != len(other):
raise ValueError('Lengths must match to compare')
# we may need to directly compare underlying
# representations
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
if is_object_dtype(self) and self.nlevels == 1:
# don't pass MultiIndex
with np.errstate(all='ignore'):
result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)
else:
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings(record=True):
with np.errstate(all='ignore'):
result = op(self.values, np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except TypeError:
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(cmp_method, name, cls)
def _make_arithmetic_op(op, cls):
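# Factory that builds an arithmetic dunder (e.g. __add__) for the given Index subclass,
# deferring to Series/DataFrame and handling datetime- and timedelta-like operands.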
def index_arithmetic_method(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif isinstance(other, ABCTimedeltaIndex):
# Defer to subclass implementation
return NotImplemented
other = self._validate_for_numeric_binop(other, op)
# handle time-based others
if isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):
return self._evaluate_with_timedelta_like(other, op)
elif isinstance(other, (datetime, np.datetime64)):
return self._evaluate_with_datetime_like(other, op)
values = self.values
with np.errstate(all='ignore'):
result = op(values, other)
result = missing.dispatch_missing(op, values, other, result)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
if op is divmod:
result = (Index(result[0], **attrs), Index(result[1], **attrs))
else:
result = Index(result, **attrs)
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(index_arithmetic_method, name, cls)
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
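# sentinel type: _reset_identity stores a fresh _Identity() instance in _id so that is_()
# can compare two Index objects (or views of one) by identity rather than by equality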
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
# required for backward compat, because PI can't be instantiated with
# ordinals through __new__ GH #13277
if issubclass(cls, ABCPeriodIndex):
from pandas.core.indexes.period import _new_PeriodIndex
return _new_PeriodIndex(cls, **d)
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
If dtype is None, we find the dtype that best fits the data.
If an actual dtype is provided, we coerce to that dtype if it's safe.
Otherwise, an error will be raised.
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
Examples
--------
>>> pd.Index([1, 2, 3])
Int64Index([1, 2, 3], dtype='int64')
>>> pd.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
See Also
---------
RangeIndex : Index implementing a monotonic integer range
CategoricalIndex : Index of :class:`Categorical` s.
MultiIndex : A multi-level, or hierarchical, Index
IntervalIndex : an Index of :class:`Interval` s.
DatetimeIndex, TimedeltaIndex, PeriodIndex
Int64Index, UInt64Index, Float64Index
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_left_indexer_unique = libjoin.left_join_indexer_unique_object
_left_indexer = libjoin.left_join_indexer_object
_inner_indexer = libjoin.inner_join_indexer_object
_outer_indexer = libjoin.outer_join_indexer_object
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_is_numeric_dtype = False
_can_hold_na = True
# would we like our indexing holder to defer to us
_defer_to_indexing = False
# prioritize current class for _shallow_copy_with_infer,
# used to infer integers as datetime-likes
_infer_as_myclass = False
_engine_type = libindex.ObjectEngine
_accessors = set(['str'])
str = CachedAccessor("str", StringMethods)
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, tupleize_cols=True, **kwargs):
if name is None and hasattr(data, 'name'):
name = data.name
if fastpath:
return cls._simple_new(data, name)
from .range import RangeIndex
# range
if isinstance(data, RangeIndex):
return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
elif isinstance(data, range):
return RangeIndex.from_range(data, copy=copy, dtype=dtype,
name=name)
# categorical
if is_categorical_dtype(data) or is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(data, dtype=dtype, copy=copy, name=name,
**kwargs)
# interval
if is_interval_dtype(data) or is_interval_dtype(dtype):
from .interval import IntervalIndex
closed = kwargs.get('closed', None)
return IntervalIndex(data, dtype=dtype, name=name, copy=copy,
closed=closed)
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
if (is_datetime64_any_dtype(data) or
(dtype is not None and is_datetime64_any_dtype(dtype)) or
'tz' in kwargs):
from pandas.core.indexes.datetimes import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name,
dtype=dtype, **kwargs)
if dtype is not None and is_dtype_equal(_o_dtype, dtype):
return Index(result.to_pydatetime(), dtype=_o_dtype)
else:
return result
elif (is_timedelta64_dtype(data) or
(dtype is not None and is_timedelta64_dtype(dtype))):
from pandas.core.indexes.timedeltas import TimedeltaIndex
result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pytimedelta(), dtype=_o_dtype)
else:
return result
if dtype is not None:
try:
# we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
# GH 11836
if is_integer_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'integer':
try:
data = np.array(data, copy=copy, dtype=dtype)
except OverflowError:
# gh-15823: a more user-friendly error message
raise OverflowError(
"the elements provided in the data cannot "
"all be casted to the dtype {dtype}"
.format(dtype=dtype))
elif inferred in ['floating', 'mixed-integer-float']:
if isna(data).any():
raise ValueError('cannot convert float '
'NaN to integer')
# If we are actually all equal to integers,
# then coerce to integer.
try:
return cls._try_convert_to_int_index(
data, copy, name, dtype)
except ValueError:
pass
# Return an actual float index.
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype,
name=name)
elif inferred == 'string':
pass
else:
data = data.astype(dtype)
elif is_float_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'string':
pass
else:
data = data.astype(dtype)
else:
data = np.array(data, dtype=dtype, copy=copy)
except (TypeError, ValueError) as e:
msg = str(e)
if 'cannot convert float' in msg:
raise
# maybe coerce to a sub-class
from pandas.core.indexes.period import (
PeriodIndex, IncompatibleFrequency)
if isinstance(data, PeriodIndex):
return PeriodIndex(data, copy=copy, name=name, **kwargs)
if is_signed_integer_dtype(data.dtype):
from .numeric import Int64Index
return Int64Index(data, copy=copy, dtype=dtype, name=name)
elif is_unsigned_integer_dtype(data.dtype):
from .numeric import UInt64Index
return UInt64Index(data, copy=copy, dtype=dtype, name=name)
elif is_float_dtype(data.dtype):
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):
subarr = data.astype('object')
else:
subarr = com._asarray_tuplesafe(data, dtype=object)
# _asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
if copy:
subarr = subarr.copy()
if dtype is None:
inferred = lib.infer_dtype(subarr)
if inferred == 'integer':
try:
return cls._try_convert_to_int_index(
subarr, copy, name, dtype)
except ValueError:
pass
return Index(subarr, copy=copy,
dtype=object, name=name)
elif inferred in ['floating', 'mixed-integer-float']:
from .numeric import Float64Index
return Float64Index(subarr, copy=copy, name=name)
elif inferred == 'interval':
from .interval import IntervalIndex
return IntervalIndex(subarr, name=name, copy=copy)
elif inferred == 'boolean':
# don't support boolean explicitly ATM
pass
elif inferred != 'string':
if inferred.startswith('datetime'):
if (lib.is_datetime_with_singletz_array(subarr) or
'tz' in kwargs):
# only when subarr has the same tz
from pandas.core.indexes.datetimes import (
DatetimeIndex)
try:
return DatetimeIndex(subarr, copy=copy,
name=name, **kwargs)
except libts.OutOfBoundsDatetime:
pass
elif inferred.startswith('timedelta'):
from pandas.core.indexes.timedeltas import (
TimedeltaIndex)
return TimedeltaIndex(subarr, copy=copy, name=name,
**kwargs)
elif inferred == 'period':
try:
return PeriodIndex(subarr, name=name, **kwargs)
except IncompatibleFrequency:
pass
return cls._simple_new(subarr, name)
elif hasattr(data, '__array__'):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name,
**kwargs)
elif data is None or is_scalar(data):
cls._scalar_data_error(data)
else:
if tupleize_cols and is_list_like(data) and data:
if is_iterator(data):
data = list(data)
# we must be all tuples, otherwise don't construct
# 10697
if all(isinstance(e, tuple) for e in data):
from .multi import MultiIndex
return MultiIndex.from_tuples(
data, names=name or kwargs.get('names'))
# other iterable of some kind
subarr = com._asarray_tuplesafe(data, dtype=object)
return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)
"""
NOTE for new Index creation:
- _simple_new: It returns new Index with the same type as the caller.
All metadata (such as name) must be provided by caller's responsibility.
Using _shallow_copy is recommended because it fills these metadata
otherwise specified.
- _shallow_copy: It returns new Index with the same type (using
_simple_new), but fills caller's metadata otherwise specified. Passed
kwargs will overwrite corresponding metadata.
- _shallow_copy_with_infer: It returns new Index inferring its type
from passed values. It fills caller's metadata otherwise specified as the
same as _shallow_copy.
See each method's docstring.
"""
@classmethod
def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
we require that we have a dtype compat for the values
if we are passed a non-dtype compat, then coerce using the constructor
Must be careful not to recurse.
"""
if not hasattr(values, 'dtype'):
if (values is None or not len(values)) and dtype is not None:
values = np.empty(0, dtype=dtype)
else:
values = np.array(values, copy=False)
if is_object_dtype(values):
values = cls(values, name=name, dtype=dtype,
**kwargs)._ndarray_values
result = object.__new__(cls)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
return result._reset_identity()
_index_shared_docs['_shallow_copy'] = """
create a new Index with the same class as the caller, don't copy the
data, use the same object attributes with passed in attributes taking
precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
return self._simple_new(values, **attributes)
def _shallow_copy_with_infer(self, values=None, **kwargs):
"""
create a new Index inferring the class with passed value, don't copy
the data, use the same object attributes with passed in attributes
taking precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes['copy'] = False
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
if self._infer_as_myclass:
try:
return self._constructor(values, **attributes)
except (TypeError, ValueError):
pass
return Index(values, **attributes)
def _deepcopy_if_needed(self, orig, copy=False):
"""
.. versionadded:: 0.19.0
Make a copy of self if data coincides (in memory) with orig.
Subclasses should override this if self._base is not an ndarray.
Parameters
----------
orig : ndarray
other ndarray to compare self._data against
copy : boolean, default False
when False, do not run any check, just return self
Returns
-------
A copy of self if needed, otherwise self : Index
"""
if copy:
# Retrieve the "base objects", i.e. the original memory allocations
if not isinstance(orig, np.ndarray):
# orig is a DatetimeIndex
orig = orig.values
orig = orig if orig.base is None else orig.base
new = self._data if self._data.base is None else self._data.base
if orig is new:
return self.copy(deep=True)
return self
def _update_inplace(self, result, **kwargs):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
def _sort_levels_monotonic(self):
""" compat with MultiIndex """
return self
_index_shared_docs['_get_grouper_for_level'] = """
Get index grouper corresponding to an index level
Parameters
----------
mapper: Group mapping function or None
Function mapping index values to groups
level : int or None
Index level
Returns
-------
grouper : Index
Index of values to group on
labels : ndarray of int or None
Array of locations in level_index
uniques : Index or None
Index of unique values for level
"""
@Appender(_index_shared_docs['_get_grouper_for_level'])
def _get_grouper_for_level(self, mapper, level=None):
assert level is None or level == 0
if mapper is None:
grouper = self
else:
grouper = self.map(mapper)
return grouper, None, None
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
"""
# use something other than None to be clearer
return self._id is getattr(
other, '_id', Ellipsis) and self._id is not None
def _reset_identity(self):
"""Initializes or resets ``_id`` attribute with new object"""
self._id = _Identity()
return self
# ndarray compat
def __len__(self):
"""
return the length of the Index
"""
return len(self._data)
def __array__(self, dtype=None):
""" the array interface, return my values """
return self._data.view(np.ndarray)
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
if is_bool_dtype(result):
return result
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(result, **attrs)
@cache_readonly
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@cache_readonly
def dtype_str(self):
""" return the dtype str of the underlying data """
return str(self.dtype)
@property
def values(self):
""" return the underlying data as an ndarray """
return self._data.view(np.ndarray)
@property
def _values(self):
# type: () -> Union[ExtensionArray, Index]
# TODO(EA): remove index types as they become extension arrays
"""The best array representation.
This is an ndarray, ExtensionArray, or Index subclass. This differs
from ``_ndarray_values``, which always returns an ndarray.
Both ``_values`` and ``_ndarray_values`` are consistent between
``Series`` and ``Index``.
It may differ from the public '.values' method.
index | values | _values | _ndarray_values |
----------------- | --------------- | ------------ | --------------- |
CategoricalIndex | Categorical | Categorical | codes |
DatetimeIndex[tz] | ndarray[M8ns] | DTI[tz] | ndarray[M8ns] |
For the following, the ``._values`` is currently ``ndarray[object]``,
but will soon be an ``ExtensionArray``
index | values | _values | _ndarray_values |
----------------- | --------------- | ------------ | --------------- |
PeriodIndex | ndarray[object] | ndarray[obj] | ndarray[int] |
IntervalIndex | ndarray[object] | ndarray[obj] | ndarray[object] |
See Also
--------
values
_ndarray_values
"""
return self.values
def get_values(self):
"""
Return `Index` data as an `numpy.ndarray`.
Returns
-------
numpy.ndarray
A one-dimensional numpy array of the `Index` values.
See Also
--------
Index.values : The attribute that get_values wraps.
Examples
--------
Getting the `Index` values of a `DataFrame`:
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
... index=['a', 'b', 'c'], columns=['A', 'B', 'C'])
>>> df
A B C
a 1 2 3
b 4 5 6
c 7 8 9
>>> df.index.get_values()
array(['a', 'b', 'c'], dtype=object)
Standalone `Index` values:
>>> idx = pd.Index(['1', '2', '3'])
>>> idx.get_values()
array(['1', '2', '3'], dtype=object)
`MultiIndex` arrays also have only one dimension:
>>> midx = pd.MultiIndex.from_arrays([[1, 2, 3], ['a', 'b', 'c']],
... names=('number', 'letter'))
>>> midx.get_values()
array([(1, 'a'), (2, 'b'), (3, 'c')], dtype=object)
>>> midx.get_values().ndim
1
"""
return self.values
@Appender(IndexOpsMixin.memory_usage.__doc__)
def memory_usage(self, deep=False):
result = super(Index, self).memory_usage(deep=deep)
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# ops compat
@deprecate_kwarg(old_arg_name='n', new_arg_name='repeats')
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an Index.
Returns a new index where each element of the current index
is repeated consecutively a given number of times.
Parameters
----------
repeats : int
The number of repetitions for each element.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
pandas.Index
Newly created Index with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series
numpy.repeat : Underlying implementation
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
>>> idx.repeat(2)
Int64Index([1, 1, 2, 2, 3, 3], dtype='int64')
>>> idx.repeat(3)
Int64Index([1, 1, 1, 2, 2, 2, 3, 3, 3], dtype='int64')
"""
nv.validate_repeat(args, kwargs)
return self._shallow_copy(self._values.repeat(repeats))
_index_shared_docs['where'] = """
.. versionadded:: 0.19.0
Return an Index of same shape as self and whose corresponding
entries are from self where cond is True and otherwise are from
other.
Parameters
----------
cond : boolean array-like with the same length as self
other : scalar, or array-like
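Examples
--------
>>> idx = pd.Index(['car', 'bike', 'train', 'tractor'])
>>> idx.where(idx.isin(['car', 'train']), 'other')
Index(['car', 'other', 'train', 'other'], dtype='object')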
"""
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
dtype = self.dtype
values = self.values
if is_bool(other) or is_bool_dtype(other):
# bools force casting
values = values.astype(object)
dtype = None
values = np.where(cond, values, other)
if self._is_numeric_dtype and np.any(isna(values)):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return self._shallow_copy_with_infer(values, dtype=dtype)
def ravel(self, order='C'):
"""
return an ndarray of the flattened values of the underlying data
See also
--------
numpy.ndarray.ravel
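Examples
--------
>>> pd.Index([1, 2, 3]).ravel()
array([1, 2, 3])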
"""
return self._ndarray_values.ravel(order=order)
# construction helpers
@classmethod
def _try_convert_to_int_index(cls, data, copy, name, dtype):
"""
Attempt to convert an array of data into an integer index.
Parameters
----------
data : The data to convert.
copy : Whether to copy the data or not.
name : The name of the index returned.
Returns
-------
int_index : data converted to either an Int64Index or a
UInt64Index
Raises
------
ValueError if the conversion was not successful.
"""
from .numeric import Int64Index, UInt64Index
if not is_unsigned_integer_dtype(dtype):
# skip int64 conversion attempt if uint-like dtype is passed, as
# this could return Int64Index when UInt64Index is what's desired
try:
res = data.astype('i8', copy=False)
if (res == data).all():
return Int64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
# Conversion to int64 failed (possibly due to overflow) or was skipped,
# so let's try now with uint64.
try:
res = data.astype('u8', copy=False)
if (res == data).all():
return UInt64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
raise ValueError
@classmethod
def _scalar_data_error(cls, data):
raise TypeError('{0}(...) must be called with a collection of some '
'kind, {1} was passed'.format(cls.__name__,
repr(data)))
@classmethod
def _string_data_error(cls, data):
raise TypeError('String dtype not supported, you may need '
'to explicitly cast to a numeric type')
@classmethod
def _coerce_to_ndarray(cls, data):
"""coerces data to ndarray, raises on scalar data. Converts other
iterables to list first and then to array. Does not touch ndarrays.
"""
if not isinstance(data, (np.ndarray, Index)):
if data is None or is_scalar(data):
cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
data = np.asarray(data)
return data
def _get_attributes_dict(self):
""" return an attributes dict for my class """
return {k: getattr(self, k, None) for k in self._attributes}
def view(self, cls=None):
# we need to see if we are subclassing an
# index type here
if cls is not None and not hasattr(cls, '_typ'):
result = self._data.view(cls)
else:
result = self._shallow_copy()
if isinstance(result, Index):
result._id = self._id
return result
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to a compat for our index type
Parameters
----------
item : scalar item to coerce
"""
dtype = self.dtype
if self._is_numeric_dtype and isna(item):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return Index([item], dtype=dtype, **self._get_attributes_dict())
_index_shared_docs['copy'] = """
Make a copy of this object. Name and dtype sets those attributes on
the new object.
Parameters
----------
name : string, optional
deep : boolean, default False
dtype : numpy dtype or pandas type
Returns
-------
copy : Index
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
"""
@Appender(_index_shared_docs['copy'])
def copy(self, name=None, deep=False, dtype=None, **kwargs):
if deep:
new_index = self._shallow_copy(self._data.copy())
else:
new_index = self._shallow_copy()
names = kwargs.get('names')
names = self._validate_names(name=name, names=names, deep=deep)
new_index = new_index.set_names(names)
if dtype:
new_index = new_index.astype(dtype)
return new_index
def __copy__(self, **kwargs):
return self.copy(**kwargs)
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
return self.copy(deep=True)
def _validate_names(self, name=None, names=None, deep=False):
"""
Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' parameter for MultiIndex.
"""
from copy import deepcopy
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
elif names is None and name is None:
return deepcopy(self.names) if deep else self.names
elif names is not None:
if not is_list_like(names):
raise TypeError("Must pass list-like as `names`.")
return names
else:
if not is_list_like(name):
return [name]
return name
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
klass = self.__class__.__name__
data = self._format_data()
attrs = self._format_attrs()
space = self._format_space()
prepr = (u(",%s") %
space).join(u("%s=%s") % (k, v) for k, v in attrs)
# no data provided, just attributes
if data is None:
data = ''
res = u("%s(%s%s)") % (klass, data, prepr)
return res
def _format_space(self):
# using space here controls if the attributes
# are line separated or not (the default)
# max_seq_items = get_option('display.max_seq_items')
# if len(self) > max_seq_items:
# space = "\n%s" % (' ' * (len(klass) + 1))
return " "
@property
def _formatter_func(self):
"""
Return the formatter function
"""
return default_pprint
def _format_data(self, name=None):
"""
Return the formatted data as a unicode string
"""
# do we want to justify (only do so for non-objects)
is_justify = not (self.inferred_type in ('string', 'unicode') or
(self.inferred_type == 'categorical' and
is_object_dtype(self.categories)))
return format_object_summary(self, self._formatter_func,
is_justify=is_justify, name=name)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
return format_object_attrs(self)
def to_series(self, index=None, name=None):
"""
Create a Series with both index and values equal to the index keys,
useful with map for returning an indexer based on an index
Parameters
----------
index : Index, optional
index of resulting Series. If None, defaults to original index
name : string, optional
name of resulting Series. If None, defaults to name of original
index
Returns
-------
Series : dtype will be based on the type of the Index values.
"""
from pandas import Series
if index is None:
index = self._shallow_copy()
if name is None:
name = self.name
return Series(self._to_embed(), index=index, name=name)
def to_frame(self, index=True):
"""
Create a DataFrame with a column containing the Index.
.. versionadded:: 0.21.0
Parameters
----------
index : boolean, default True
Set the index of the returned DataFrame as the original Index.
Returns
-------
DataFrame
DataFrame containing the original Index data.
See Also
--------
Index.to_series : Convert an Index to a Series.
Series.to_frame : Convert Series to DataFrame.
Examples
--------
>>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')
>>> idx.to_frame()
animal
animal
Ant Ant
Bear Bear
Cow Cow
By default, the original Index is reused. To enforce a new Index:
>>> idx.to_frame(index=False)
animal
0 Ant
1 Bear
2 Cow
"""
from pandas import DataFrame
result = DataFrame(self._shallow_copy(), columns=[self.name or 0])
if index:
result.index = self
return result
def _to_embed(self, keep_tz=False, dtype=None):
"""
*this is an internal non-public method*
return an array repr of this object, potentially casting to object
"""
if dtype is not None:
return self.astype(dtype)._to_embed(keep_tz=keep_tz)
return self.values.copy()
_index_shared_docs['astype'] = """
Create an Index with values cast to dtypes. The class of a new Index
is determined by dtype. When conversion is impossible, a ValueError
exception is raised.
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and internal requirements on dtype are
satisfied, the original data is used to create a new Index
or the original Index is returned.
.. versionadded:: 0.19.0
"""
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
if is_dtype_equal(self.dtype, dtype):
return self.copy() if copy else self
elif is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(self.values, name=self.name, dtype=dtype,
copy=copy)
try:
return Index(self.values.astype(dtype, copy=copy), name=self.name,
dtype=dtype)
except (TypeError, ValueError):
msg = 'Cannot cast {name} to dtype {dtype}'
raise TypeError(msg.format(name=type(self).__name__, dtype=dtype))
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self
def _assert_can_do_setop(self, other):
if not is_list_like(other):
raise TypeError('Input must be Index or array-like')
return True
def _convert_can_do_setop(self, other):
if not isinstance(other, Index):
other = Index(other, name=self.name)
result_name = self.name
else:
result_name = self.name if self.name == other.name else None
return other, result_name
def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
return value
def _assert_can_do_op(self, value):
""" Check value is valid for scalar op """
if not is_scalar(value):
msg = "'value' must be a scalar, passed: {0}"
raise TypeError(msg.format(type(value).__name__))
@property
def nlevels(self):
return 1
def _get_names(self):
return FrozenList((self.name, ))
def _set_names(self, values, level=None):
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
Raises
------
TypeError if each name is not hashable.
"""
if not is_list_like(values):
raise ValueError('Names must be a list-like')
if len(values) != 1:
raise ValueError('Length of new names must be 1, got %d' %
len(values))
# GH 20527
# All items in 'name' need to be hashable:
for name in values:
if not is_hashable(name):
raise TypeError('{}.name must be a hashable type'
.format(self.__class__.__name__))
self.name = values[0]
names = property(fset=_set_names, fget=_get_names)
def set_names(self, names, level=None, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
names : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
Examples
--------
>>> Index([1, 2, 3, 4]).set_names('foo')
Int64Index([1, 2, 3, 4], dtype='int64', name='foo')
>>> Index([1, 2, 3, 4]).set_names(['foo'])
Int64Index([1, 2, 3, 4], dtype='int64', name='foo')
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_names(['baz', 'quz'])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'quz'])
>>> idx.set_names('baz', level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'bar'])
"""
from .multi import MultiIndex
if level is not None and not isinstance(self, MultiIndex):
raise ValueError('Level must be None for non-MultiIndex')
if level is not None and not is_list_like(level) and is_list_like(
names):
raise TypeError("Names must be a string")
if not is_list_like(names) and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
if not is_list_like(names):
names = [names]
if level is not None and not is_list_like(level):
level = [level]
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._set_names(names, level=level)
if not inplace:
return idx
def rename(self, name, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
name : str or list
name to set
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
"""
return self.set_names([name], inplace=inplace)
@property
def _has_complex_internals(self):
# to disable groupby tricks in MultiIndex
return False
def _summary(self, name=None):
"""
Return a summarized representation
Parameters
----------
name : str
name to use in the summary representation
Returns
-------
String with a summarized representation of the index
"""
if len(self) > 0:
head = self[0]
if (hasattr(head, 'format') and
not isinstance(head, compat.string_types)):
head = head.format()
tail = self[-1]
if (hasattr(tail, 'format') and
not isinstance(tail, compat.string_types)):
tail = tail.format()
index_summary = ', %s to %s' % (pprint_thing(head),
pprint_thing(tail))
else:
index_summary = ''
if name is None:
name = type(self).__name__
return '%s: %s entries%s' % (name, len(self), index_summary)
def summary(self, name=None):
"""
Return a summarized representation
.. deprecated:: 0.23.0
"""
warnings.warn("'summary' is deprecated and will be removed in a "
"future version.", FutureWarning, stacklevel=2)
return self._summary(name)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.values
_na_value = np.nan
"""The expected NA value to use with this index."""
# introspection
@property
def is_monotonic(self):
""" alias for is_monotonic_increasing (deprecated) """
return self.is_monotonic_increasing
@property
def is_monotonic_increasing(self):
"""
return if the index is monotonic increasing (only equal or
increasing) values.
Examples
--------
>>> Index([1, 2, 3]).is_monotonic_increasing
True
>>> Index([1, 2, 2]).is_monotonic_increasing
True
>>> Index([1, 3, 2]).is_monotonic_increasing
False
"""
return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
Examples
--------
>>> Index([3, 2, 1]).is_monotonic_decreasing
True
>>> Index([3, 2, 2]).is_monotonic_decreasing
True
>>> Index([3, 1, 2]).is_monotonic_decreasing
False
"""
return self._engine.is_monotonic_decreasing
@property
def _is_strictly_monotonic_increasing(self):
"""return if the index is strictly monotonic increasing
(only increasing) values
Examples
--------
>>> Index([1, 2, 3])._is_strictly_monotonic_increasing
True
>>> Index([1, 2, 2])._is_strictly_monotonic_increasing
False
>>> Index([1, 3, 2])._is_strictly_monotonic_increasing
False
"""
return self.is_unique and self.is_monotonic_increasing
@property
def _is_strictly_monotonic_decreasing(self):
"""return if the index is strictly monotonic decreasing
(only decreasing) values
Examples
--------
>>> Index([3, 2, 1])._is_strictly_monotonic_decreasing
True
>>> Index([3, 2, 2])._is_strictly_monotonic_decreasing
False
>>> Index([3, 1, 2])._is_strictly_monotonic_decreasing
False
"""
return self.is_unique and self.is_monotonic_decreasing
def is_lexsorted_for_tuple(self, tup):
return True
@cache_readonly
def is_unique(self):
""" return if the index has unique values """
return self._engine.is_unique
@property
def has_duplicates(self):
return not self.is_unique
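# Lightweight introspection predicates based on the inferred type of the underlying values.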
def is_boolean(self):
return self.inferred_type in ['boolean']
def is_integer(self):
return self.inferred_type in ['integer']
def is_floating(self):
return self.inferred_type in ['floating', 'mixed-integer-float']
def is_numeric(self):
return self.inferred_type in ['integer', 'floating']
def is_object(self):
return is_object_dtype(self.dtype)
def is_categorical(self):
"""
Check if the Index holds categorical data.
Returns
-------
boolean
True if the Index is categorical.
See Also
--------
CategoricalIndex : Index for categorical data.
Examples
--------
>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_categorical()
True
>>> idx = pd.Index([1, 3, 5, 7])
>>> idx.is_categorical()
False
>>> s = pd.Series(["Peter", "Victor", "Elisabeth", "Mar"])
>>> s
0 Peter
1 Victor
2 Elisabeth
3 Mar
dtype: object
>>> s.index.is_categorical()
False
"""
return self.inferred_type in ['categorical']
def is_interval(self):
return self.inferred_type in ['interval']
def is_mixed(self):
return self.inferred_type in ['mixed']
def holds_integer(self):
return self.inferred_type in ['integer', 'mixed-integer']
_index_shared_docs['_convert_scalar_indexer'] = """
Convert a scalar indexer.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
if kind == 'iloc':
return self._validate_indexer('positional', key, kind)
if len(self) and not isinstance(self, ABCMultiIndex):
# we can raise here if we are definitive that this
# is positional indexing (eg. .ix on with a float)
# or label indexing if we are using a type able
# to be represented in the index
if kind in ['getitem', 'ix'] and is_float(key):
if not self.is_floating():
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_float(key):
# we want to raise KeyError on string/mixed here
# technically we *could* raise a TypeError
# on anything but mixed though
if self.inferred_type not in ['floating',
'mixed-integer-float',
'string',
'unicode',
'mixed']:
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_integer(key):
if not self.holds_integer():
return self._invalid_indexer('label', key)
return key
_index_shared_docs['_convert_slice_indexer'] = """
Convert a slice indexer.
By definition, these are labels unless 'iloc' is passed in.
Floats are not allowed as the start, step, or stop of the slice.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
@Appender(_index_shared_docs['_convert_slice_indexer'])
def _convert_slice_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# if we are not a slice, then we are done
if not isinstance(key, slice):
return key
# validate iloc
if kind == 'iloc':
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# potentially cast the bounds to integers
start, stop, step = key.start, key.stop, key.step
# figure out if this is a positional indexer
def is_int(v):
return v is None or is_integer(v)
is_null_slicer = start is None and stop is None
is_index_slice = is_int(start) and is_int(stop)
is_positional = is_index_slice and not self.is_integer()
if kind == 'getitem':
"""
called from the getitem slicers, validate that we are in fact
integers
"""
if self.is_integer() or is_index_slice:
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# convert the slice to an indexer here
# if we are mixed and have integers
try:
if is_positional and self.is_mixed():
# TODO: i, j are not used anywhere
if start is not None:
i = self.get_loc(start) # noqa
if stop is not None:
j = self.get_loc(stop) # noqa
is_positional = False
except KeyError:
if self.inferred_type == 'mixed-integer-float':
raise
if is_null_slicer:
indexer = key
elif is_positional:
indexer = key
else:
try:
indexer = self.slice_indexer(start, stop, step, kind=kind)
except Exception:
if is_index_slice:
if self.is_integer():
raise
else:
indexer = key
else:
raise
return indexer
def _convert_listlike_indexer(self, keyarr, kind=None):
"""
Parameters
----------
keyarr : list-like
Indexer to convert.
Returns
-------
tuple (indexer, keyarr)
indexer is an ndarray or None if cannot convert
keyarr are tuple-safe keys
"""
if isinstance(keyarr, Index):
keyarr = self._convert_index_indexer(keyarr)
else:
keyarr = self._convert_arr_indexer(keyarr)
indexer = self._convert_list_indexer(keyarr, kind=kind)
return indexer, keyarr
_index_shared_docs['_convert_arr_indexer'] = """
Convert an array-like indexer to the appropriate dtype.
Parameters
----------
keyarr : array-like
Indexer to convert.
Returns
-------
converted_keyarr : array-like
"""
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
keyarr = com._asarray_tuplesafe(keyarr)
return keyarr
_index_shared_docs['_convert_index_indexer'] = """
Convert an Index indexer to the appropriate dtype.
Parameters
----------
keyarr : Index (or sub-class)
Indexer to convert.
Returns
-------
converted_keyarr : Index (or sub-class)
"""
@Appender(_index_shared_docs['_convert_index_indexer'])
def _convert_index_indexer(self, keyarr):
return keyarr
_index_shared_docs['_convert_list_indexer'] = """
Convert a list-like indexer to the appropriate dtype.
Parameters
----------
keyarr : Index (or sub-class)
Indexer to convert.
kind : iloc, ix, loc, optional
Returns
-------
positional indexer or None
"""
@Appender(_index_shared_docs['_convert_list_indexer'])
def _convert_list_indexer(self, keyarr, kind=None):
if (kind in [None, 'iloc', 'ix'] and
is_integer_dtype(keyarr) and not self.is_floating() and
not isinstance(keyarr, ABCPeriodIndex)):
if self.inferred_type == 'mixed-integer':
indexer = self.get_indexer(keyarr)
if (indexer >= 0).all():
return indexer
# missing values are flagged as -1 by get_indexer and negative
# indices are already converted to positive indices in the
# above if-statement, so the negative flags are changed to
# values outside the range of indices so as to trigger an
# IndexError in maybe_convert_indices
indexer[indexer < 0] = len(self)
from pandas.core.indexing import maybe_convert_indices
return maybe_convert_indices(indexer, len(self))
elif not self.inferred_type == 'integer':
keyarr = np.where(keyarr < 0, len(self) + keyarr, keyarr)
return keyarr
return None
def _invalid_indexer(self, form, key):
""" consistent invalid indexer message """
raise TypeError("cannot do {form} indexing on {klass} with these "
"indexers [{key}] of {kind}".format(
form=form, klass=type(self), key=key,
kind=type(key)))
def get_duplicates(self):
"""
Extract duplicated index elements.
Returns a sorted list of index elements which appear more than once in
the index.
.. deprecated:: 0.23.0
Use idx[idx.duplicated()].unique() instead
Returns
-------
array-like
List of duplicated indexes.
See Also
--------
Index.duplicated : Return boolean array denoting duplicates.
Index.drop_duplicates : Return Index with duplicates removed.
Examples
--------
Works on different Index of types.
>>> pd.Index([1, 2, 2, 3, 3, 3, 4]).get_duplicates()
[2, 3]
>>> pd.Index([1., 2., 2., 3., 3., 3., 4.]).get_duplicates()
[2.0, 3.0]
>>> pd.Index(['a', 'b', 'b', 'c', 'c', 'c', 'd']).get_duplicates()
['b', 'c']
Note that for a DatetimeIndex, it does not return a list but a new
DatetimeIndex:
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03',
... '2018-01-03', '2018-01-04', '2018-01-04'],
... format='%Y-%m-%d')
>>> pd.Index(dates).get_duplicates()
DatetimeIndex(['2018-01-03', '2018-01-04'],
dtype='datetime64[ns]', freq=None)
Sorts duplicated elements even when indexes are unordered.
>>> pd.Index([1, 2, 3, 2, 3, 4, 3]).get_duplicates()
[2, 3]
Return empty array-like structure when all elements are unique.
>>> pd.Index([1, 2, 3, 4]).get_duplicates()
[]
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03'],
... format='%Y-%m-%d')
>>> pd.Index(dates).get_duplicates()
DatetimeIndex([], dtype='datetime64[ns]', freq=None)
"""
warnings.warn("'get_duplicates' is deprecated and will be removed in "
"a future release. You can use "
"idx[idx.duplicated()].unique() instead",
FutureWarning, stacklevel=2)
return self[self.duplicated()].unique()
def _cleanup(self):
self._engine.clear_mapping()
@cache_readonly
def _constructor(self):
return type(self)
@cache_readonly
def _engine(self):
# property, for now, slow to look up
return self._engine_type(lambda: self._ndarray_values, len(self))
def _validate_index_level(self, level):
"""
Validate index level.
For single-level Index getting level number is a no-op, but some
verification must be done like in MultiIndex.
"""
if isinstance(level, int):
if level < 0 and level != -1:
raise IndexError("Too many levels: Index has only 1 level,"
" %d is not a valid level number" % (level, ))
elif level > 0:
raise IndexError("Too many levels:"
" Index has only 1 level, not %d" %
(level + 1))
elif level != self.name:
raise KeyError('Level %s must be same as name (%s)' %
(level, self.name))
def _get_level_number(self, level):
self._validate_index_level(level)
return 0
@cache_readonly
def inferred_type(self):
""" return a string of the type inferred from the values """
return lib.infer_dtype(self)
def _is_memory_usage_qualified(self):
""" return a boolean if we need a qualified .info display """
return self.is_object()
def is_type_compatible(self, kind):
return kind == self.inferred_type
@cache_readonly
def is_all_dates(self):
if self._data is None:
return False
return is_datetime_array(_ensure_object(self.values))
def __reduce__(self):
d = dict(data=self._data)
d.update(self._get_attributes_dict())
return _new_Index, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
self._data = state.pop('data')
for k, v in compat.iteritems(state):
setattr(self, k, v)
elif isinstance(state, tuple):
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
self.name = own_state[0]
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(data, state)
self._data = data
self._reset_identity()
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def __nonzero__(self):
raise ValueError("The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
.format(self.__class__.__name__))
__bool__ = __nonzero__
_index_shared_docs['__contains__'] = """
return a boolean if this key is IN the index
Parameters
----------
key : object
Returns
-------
boolean
"""
@Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)
def __contains__(self, key):
hash(key)
try:
return key in self._engine
except (OverflowError, TypeError, ValueError):
return False
_index_shared_docs['contains'] = """
return a boolean if this key is IN the index
Parameters
----------
key : object
Returns
-------
boolean
"""
@Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
def contains(self, key):
hash(key)
try:
return key in self._engine
except (TypeError, ValueError):
return False
def __hash__(self):
raise TypeError("unhashable type: %r" % type(self).__name__)
def __setitem__(self, key, value):
raise TypeError("Index does not support mutable operations")
def __getitem__(self, key):
"""
Override numpy.ndarray's __getitem__ method to work as desired.
This function adds lists and Series as valid boolean indexers
(ndarrays only supports ndarray with dtype=bool).
If resulting ndim != 1, plain ndarray is returned instead of
corresponding `Index` subclass.
"""
# There's no custom logic to be implemented in __getslice__, so it's
# not overloaded intentionally.
getitem = self._data.__getitem__
promote = self._shallow_copy
if is_scalar(key):
return getitem(key)
if isinstance(key, slice):
# This case is separated from the conditional above to avoid
# pessimization of basic indexing.
return promote(getitem(key))
if com.is_bool_indexer(key):
key = np.asarray(key)
key = com._values_from_object(key)
result = getitem(key)
if not is_scalar(result):
return promote(result)
else:
return result
def _can_hold_identifiers_and_holds_name(self, name):
"""
Faster check for ``name in self`` when we know `name` is a Python
identifier (e.g. in NDFrame.__getattr__, which hits this to support
. key lookup). For indexes that can't hold identifiers (everything
but object & categorical) we just return False.
https://github.com/pandas-dev/pandas/issues/19764
"""
if self.is_object() or self.is_categorical():
return name in self
return False
def append(self, other):
"""
        Append a collection of Index objects together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if not isinstance(obj, Index):
raise TypeError('all inputs must be Index')
names = {obj.name for obj in to_concat}
name = None if len(names) > 1 else self.name
return self._concat(to_concat, name)
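    # Illustrative sketch (not part of the original pandas source): doctest-style
    # usage of ``append``; assumes ``import pandas as pd`` and pandas 0.23-era
    # reprs, which may differ slightly in other versions.
    # >>> pd.Index([1, 2]).append(pd.Index([3, 4]))
    # Int64Index([1, 2, 3, 4], dtype='int64')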
def _concat(self, to_concat, name):
typs = _concat.get_dtype_kinds(to_concat)
if len(typs) == 1:
return self._concat_same_dtype(to_concat, name=name)
return _concat._concat_index_asobject(to_concat, name=name)
def _concat_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class
"""
# must be overridden in specific classes
return _concat._concat_index_asobject(to_concat, name)
_index_shared_docs['take'] = """
return a new %(klass)s of the values selected by the indices
For internal compatibility with numpy arrays.
Parameters
----------
indices : list
Indices to be taken
axis : int, optional
The axis over which to select values, always 0.
allow_fill : bool, default True
        fill_value : scalar, default None
            If allow_fill=True and fill_value is not None, indices specified by
            -1 are regarded as NA. If the Index doesn't hold NA, a ValueError
            is raised.
See also
--------
numpy.ndarray.take
"""
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
if kwargs:
nv.validate_take(tuple(), kwargs)
indices = _ensure_platform_int(indices)
if self._can_hold_na:
taken = self._assert_take_fillable(self.values, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=self._na_value)
else:
if allow_fill and fill_value is not None:
msg = 'Unable to fill values because {0} cannot contain NA'
raise ValueError(msg.format(self.__class__.__name__))
taken = self.values.take(indices)
return self._shallow_copy(taken)
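    # Illustrative sketch (not part of the original pandas source): ``take``
    # selects by position; with allow_fill=True and a non-None fill_value, a -1
    # index marks a missing position that is filled with the index's NA value.
    # Assumes ``import numpy as np`` and ``import pandas as pd``.
    # >>> idx = pd.Index(['a', 'b', 'c'])
    # >>> idx.take([2, 0, 1])
    # Index(['c', 'a', 'b'], dtype='object')
    # >>> pd.Index([1.0, 2.0]).take([0, -1], allow_fill=True, fill_value=np.nan)
    # Float64Index([1.0, nan], dtype='float64')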
def _assert_take_fillable(self, values, indices, allow_fill=True,
fill_value=None, na_value=np.nan):
""" Internal method to handle NA filling of take """
indices = _ensure_platform_int(indices)
# only fill if we are passing a non-None fill_value
if allow_fill and fill_value is not None:
if (indices < -1).any():
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
raise ValueError(msg)
taken = algos.take(values,
indices,
allow_fill=allow_fill,
fill_value=na_value)
else:
taken = values.take(indices)
return taken
@cache_readonly
def _isnan(self):
""" return if each value is nan"""
if self._can_hold_na:
return isna(self)
else:
# shouldn't reach to this condition by checking hasnans beforehand
values = np.empty(len(self), dtype=np.bool_)
values.fill(False)
return values
@cache_readonly
def _nan_idxs(self):
if self._can_hold_na:
w, = self._isnan.nonzero()
return w
else:
return np.array([], dtype=np.int64)
@cache_readonly
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
if self._can_hold_na:
return self._isnan.any()
else:
return False
def isna(self):
"""
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get
mapped to ``True`` values.
        Everything else gets mapped to ``False`` values. Characters such as
empty strings `''` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
.. versionadded:: 0.20.0
Returns
-------
numpy.ndarray
A boolean array of whether my values are NA
See Also
--------
pandas.Index.notna : boolean inverse of isna.
pandas.Index.dropna : omit entries with missing values.
pandas.isna : top-level isna.
Series.isna : detect missing values in Series object.
Examples
--------
Show which entries in a pandas.Index are NA. The result is an
array.
>>> idx = pd.Index([5.2, 6.0, np.NaN])
>>> idx
Float64Index([5.2, 6.0, nan], dtype='float64')
>>> idx.isna()
array([False, False, True], dtype=bool)
Empty strings are not considered NA values. None is considered an NA
value.
>>> idx = pd.Index(['black', '', 'red', None])
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.isna()
array([False, False, False, True], dtype=bool)
For datetimes, `NaT` (Not a Time) is considered as an NA value.
>>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'),
... pd.Timestamp(''), None, pd.NaT])
>>> idx
DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]', freq=None)
>>> idx.isna()
array([False, True, True, True], dtype=bool)
"""
return self._isnan
isnull = isna
def notna(self):
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to ``True``. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False``
values.
.. versionadded:: 0.20.0
Returns
-------
numpy.ndarray
Boolean array to indicate which entries are not NA.
See also
--------
Index.notnull : alias of notna
Index.isna: inverse of notna
pandas.notna : top-level notna
Examples
--------
Show which entries in an Index are not NA. The result is an
array.
>>> idx = pd.Index([5.2, 6.0, np.NaN])
>>> idx
Float64Index([5.2, 6.0, nan], dtype='float64')
>>> idx.notna()
array([ True, True, False])
        Empty strings are not considered NA values. None is considered an NA
value.
>>> idx = pd.Index(['black', '', 'red', None])
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.notna()
array([ True, True, True, False])
"""
return ~self.isna()
notnull = notna
def putmask(self, mask, value):
"""
return a new Index of the values set with the mask
See also
--------
numpy.ndarray.putmask
"""
values = self.values.copy()
try:
np.putmask(values, mask, self._convert_for_op(value))
return self._shallow_copy(values)
except (ValueError, TypeError) as err:
if is_object_dtype(self):
raise err
# coerces to object
return self.astype(object).putmask(mask, value)
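    # Illustrative sketch (not part of the original pandas source): ``putmask``
    # replaces the masked positions and returns a new Index. Assumes
    # ``import pandas as pd``.
    # >>> idx = pd.Index([1, 2, 3, 4])
    # >>> idx.putmask(idx > 2, 0)
    # Int64Index([1, 2, 0, 0], dtype='int64')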
def format(self, name=False, formatter=None, **kwargs):
"""
Render a string representation of the Index
"""
header = []
if name:
header.append(pprint_thing(self.name,
escape_chars=('\t', '\r', '\n')) if
self.name is not None else '')
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header, **kwargs)
def _format_with_header(self, header, na_rep='NaN', **kwargs):
values = self.values
from pandas.io.formats.format import format_array
if is_categorical_dtype(values.dtype):
values = np.array(values)
elif is_object_dtype(values.dtype):
values = lib.maybe_convert_objects(values, safe=1)
if is_object_dtype(values.dtype):
result = [pprint_thing(x, escape_chars=('\t', '\r', '\n'))
for x in values]
# could have nans
mask = isna(values)
if mask.any():
result = np.array(result)
result[mask] = na_rep
result = result.tolist()
else:
result = _trim_front(format_array(values, None, justify='left'))
return header + result
def to_native_types(self, slicer=None, **kwargs):
"""
Format specified values of `self` and return them.
Parameters
----------
slicer : int, array-like
An indexer into `self` that specifies which values
are used in the formatting process.
kwargs : dict
Options for specifying how the values should be formatted.
These options include the following:
1) na_rep : str
The value that serves as a placeholder for NULL values
2) quoting : bool or None
Whether or not there are quoted values in `self`
3) date_format : str
The format used to represent date-like values
"""
values = self
if slicer is not None:
values = values[slicer]
return values._format_native_types(**kwargs)
def _format_native_types(self, na_rep='', quoting=None, **kwargs):
""" actually format my specific types """
mask = isna(self)
if not self.is_object() and not quoting:
values = np.asarray(self).astype(str)
else:
values = np.array(self, dtype=object, copy=True)
values[mask] = na_rep
return values
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if is_object_dtype(self) and not is_object_dtype(other):
# if other is not object, use other's logic for coercion
return other.equals(self)
try:
return array_equivalent(com._values_from_object(self),
com._values_from_object(other))
except Exception:
return False
def identical(self, other):
"""Similar to equals, but check that other comparable attributes are
also equal
"""
return (self.equals(other) and
all((getattr(self, c, None) == getattr(other, c, None)
for c in self._comparables)) and
type(self) == type(other))
def asof(self, label):
"""
For a sorted index, return the most recent label up to and including
the passed label. Return NaN if not found.
See also
--------
get_loc : asof is a thin wrapper around get_loc with method='pad'
"""
try:
loc = self.get_loc(label, method='pad')
except KeyError:
return self._na_value
else:
if isinstance(loc, slice):
loc = loc.indices(len(self))[-1]
return self[loc]
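    # Illustrative sketch (not part of the original pandas source): ``asof`` pads
    # backwards to the most recent label; labels before the first value return
    # NaN. Assumes ``import pandas as pd``.
    # >>> idx = pd.Index([10, 20, 30])
    # >>> idx.asof(25)
    # 20
    # >>> idx.asof(5)
    # nan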
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
locs = self.values[mask].searchsorted(where.values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where < self.values[first])] = -1
return result
def sort_values(self, return_indexer=False, ascending=True):
"""
Return a sorted copy of the index.
Return a sorted copy of the index, and optionally return the indices
that sorted the index itself.
Parameters
----------
return_indexer : bool, default False
Should the indices that would sort the index be returned.
ascending : bool, default True
Should the index values be sorted in an ascending order.
Returns
-------
sorted_index : pandas.Index
Sorted copy of the index.
indexer : numpy.ndarray, optional
The indices that the index itself was sorted by.
See Also
--------
pandas.Series.sort_values : Sort values of a Series.
pandas.DataFrame.sort_values : Sort values in a DataFrame.
Examples
--------
>>> idx = pd.Index([10, 100, 1, 1000])
>>> idx
Int64Index([10, 100, 1, 1000], dtype='int64')
Sort values in ascending order (default behavior).
>>> idx.sort_values()
Int64Index([1, 10, 100, 1000], dtype='int64')
Sort values in descending order, and also get the indices `idx` was
sorted by.
>>> idx.sort_values(ascending=False, return_indexer=True)
(Int64Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2]))
"""
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
if return_indexer:
return sorted_index, _as
else:
return sorted_index
def sort(self, *args, **kwargs):
raise TypeError("cannot sort an Index object in-place, use "
"sort_values instead")
def sortlevel(self, level=None, ascending=True, sort_remaining=None):
"""
        For internal compatibility with the Index API
Sort the Index. This is for compat with MultiIndex
Parameters
----------
ascending : boolean, default True
False to sort in descending order
level, sort_remaining are compat parameters
Returns
-------
sorted_index : Index
"""
return self.sort_values(return_indexer=True, ascending=ascending)
def shift(self, periods=1, freq=None):
"""
Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int, default 1
Number of periods (or increments) to shift by,
can be positive or negative.
freq : pandas.DateOffset, pandas.Timedelta or string, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
pandas.Index
shifted index
See Also
--------
Series.shift : Shift values of Series.
Examples
--------
Put the first 5 month starts of 2011 into an index.
>>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS')
>>> month_starts
DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01',
'2011-05-01'],
dtype='datetime64[ns]', freq='MS')
Shift the index by 10 days.
>>> month_starts.shift(10, freq='D')
DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11',
'2011-05-11'],
dtype='datetime64[ns]', freq=None)
The default value of `freq` is the `freq` attribute of the index,
which is 'MS' (month start) in this example.
>>> month_starts.shift(10)
DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01',
'2012-03-01'],
dtype='datetime64[ns]', freq='MS')
Notes
-----
This method is only implemented for datetime-like index classes,
i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex.
"""
raise NotImplementedError("Not supported for type %s" %
type(self).__name__)
def argsort(self, *args, **kwargs):
"""
Return the integer indices that would sort the index.
Parameters
----------
*args
Passed to `numpy.ndarray.argsort`.
**kwargs
Passed to `numpy.ndarray.argsort`.
Returns
-------
numpy.ndarray
Integer indices that would sort the index if used as
an indexer.
See also
--------
numpy.argsort : Similar method for NumPy arrays.
Index.sort_values : Return sorted copy of Index.
Examples
--------
>>> idx = pd.Index(['b', 'a', 'd', 'c'])
>>> idx
Index(['b', 'a', 'd', 'c'], dtype='object')
>>> order = idx.argsort()
>>> order
array([1, 0, 3, 2])
>>> idx[order]
Index(['a', 'b', 'c', 'd'], dtype='object')
"""
result = self.asi8
if result is None:
result = np.array(self)
return result.argsort(*args, **kwargs)
def __add__(self, other):
return Index(np.array(self) + other)
def __radd__(self, other):
return Index(other + np.array(self))
def __iadd__(self, other):
# alias for __add__
return self + other
def __sub__(self, other):
raise TypeError("cannot perform __sub__ with this index type: "
"{typ}".format(typ=type(self).__name__))
def __and__(self, other):
return self.intersection(other)
def __or__(self, other):
return self.union(other)
def __xor__(self, other):
return self.symmetric_difference(other)
def _get_consensus_name(self, other):
"""
        Given 2 indexes, give a consensus name, meaning we take the not-None
        one, or None if the names differ. Return a new object if we are
        resetting the name
"""
if self.name != other.name:
if self.name is None or other.name is None:
name = self.name or other.name
else:
name = None
if self.name != name:
return self._shallow_copy(name=name)
return self
def union(self, other):
"""
        Form the union of two Index objects, sorting if possible.
Parameters
----------
other : Index or array-like
Returns
-------
union : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.union(idx2)
Int64Index([1, 2, 3, 4, 5, 6], dtype='int64')
"""
self._assert_can_do_setop(other)
other = _ensure_index(other)
if len(other) == 0 or self.equals(other):
return self._get_consensus_name(other)
if len(self) == 0:
return other._get_consensus_name(self)
# TODO: is_dtype_union_equal is a hack around
# 1. buggy set ops with duplicates (GH #13432)
# 2. CategoricalIndex lacking setops (GH #10186)
# Once those are fixed, this workaround can be removed
if not is_dtype_union_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.union(other)
# TODO(EA): setops-refactor, clean all this up
if is_period_dtype(self) or is_datetime64tz_dtype(self):
lvals = self._ndarray_values
else:
lvals = self._values
if is_period_dtype(other) or is_datetime64tz_dtype(other):
rvals = other._ndarray_values
else:
rvals = other._values
if self.is_monotonic and other.is_monotonic:
try:
result = self._outer_indexer(lvals, rvals)[0]
except TypeError:
# incomparable objects
result = list(lvals)
# worth making this faster? a very unusual case
value_set = set(lvals)
result.extend([x for x in rvals if x not in value_set])
else:
indexer = self.get_indexer(other)
indexer, = (indexer == -1).nonzero()
if len(indexer) > 0:
other_diff = algos.take_nd(rvals, indexer,
allow_fill=False)
result = _concat._concat_compat((lvals, other_diff))
try:
lvals[0] < other_diff[0]
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning,
stacklevel=3)
else:
types = frozenset((self.inferred_type,
other.inferred_type))
if not types & _unsortable_types:
result.sort()
else:
result = lvals
try:
result = np.sort(result)
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning,
stacklevel=3)
# for subclasses
return self._wrap_union_result(other, result)
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
return self.__class__(result, name=name)
def intersection(self, other):
"""
Form the intersection of two Index objects.
This returns a new Index with elements common to the index and `other`,
preserving the order of the calling index.
Parameters
----------
other : Index or array-like
Returns
-------
intersection : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.intersection(idx2)
Int64Index([3, 4], dtype='int64')
"""
self._assert_can_do_setop(other)
other = _ensure_index(other)
if self.equals(other):
return self._get_consensus_name(other)
if not is_dtype_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.intersection(other)
# TODO(EA): setops-refactor, clean all this up
if is_period_dtype(self):
lvals = self._ndarray_values
else:
lvals = self._values
if is_period_dtype(other):
rvals = other._ndarray_values
else:
rvals = other._values
if self.is_monotonic and other.is_monotonic:
try:
result = self._inner_indexer(lvals, rvals)[0]
return self._wrap_union_result(other, result)
except TypeError:
pass
try:
indexer = Index(rvals).get_indexer(lvals)
indexer = indexer.take((indexer != -1).nonzero()[0])
except Exception:
# duplicates
indexer = algos.unique1d(
Index(rvals).get_indexer_non_unique(lvals)[0])
indexer = indexer[indexer != -1]
taken = other.take(indexer)
if self.name != other.name:
taken.name = None
return taken
def difference(self, other):
"""
Return a new Index with elements from the index that are not in
`other`.
This is the set difference of two Index objects.
It's sorted if sorting is possible.
Parameters
----------
other : Index or array-like
Returns
-------
difference : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.difference(idx2)
Int64Index([1, 2], dtype='int64')
"""
self._assert_can_do_setop(other)
if self.equals(other):
return self._shallow_copy([])
other, result_name = self._convert_can_do_setop(other)
this = self._get_unique_index()
indexer = this.get_indexer(other)
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer,
assume_unique=True)
the_diff = this.values.take(label_diff)
try:
the_diff = sorting.safe_sort(the_diff)
except TypeError:
pass
return this._shallow_copy(the_diff, name=result_name, freq=None)
def symmetric_difference(self, other, result_name=None):
"""
Compute the symmetric difference of two Index objects.
It's sorted if sorting is possible.
Parameters
----------
other : Index or array-like
result_name : str
Returns
-------
symmetric_difference : Index
Notes
-----
``symmetric_difference`` contains elements that appear in either
``idx1`` or ``idx2`` but not both. Equivalent to the Index created by
``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates
dropped.
Examples
--------
>>> idx1 = Index([1, 2, 3, 4])
>>> idx2 = Index([2, 3, 4, 5])
>>> idx1.symmetric_difference(idx2)
Int64Index([1, 5], dtype='int64')
You can also use the ``^`` operator:
>>> idx1 ^ idx2
Int64Index([1, 5], dtype='int64')
"""
self._assert_can_do_setop(other)
other, result_name_update = self._convert_can_do_setop(other)
if result_name is None:
result_name = result_name_update
this = self._get_unique_index()
other = other._get_unique_index()
indexer = this.get_indexer(other)
# {this} minus {other}
common_indexer = indexer.take((indexer != -1).nonzero()[0])
left_indexer = np.setdiff1d(np.arange(this.size), common_indexer,
assume_unique=True)
left_diff = this.values.take(left_indexer)
# {other} minus {this}
right_indexer = (indexer == -1).nonzero()[0]
right_diff = other.values.take(right_indexer)
the_diff = _concat._concat_compat([left_diff, right_diff])
try:
the_diff = sorting.safe_sort(the_diff)
except TypeError:
pass
attribs = self._get_attributes_dict()
attribs['name'] = result_name
if 'freq' in attribs:
attribs['freq'] = None
return self._shallow_copy_with_infer(the_diff, **attribs)
def _get_unique_index(self, dropna=False):
"""
Returns an index containing unique values.
Parameters
----------
dropna : bool
If True, NaN values are dropped.
Returns
-------
uniques : index
"""
if self.is_unique and not dropna:
return self
values = self.values
if not self.is_unique:
values = self.unique()
if dropna:
try:
if self.hasnans:
values = values[~isna(values)]
except NotImplementedError:
pass
return self._shallow_copy(values)
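    # Illustrative sketch (not part of the original pandas source): behaviour of
    # the private ``_get_unique_index`` helper used by the set operations above.
    # Assumes ``import numpy as np`` and ``import pandas as pd``.
    # >>> pd.Index([1, 1, 2, np.nan])._get_unique_index(dropna=True)
    # Float64Index([1.0, 2.0], dtype='float64')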
_index_shared_docs['get_loc'] = """
Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
tolerance : optional
Maximum distance from index value for inexact matches. The value of
            the index at the matching location must satisfy the equation
``abs(index[loc] - key) <= tolerance``.
Tolerance may be a scalar
value, which applies the same tolerance to all values, or
list-like, which applies variable tolerance per element. List-like
includes list, tuple, array, Series, and must be the same size as
the index and its dtype must exactly match the index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
loc : int if unique index, slice if monotonic index, else mask
Examples
---------
>>> unique_index = pd.Index(list('abc'))
>>> unique_index.get_loc('b')
1
>>> monotonic_index = pd.Index(list('abbc'))
>>> monotonic_index.get_loc('b')
slice(1, 3, None)
>>> non_monotonic_index = pd.Index(list('abcb'))
>>> non_monotonic_index.get_loc('b')
array([False, True, False, True], dtype=bool)
"""
@Appender(_index_shared_docs['get_loc'])
def get_loc(self, key, method=None, tolerance=None):
if method is None:
if tolerance is not None:
raise ValueError('tolerance argument only valid if using pad, '
'backfill or nearest lookups')
try:
return self._engine.get_loc(key)
except KeyError:
return self._engine.get_loc(self._maybe_cast_indexer(key))
indexer = self.get_indexer([key], method=method, tolerance=tolerance)
if indexer.ndim > 1 or indexer.size > 1:
raise TypeError('get_loc requires scalar valued input')
loc = indexer.item()
if loc == -1:
raise KeyError(key)
return loc
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
# if we have something that is Index-like, then
# use this, e.g. DatetimeIndex
s = getattr(series, '_values', None)
if isinstance(s, (ExtensionArray, Index)) and is_scalar(key):
# GH 20825
# Unify Index and ExtensionArray treatment
# First try to convert the key to a location
# If that fails, see if key is an integer, and
# try that
try:
iloc = self.get_loc(key)
return s[iloc]
except KeyError:
if is_integer(key):
return s[key]
s = com._values_from_object(series)
k = com._values_from_object(key)
k = self._convert_scalar_indexer(k, kind='getitem')
try:
return self._engine.get_value(s, k,
tz=getattr(series.dtype, 'tz', None))
except KeyError as e1:
if len(self) > 0 and self.inferred_type in ['integer', 'boolean']:
raise
try:
return libindex.get_value_box(s, key)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# python 3
if is_scalar(key): # pragma: no cover
raise IndexError(key)
raise InvalidIndexError(key)
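    # Illustrative sketch (not part of the original pandas source): ``get_value``
    # looks up a label in the series values via this index's engine. Assumes
    # ``import pandas as pd``.
    # >>> s = pd.Series([10, 20, 30], index=['a', 'b', 'c'])
    # >>> s.index.get_value(s, 'b')
    # 20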
def set_value(self, arr, key, value):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
self._engine.set_value(com._values_from_object(arr),
com._values_from_object(key), value)
def _get_level_values(self, level):
"""
Return an Index of values for requested level, equal to the length
of the index.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
values : Index
``self``, as there is only one level in the Index.
See also
---------
pandas.MultiIndex.get_level_values : get values for a level of a
MultiIndex
"""
self._validate_index_level(level)
return self
get_level_values = _get_level_values
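    # Illustrative sketch (not part of the original pandas source): on a flat
    # Index, ``get_level_values`` validates the level and returns ``self``.
    # Assumes ``import pandas as pd``.
    # >>> idx = pd.Index(list('abc'), name='x')
    # >>> idx.get_level_values('x')
    # Index(['a', 'b', 'c'], dtype='object', name='x')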
def droplevel(self, level=0):
"""
Return index with requested level(s) removed. If resulting index has
only 1 level left, the result will be of Index type, not MultiIndex.
.. versionadded:: 0.23.1 (support for non-MultiIndex)
Parameters
----------
level : int, str, or list-like, default 0
If a string is given, must be the name of a level
If list-like, elements must be names or indexes of levels.
Returns
-------
index : Index or MultiIndex
"""
if not isinstance(level, (tuple, list)):
level = [level]
levnums = sorted(self._get_level_number(lev) for lev in level)[::-1]
if len(level) == 0:
return self
if len(level) >= self.nlevels:
raise ValueError("Cannot remove {} levels from an index with {} "
"levels: at least one level must be "
"left.".format(len(level), self.nlevels))
# The two checks above guarantee that here self is a MultiIndex
new_levels = list(self.levels)
new_labels = list(self.labels)
new_names = list(self.names)
for i in levnums:
new_levels.pop(i)
new_labels.pop(i)
new_names.pop(i)
if len(new_levels) == 1:
# set nan if needed
mask = new_labels[0] == -1
result = new_levels[0].take(new_labels[0])
if mask.any():
result = result.putmask(mask, np.nan)
result.name = new_names[0]
return result
else:
from .multi import MultiIndex
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
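    # Illustrative sketch (not part of the original pandas source): dropping down
    # to a single level returns a plain Index. Assumes ``import pandas as pd``.
    # >>> mi = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']], names=['x', 'y'])
    # >>> mi.droplevel('x')
    # Index(['a', 'b'], dtype='object', name='y')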
_index_shared_docs['get_indexer'] = """
Compute indexer and mask for new index given the current index. The
indexer should be then used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : %(target_klass)s
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
limit : int, optional
Maximum number of consecutive labels in ``target`` to match for
inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
            matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Examples
--------
>>> indexer = index.get_indexer(new_index)
>>> new_values = cur_values.take(indexer)
Returns
-------
indexer : ndarray of int
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
"""
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = _ensure_index(target)
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance, target)
# Treat boolean labels passed to a numeric index as not found. Without
# this fix False and True would be treated as 0 and 1 respectively.
# (GH #16877)
if target.is_boolean() and self.is_numeric():
return _ensure_platform_int(np.repeat(-1, target.size))
pself, ptarget = self._maybe_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer(ptarget, method=method, limit=limit,
tolerance=tolerance)
if not is_dtype_equal(self.dtype, target.dtype):
this = self.astype(object)
target = target.astype(object)
return this.get_indexer(target, method=method, limit=limit,
tolerance=tolerance)
if not self.is_unique:
raise InvalidIndexError('Reindexing only valid with uniquely'
' valued Index objects')
if method == 'pad' or method == 'backfill':
indexer = self._get_fill_indexer(target, method, limit, tolerance)
elif method == 'nearest':
indexer = self._get_nearest_indexer(target, limit, tolerance)
else:
if tolerance is not None:
raise ValueError('tolerance argument only valid if doing pad, '
'backfill or nearest reindexing')
if limit is not None:
raise ValueError('limit argument only valid if doing pad, '
'backfill or nearest reindexing')
indexer = self._engine.get_indexer(target._ndarray_values)
return _ensure_platform_int(indexer)
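    # Illustrative sketch (not part of the original pandas source):
    # ``get_indexer`` maps each target label to its position in the index, with
    # -1 for labels that are not found. Assumes ``import pandas as pd``.
    # >>> pd.Index(['c', 'a', 'b']).get_indexer(['a', 'b', 'x'])
    # array([ 1,  2, -1])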
def _convert_tolerance(self, tolerance, target):
# override this method on subclasses
tolerance = np.asarray(tolerance)
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError('list-like tolerance size must match '
'target index size')
return tolerance
def _get_fill_indexer(self, target, method, limit=None, tolerance=None):
if self.is_monotonic_increasing and target.is_monotonic_increasing:
method = (self._engine.get_pad_indexer if method == 'pad' else
self._engine.get_backfill_indexer)
indexer = method(target._ndarray_values, limit)
else:
indexer = self._get_fill_indexer_searchsorted(target, method,
limit)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target._ndarray_values,
indexer,
tolerance)
return indexer
def _get_fill_indexer_searchsorted(self, target, method, limit=None):
"""
Fallback pad/backfill get_indexer that works for monotonic decreasing
indexes and non-monotonic targets
"""
if limit is not None:
raise ValueError('limit argument for %r method only well-defined '
'if index and target are monotonic' % method)
side = 'left' if method == 'pad' else 'right'
# find exact matches first (this simplifies the algorithm)
indexer = self.get_indexer(target)
nonexact = (indexer == -1)
indexer[nonexact] = self._searchsorted_monotonic(target[nonexact],
side)
if side == 'left':
# searchsorted returns "indices into a sorted array such that,
# if the corresponding elements in v were inserted before the
# indices, the order of a would be preserved".
# Thus, we need to subtract 1 to find values to the left.
indexer[nonexact] -= 1
# This also mapped not found values (values of 0 from
# np.searchsorted) to -1, which conveniently is also our
# sentinel for missing values
else:
# Mark indices to the right of the largest value as not found
indexer[indexer == len(self)] = -1
return indexer
def _get_nearest_indexer(self, target, limit, tolerance):
"""
Get the indexer for the nearest index labels; requires an index with
values that can be subtracted from each other (e.g., not strings or
tuples).
"""
left_indexer = self.get_indexer(target, 'pad', limit=limit)
right_indexer = self.get_indexer(target, 'backfill', limit=limit)
target = np.asarray(target)
left_distances = abs(self.values[left_indexer] - target)
right_distances = abs(self.values[right_indexer] - target)
op = operator.lt if self.is_monotonic_increasing else operator.le
indexer = np.where(op(left_distances, right_distances) |
(right_indexer == -1), left_indexer, right_indexer)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target, indexer,
tolerance)
return indexer
def _filter_indexer_tolerance(self, target, indexer, tolerance):
distance = abs(self.values[indexer] - target)
indexer = np.where(distance <= tolerance, indexer, -1)
return indexer
_index_shared_docs['get_indexer_non_unique'] = """
Compute indexer and mask for new index given the current index. The
indexer should be then used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : %(target_klass)s
Returns
-------
indexer : ndarray of int
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
missing : ndarray of int
An indexer into the target of the values not found.
These correspond to the -1 in the indexer array
"""
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = _ensure_index(target)
if is_categorical(target):
target = target.astype(target.dtype.categories.dtype)
pself, ptarget = self._maybe_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer_non_unique(ptarget)
if self.is_all_dates:
self = Index(self.asi8)
tgt_values = target.asi8
else:
tgt_values = target._ndarray_values
indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
return _ensure_platform_int(indexer), missing
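    # Illustrative sketch (not part of the original pandas source): with
    # duplicate labels the indexer expands to every matching position and
    # ``missing`` holds the target positions that were not found. Assumes
    # ``import pandas as pd``; exact dtypes in the reprs may vary by platform.
    # >>> pd.Index(['a', 'b', 'b']).get_indexer_non_unique(['b', 'x'])
    # (array([ 1,  2, -1]), array([1]))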
def get_indexer_for(self, target, **kwargs):
"""
        Guaranteed return of an indexer even when non-unique.
        This dispatches to get_indexer or get_indexer_non_unique as appropriate
"""
if self.is_unique:
return self.get_indexer(target, **kwargs)
indexer, _ = self.get_indexer_non_unique(target, **kwargs)
return indexer
def _maybe_promote(self, other):
# A hack, but it works
from pandas.core.indexes.datetimes import DatetimeIndex
if self.inferred_type == 'date' and isinstance(other, DatetimeIndex):
return DatetimeIndex(self), other
elif self.inferred_type == 'boolean':
if not is_object_dtype(self.dtype):
return self.astype('object'), other.astype('object')
return self, other
def groupby(self, values):
"""
Group the index labels by a given array of values.
Parameters
----------
values : array
Values used to determine the groups.
Returns
-------
groups : dict
{group name -> group labels}
"""
# TODO: if we are a MultiIndex, we can do better
        # than converting to tuples
from .multi import MultiIndex
if isinstance(values, MultiIndex):
values = values.values
values = _ensure_categorical(values)
result = values._reverse_indexer()
# map to the label
result = {k: self.take(v) for k, v in compat.iteritems(result)}
return result
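    # Illustrative sketch (not part of the original pandas source): ``groupby``
    # returns a dict mapping each grouping value to the index labels in that
    # group. Assumes ``import numpy as np`` and ``import pandas as pd``.
    # >>> idx = pd.Index(['a', 'b', 'c', 'd'])
    # >>> idx.groupby(np.array([1, 1, 2, 2]))[1]
    # Index(['a', 'b'], dtype='object')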
def map(self, mapper, na_action=None):
"""
Map values using input correspondence (a dict, Series, or function).
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mapping correspondence.
Returns
-------
applied : Union[Index, MultiIndex], inferred
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
"""
from .multi import MultiIndex
new_values = super(Index, self)._map_values(
mapper, na_action=na_action)
attributes = self._get_attributes_dict()
# we can return a MultiIndex
if new_values.size and isinstance(new_values[0], tuple):
if isinstance(self, MultiIndex):
names = self.names
elif attributes.get('name'):
names = [attributes.get('name')] * len(new_values[0])
else:
names = None
return MultiIndex.from_tuples(new_values,
names=names)
attributes['copy'] = False
if not new_values.size:
# empty
attributes['dtype'] = self.dtype
return Index(new_values, **attributes)
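    # Illustrative sketch (not part of the original pandas source): ``map``
    # applies a function, dict or Series elementwise and infers the resulting
    # Index type. Assumes ``import pandas as pd``.
    # >>> pd.Index([1, 2, 3]).map(lambda x: x * 10)
    # Int64Index([10, 20, 30], dtype='int64')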
def isin(self, values, level=None):
"""
Return a boolean array where the index values are in `values`.
Compute boolean array of whether each index value is found in the
passed set of values. The length of the returned boolean array matches
the length of the index.
Parameters
----------
values : set or list-like
Sought values.
.. versionadded:: 0.18.1
Support for values as a set.
level : str or int, optional
Name or position of the index level to use (if the index is a
`MultiIndex`).
Returns
-------
is_contained : ndarray
NumPy array of boolean values.
See also
--------
Series.isin : Same for Series.
DataFrame.isin : Same method for DataFrames.
Notes
-----
In the case of `MultiIndex` you must either specify `values` as a
list-like object containing tuples that are the same length as the
number of levels, or specify `level`. Otherwise it will raise a
``ValueError``.
If `level` is specified:
- if it is the name of one *and only one* index level, use that level;
- otherwise it should be a number indicating level position.
Examples
--------
>>> idx = pd.Index([1,2,3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
        Check whether each index value is in a list of values.
>>> idx.isin([1, 4])
array([ True, False, False])
>>> midx = pd.MultiIndex.from_arrays([[1,2,3],
... ['red', 'blue', 'green']],
... names=('number', 'color'))
>>> midx
MultiIndex(levels=[[1, 2, 3], ['blue', 'green', 'red']],
labels=[[0, 1, 2], [2, 0, 1]],
names=['number', 'color'])
Check whether the strings in the 'color' level of the MultiIndex
are in a list of colors.
>>> midx.isin(['red', 'orange', 'yellow'], level='color')
array([ True, False, False])
To check across the levels of a MultiIndex, pass a list of tuples:
>>> midx.isin([(1, 'red'), (3, 'red')])
array([ True, False, False])
For a DatetimeIndex, string values in `values` are converted to
Timestamps.
>>> dates = ['2000-03-11', '2000-03-12', '2000-03-13']
>>> dti = pd.to_datetime(dates)
>>> dti
DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'],
dtype='datetime64[ns]', freq=None)
>>> dti.isin(['2000-03-11'])
array([ True, False, False])
"""
if level is not None:
self._validate_index_level(level)
return algos.isin(self, values)
def _can_reindex(self, indexer):
"""
*this is an internal non-public method*
Check if we are allowing reindexing with this particular indexer
Parameters
----------
indexer : an integer indexer
Raises
------
        ValueError if it is a duplicate axis
"""
# trying to reindex on an axis with duplicates
if not self.is_unique and len(indexer):
raise ValueError("cannot reindex from a duplicate axis")
def reindex(self, target, method=None, level=None, limit=None,
tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, 'name')
# GH7774: preserve dtype/tz if target is empty and not an Index.
target = _ensure_has_len(target) # target may be an iterator
if not isinstance(target, Index) and len(target) == 0:
attrs = self._get_attributes_dict()
attrs.pop('freq', None) # don't preserve freq
target = self._simple_new(None, dtype=self.dtype, **attrs)
else:
target = _ensure_index(target)
if level is not None:
if method is not None:
raise TypeError('Fill method not supported if level passed')
_, indexer, _ = self._join_level(target, level, how='right',
return_indexers=True)
else:
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(target, method=method,
limit=limit,
tolerance=tolerance)
else:
if method is not None or limit is not None:
raise ValueError("cannot reindex a non-unique index "
"with a method or limit")
indexer, missing = self.get_indexer_non_unique(target)
if preserve_names and target.nlevels == 1 and target.name != self.name:
target = target.copy()
target.name = self.name
return target, indexer
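    # Illustrative sketch (not part of the original pandas source): ``reindex``
    # returns the new index plus the indexer into the original index, with -1
    # for labels that did not exist. Assumes ``import pandas as pd``.
    # >>> pd.Index(['a', 'b', 'c']).reindex(['b', 'c', 'd'])
    # (Index(['b', 'c', 'd'], dtype='object'), array([ 1,  2, -1]))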
def _reindex_non_unique(self, target):
"""
*this is an internal non-public method*
        Create a new index with target's values (move/add/delete values as
        necessary); use with a non-unique Index and a possibly non-unique target
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
target = _ensure_index(target)
indexer, missing = self.get_indexer_non_unique(target)
check = indexer != -1
new_labels = self.take(indexer[check])
new_indexer = None
if len(missing):
length = np.arange(len(indexer))
missing = _ensure_platform_int(missing)
missing_labels = target.take(missing)
missing_indexer = _ensure_int64(length[~check])
cur_labels = self.take(indexer[check]).values
cur_indexer = _ensure_int64(length[check])
new_labels = np.empty(tuple([len(indexer)]), dtype=object)
new_labels[cur_indexer] = cur_labels
new_labels[missing_indexer] = missing_labels
# a unique indexer
if target.is_unique:
# see GH5553, make sure we use the right indexer
new_indexer = np.arange(len(indexer))
new_indexer[cur_indexer] = np.arange(len(cur_labels))
new_indexer[missing_indexer] = -1
# we have a non_unique selector, need to use the original
# indexer here
else:
# need to retake to have the same size as the indexer
indexer[~check] = 0
# reset the new indexer to account for the new size
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[~check] = -1
new_index = self._shallow_copy_with_infer(new_labels, freq=None)
return new_index, indexer, new_indexer
_index_shared_docs['join'] = """
*this is an internal non-public method*
Compute join_index and indexers to conform data
structures to the new index.
Parameters
----------
other : Index
how : {'left', 'right', 'inner', 'outer'}
level : int or level name, default None
return_indexers : boolean, default False
sort : boolean, default False
Sort the join keys lexicographically in the result Index. If False,
the order of the join keys depends on the join type (how keyword)
.. versionadded:: 0.20.0
Returns
-------
join_index, (left_indexer, right_indexer)
"""
@Appender(_index_shared_docs['join'])
def join(self, other, how='left', level=None, return_indexers=False,
sort=False):
from .multi import MultiIndex
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# try to figure out the join level
# GH3662
if level is None and (self_is_mi or other_is_mi):
# have the same levels/names so a simple join
if self.names == other.names:
pass
else:
return self._join_multi(other, how=how,
return_indexers=return_indexers)
# join on the level
if level is not None and (self_is_mi or other_is_mi):
return self._join_level(other, level, how=how,
return_indexers=return_indexers)
other = _ensure_index(other)
if len(other) == 0 and how in ('left', 'outer'):
join_index = self._shallow_copy()
if return_indexers:
rindexer = np.repeat(-1, len(join_index))
return join_index, None, rindexer
else:
return join_index
if len(self) == 0 and how in ('right', 'outer'):
join_index = other._shallow_copy()
if return_indexers:
lindexer = np.repeat(-1, len(join_index))
return join_index, lindexer, None
else:
return join_index
if self._join_precedence < other._join_precedence:
how = {'right': 'left', 'left': 'right'}.get(how, how)
result = other.join(self, how=how, level=level,
return_indexers=return_indexers)
if return_indexers:
x, y, z = result
result = x, z, y
return result
if not is_dtype_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.join(other, how=how, return_indexers=return_indexers)
_validate_join_method(how)
if not self.is_unique and not other.is_unique:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif not self.is_unique or not other.is_unique:
if self.is_monotonic and other.is_monotonic:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
else:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif self.is_monotonic and other.is_monotonic:
try:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
except TypeError:
pass
if how == 'left':
join_index = self
elif how == 'right':
join_index = other
elif how == 'inner':
join_index = self.intersection(other)
elif how == 'outer':
join_index = self.union(other)
if sort:
join_index = join_index.sort_values()
if return_indexers:
if join_index is self:
lindexer = None
else:
lindexer = self.get_indexer(join_index)
if join_index is other:
rindexer = None
else:
rindexer = other.get_indexer(join_index)
return join_index, lindexer, rindexer
else:
return join_index
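    # Illustrative sketch (not part of the original pandas source): joining two
    # unique, monotonic indexes takes the fast ``_join_monotonic`` path below.
    # Assumes ``import pandas as pd``.
    # >>> pd.Index([1, 2, 3]).join(pd.Index([2, 3, 4]), how='inner')
    # Int64Index([2, 3], dtype='int64')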
def _join_multi(self, other, how, return_indexers=True):
from .multi import MultiIndex
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# figure out join names
self_names = com._not_none(*self.names)
other_names = com._not_none(*other.names)
overlap = list(set(self_names) & set(other_names))
# need at least 1 in common, but not more than 1
if not len(overlap):
raise ValueError("cannot join with no level specified and no "
"overlapping names")
if len(overlap) > 1:
raise NotImplementedError("merging with more than one level "
"overlap on a multi-index is not "
"implemented")
jl = overlap[0]
# make the indices into mi's that match
if not (self_is_mi and other_is_mi):
flip_order = False
if self_is_mi:
self, other = other, self
flip_order = True
# flip if join method is right or left
how = {'right': 'left', 'left': 'right'}.get(how, how)
level = other.names.index(jl)
result = self._join_level(other, level, how=how,
return_indexers=return_indexers)
if flip_order:
if isinstance(result, tuple):
return result[0], result[2], result[1]
return result
# 2 multi-indexes
raise NotImplementedError("merging with both multi-indexes is not "
"implemented")
def _join_non_unique(self, other, how='left', return_indexers=False):
from pandas.core.reshape.merge import _get_join_indexers
left_idx, right_idx = _get_join_indexers([self._ndarray_values],
[other._ndarray_values],
how=how,
sort=True)
left_idx = _ensure_platform_int(left_idx)
right_idx = _ensure_platform_int(right_idx)
join_index = np.asarray(self._ndarray_values.take(left_idx))
mask = left_idx == -1
np.putmask(join_index, mask, other._ndarray_values.take(right_idx))
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
return join_index, left_idx, right_idx
else:
return join_index
def _join_level(self, other, level, how='left', return_indexers=False,
keep_order=True):
"""
The join method *only* affects the level of the resulting
MultiIndex. Otherwise it just exactly aligns the Index data to the
labels of the level in the MultiIndex. If `keep_order` == True, the
order of the data indexed by the MultiIndex will not be changed;
otherwise, it will tie out with `other`.
"""
from .multi import MultiIndex
def _get_leaf_sorter(labels):
"""
            Returns a sorter for the innermost level while preserving the
            order of higher levels
"""
if labels[0].size == 0:
return np.empty(0, dtype='int64')
if len(labels) == 1:
lab = _ensure_int64(labels[0])
sorter, _ = libalgos.groupsort_indexer(lab, 1 + lab.max())
return sorter
# find indexers of beginning of each set of
# same-key labels w.r.t all but last level
tic = labels[0][:-1] != labels[0][1:]
for lab in labels[1:-1]:
tic |= lab[:-1] != lab[1:]
starts = np.hstack(([True], tic, [True])).nonzero()[0]
lab = _ensure_int64(labels[-1])
return lib.get_level_sorter(lab, _ensure_int64(starts))
if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
raise TypeError('Join on level between two MultiIndex objects '
'is ambiguous')
left, right = self, other
flip_order = not isinstance(self, MultiIndex)
if flip_order:
left, right = right, left
how = {'right': 'left', 'left': 'right'}.get(how, how)
level = left._get_level_number(level)
old_level = left.levels[level]
if not right.is_unique:
raise NotImplementedError('Index._join_level on non-unique index '
'is not implemented')
new_level, left_lev_indexer, right_lev_indexer = \
old_level.join(right, how=how, return_indexers=True)
if left_lev_indexer is None:
if keep_order or len(left) == 0:
left_indexer = None
join_index = left
else: # sort the leaves
left_indexer = _get_leaf_sorter(left.labels[:level + 1])
join_index = left[left_indexer]
else:
left_lev_indexer = _ensure_int64(left_lev_indexer)
rev_indexer = lib.get_reverse_indexer(left_lev_indexer,
len(old_level))
new_lev_labels = algos.take_nd(rev_indexer, left.labels[level],
allow_fill=False)
new_labels = list(left.labels)
new_labels[level] = new_lev_labels
new_levels = list(left.levels)
new_levels[level] = new_level
if keep_order: # just drop missing values. o.w. keep order
left_indexer = np.arange(len(left), dtype=np.intp)
mask = new_lev_labels != -1
if not mask.all():
new_labels = [lab[mask] for lab in new_labels]
left_indexer = left_indexer[mask]
else: # tie out the order with other
if level == 0: # outer most level, take the fast route
ngroups = 1 + new_lev_labels.max()
left_indexer, counts = libalgos.groupsort_indexer(
new_lev_labels, ngroups)
# missing values are placed first; drop them!
left_indexer = left_indexer[counts[0]:]
new_labels = [lab[left_indexer] for lab in new_labels]
else: # sort the leaves
mask = new_lev_labels != -1
mask_all = mask.all()
if not mask_all:
new_labels = [lab[mask] for lab in new_labels]
left_indexer = _get_leaf_sorter(new_labels[:level + 1])
new_labels = [lab[left_indexer] for lab in new_labels]
# left_indexers are w.r.t masked frame.
# reverse to original frame!
if not mask_all:
left_indexer = mask.nonzero()[0][left_indexer]
join_index = MultiIndex(levels=new_levels, labels=new_labels,
names=left.names, verify_integrity=False)
if right_lev_indexer is not None:
right_indexer = algos.take_nd(right_lev_indexer,
join_index.labels[level],
allow_fill=False)
else:
right_indexer = join_index.labels[level]
if flip_order:
left_indexer, right_indexer = right_indexer, left_indexer
if return_indexers:
left_indexer = (None if left_indexer is None
else _ensure_platform_int(left_indexer))
right_indexer = (None if right_indexer is None
else _ensure_platform_int(right_indexer))
return join_index, left_indexer, right_indexer
else:
return join_index
def _join_monotonic(self, other, how='left', return_indexers=False):
if self.equals(other):
ret_index = other if how == 'right' else self
if return_indexers:
return ret_index, None, None
else:
return ret_index
sv = self._ndarray_values
ov = other._ndarray_values
if self.is_unique and other.is_unique:
# We can perform much better than the general case
if how == 'left':
join_index = self
lidx = None
ridx = self._left_indexer_unique(sv, ov)
elif how == 'right':
join_index = other
lidx = self._left_indexer_unique(ov, sv)
ridx = None
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
else:
if how == 'left':
join_index, lidx, ridx = self._left_indexer(sv, ov)
elif how == 'right':
join_index, ridx, lidx = self._left_indexer(ov, sv)
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
lidx = None if lidx is None else _ensure_platform_int(lidx)
ridx = None if ridx is None else _ensure_platform_int(ridx)
return join_index, lidx, ridx
else:
return join_index
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return Index(joined, name=name)
def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
# this is for partial string indexing,
# overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex
raise NotImplementedError
def slice_indexer(self, start=None, end=None, step=None, kind=None):
"""
For an ordered or unique index, compute the slice indexer for input
labels and step.
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, default None
kind : string, default None
Returns
-------
indexer : slice
Raises
------
KeyError : If key does not exist, or key is not unique and index is
not ordered.
Notes
-----
This function assumes that the data is sorted, so use at your own peril
Examples
---------
This is a method on all index types. For example you can do:
>>> idx = pd.Index(list('abcd'))
>>> idx.slice_indexer(start='b', end='c')
slice(1, 3)
>>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')])
>>> idx.slice_indexer(start='b', end=('c', 'g'))
slice(1, 3)
"""
start_slice, end_slice = self.slice_locs(start, end, step=step,
kind=kind)
# return a slice
if not is_scalar(start_slice):
raise AssertionError("Start slice bound is non-scalar")
if not is_scalar(end_slice):
raise AssertionError("End slice bound is non-scalar")
return slice(start_slice, end_slice, step)
def _maybe_cast_indexer(self, key):
"""
If we have a float key and are not a floating index
then try to cast to an int if equivalent
"""
if is_float(key) and not self.is_floating():
try:
ckey = int(key)
if ckey == key:
key = ckey
except (OverflowError, ValueError, TypeError):
pass
return key
def _validate_indexer(self, form, key, kind):
"""
        If we are a positional indexer, validate that we have appropriately
        typed bounds (must be an integer).
"""
assert kind in ['ix', 'loc', 'getitem', 'iloc']
if key is None:
pass
elif is_integer(key):
pass
elif kind in ['iloc', 'getitem']:
self._invalid_indexer(form, key)
return key
_index_shared_docs['_maybe_cast_slice_bound'] = """
This function should be overloaded in subclasses that allow non-trivial
casting on label-slice bounds, e.g. datetime-like indices allowing
strings containing formatted datetimes.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
@Appender(_index_shared_docs['_maybe_cast_slice_bound'])
def _maybe_cast_slice_bound(self, label, side, kind):
assert kind in ['ix', 'loc', 'getitem', None]
# We are a plain index here (sub-class override this method if they
# wish to have special treatment for floats/ints, e.g. Float64Index and
# datetimelike Indexes
# reject them
if is_float(label):
if not (kind in ['ix'] and (self.holds_integer() or
self.is_floating())):
self._invalid_indexer('slice', label)
# we are trying to find integer bounds on a non-integer based index
# this is rejected (generally .loc gets you here)
elif is_integer(label):
self._invalid_indexer('slice', label)
return label
def _searchsorted_monotonic(self, label, side='left'):
if self.is_monotonic_increasing:
return self.searchsorted(label, side=side)
elif self.is_monotonic_decreasing:
# np.searchsorted expects ascending sort order, have to reverse
# everything for it to work (element ordering, search side and
# resulting value).
pos = self[::-1].searchsorted(label, side='right' if side == 'left'
else 'left')
return len(self) - pos
raise ValueError('index must be monotonic increasing or decreasing')
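    # Illustrative sketch (not part of the original pandas source): for a
    # monotonic decreasing index the private ``_searchsorted_monotonic``
    # reverses the search so slice bounds still work. Assumes
    # ``import pandas as pd``.
    # >>> idx = pd.Index([30, 20, 10])
    # >>> idx._searchsorted_monotonic(20, side='left')
    # 1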
def _get_loc_only_exact_matches(self, key):
"""
This is overridden on subclasses (namely, IntervalIndex) to control
get_slice_bound.
"""
return self.get_loc(key)
def get_slice_bound(self, label, side, kind):
"""
Calculate slice bound that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
"""
assert kind in ['ix', 'loc', 'getitem', None]
if side not in ('left', 'right'):
raise ValueError("Invalid value for side kwarg,"
" must be either 'left' or 'right': %s" %
(side, ))
original_label = label
# For datetime indices label may be a string that has to be converted
# to datetime boundary according to its resolution.
label = self._maybe_cast_slice_bound(label, side, kind)
# we need to look up the label
try:
slc = self._get_loc_only_exact_matches(label)
except KeyError as err:
try:
return self._searchsorted_monotonic(label, side)
except ValueError:
# raise the original KeyError
raise err
if isinstance(slc, np.ndarray):
# get_loc may return a boolean array or an array of indices, which
# is OK as long as they are representable by a slice.
if is_bool_dtype(slc):
slc = lib.maybe_booleans_to_slice(slc.view('u1'))
else:
slc = lib.maybe_indices_to_slice(slc.astype('i8'), len(self))
if isinstance(slc, np.ndarray):
raise KeyError("Cannot get %s slice bound for non-unique "
"label: %r" % (side, original_label))
if isinstance(slc, slice):
if side == 'left':
return slc.start
else:
return slc.stop
else:
if side == 'right':
return slc + 1
else:
return slc
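# Illustrative behaviour (example values, not part of the original source):
# for idx = pd.Index(list('abcd')),
# idx.get_slice_bound('b', 'left', 'loc')  -> 1
# idx.get_slice_bound('b', 'right', 'loc') -> 2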
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
Compute slice locations for input labels.
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, defaults None
If None, defaults to 1
kind : {'ix', 'loc', 'getitem'} or None
Returns
-------
start, end : int
Notes
-----
This method only works if the index is monotonic or unique.
Examples
--------
>>> idx = pd.Index(list('abcd'))
>>> idx.slice_locs(start='b', end='c')
(1, 3)
See Also
--------
Index.get_loc : Get location for a single label
"""
inc = (step is None or step >= 0)
if not inc:
# If it's a reverse slice, temporarily swap bounds.
start, end = end, start
start_slice = None
if start is not None:
start_slice = self.get_slice_bound(start, 'left', kind)
if start_slice is None:
start_slice = 0
end_slice = None
if end is not None:
end_slice = self.get_slice_bound(end, 'right', kind)
if end_slice is None:
end_slice = len(self)
if not inc:
# Bounds at this moment are swapped, swap them back and shift by 1.
#
# slice_locs('B', 'A', step=-1): s='B', e='A'
#
# s='A' e='B'
# AFTER SWAP: | |
# v ------------------> V
# -----------------------------------
# | | |A|A|A|A| | | | | |B|B| | | | |
# -----------------------------------
# ^ <------------------ ^
# SHOULD BE: | |
# end=s-1 start=e-1
#
end_slice, start_slice = start_slice - 1, end_slice - 1
# i == -1 triggers ``len(self) + i`` selection that points to the
# last element, not before-the-first one, subtracting len(self)
# compensates that.
if end_slice == -1:
end_slice -= len(self)
if start_slice == -1:
start_slice -= len(self)
return start_slice, end_slice
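# Illustrative behaviour of the reverse-step branch above (example values,
# assuming a 4-element index): for idx = pd.Index(list('abcd')),
# idx.slice_locs('c', 'a', step=-1) -> (2, -5), so that idx[2:-5:-1] yields
# Index(['c', 'b', 'a'], dtype='object').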
def delete(self, loc):
"""
Make new Index with passed location(-s) deleted
Returns
-------
new_index : Index
"""
return self._shallow_copy(np.delete(self._data, loc))
def insert(self, loc, item):
"""
Make new Index inserting new item at location. Follows
Python list.append semantics for negative values
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
"""
if is_scalar(item) and isna(item):
# GH 18295
item = self._na_value
_self = np.asarray(self)
item = self._coerce_scalar_to_index(item)._ndarray_values
idx = np.concatenate((_self[:loc], item, _self[loc:]))
return self._shallow_copy_with_infer(idx)
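# Illustrative behaviour (example values, not part of the original source):
# pd.Index([1, 2, 4]).insert(2, 3)     -> Int64Index([1, 2, 3, 4], dtype='int64')
# pd.Index(['a', 'c']).insert(-1, 'b') -> Index(['a', 'b', 'c'], dtype='object')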
def drop(self, labels, errors='raise'):
"""
Make new Index with passed list of labels deleted
Parameters
----------
labels : array-like
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
dropped : Index
Raises
------
KeyError
If none of the labels are found in the selected axis
"""
arr_dtype = 'object' if self.dtype == 'object' else None
labels = com._index_labels_to_array(labels, dtype=arr_dtype)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
if errors != 'ignore':
raise KeyError(
'labels %s not contained in axis' % labels[mask])
indexer = indexer[~mask]
return self.delete(indexer)
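# Illustrative behaviour (example values, not part of the original source):
# pd.Index(['a', 'b', 'c']).drop(['b'])            -> Index(['a', 'c'], dtype='object')
# pd.Index(['a', 'b']).drop(['z'], errors='ignore') -> Index(['a', 'b'], dtype='object')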
_index_shared_docs['index_unique'] = (
"""
Return unique values in the index. Uniques are returned in order
of appearance, this does NOT sort.
Parameters
----------
level : int or str, optional, default None
Only return values from specified level (for MultiIndex)
.. versionadded:: 0.23.0
Returns
-------
Index without duplicates
See Also
--------
unique
Series.unique
""")
@Appender(_index_shared_docs['index_unique'] % _index_doc_kwargs)
def unique(self, level=None):
if level is not None:
self._validate_index_level(level)
result = super(Index, self).unique()
return self._shallow_copy(result)
def drop_duplicates(self, keep='first'):
"""
Return Index with duplicate values removed.
Parameters
----------
keep : {'first', 'last', ``False``}, default 'first'
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
Returns
-------
deduplicated : Index
See Also
--------
Series.drop_duplicates : equivalent method on Series
DataFrame.drop_duplicates : equivalent method on DataFrame
Index.duplicated : related method on Index, indicating duplicate
Index values.
Examples
--------
Generate a pandas.Index with duplicate values.
>>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'])
The `keep` parameter controls which duplicate values are removed.
The value 'first' keeps the first occurrence for each
set of duplicated entries. The default value of keep is 'first'.
>>> idx.drop_duplicates(keep='first')
Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object')
The value 'last' keeps the last occurrence for each set of duplicated
entries.
>>> idx.drop_duplicates(keep='last')
Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object')
The value ``False`` discards all sets of duplicated entries.
>>> idx.drop_duplicates(keep=False)
Index(['cow', 'beetle', 'hippo'], dtype='object')
"""
return super(Index, self).drop_duplicates(keep=keep)
def duplicated(self, keep='first'):
"""
Indicate duplicate index values.
Duplicated values are indicated as ``True`` values in the resulting
array. Either all duplicates, all except the first, or all except the
last occurrence of duplicates can be indicated.
Parameters
----------
keep : {'first', 'last', False}, default 'first'
The value or values in a set of duplicates to mark as missing.
- 'first' : Mark duplicates as ``True`` except for the first
occurrence.
- 'last' : Mark duplicates as ``True`` except for the last
occurrence.
- ``False`` : Mark all duplicates as ``True``.
Examples
--------
By default, for each set of duplicated values, the first occurrence is
set to False and all others to True:
>>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama'])
>>> idx.duplicated()
array([False, False, True, False, True])
which is equivalent to
>>> idx.duplicated(keep='first')
array([False, False, True, False, True])
By using 'last', the last occurrence of each set of duplicated values
is set to False and all others to True:
>>> idx.duplicated(keep='last')
array([ True, False, True, False, False])
By setting keep to ``False``, all duplicates are True:
>>> idx.duplicated(keep=False)
array([ True, False, True, False, True])
Returns
-------
numpy.ndarray
See Also
--------
pandas.Series.duplicated : Equivalent method on pandas.Series
pandas.DataFrame.duplicated : Equivalent method on pandas.DataFrame
pandas.Index.drop_duplicates : Remove duplicate values from Index
"""
return super(Index, self).duplicated(keep=keep)
_index_shared_docs['fillna'] = """
Fill NA/NaN values with the specified value
Parameters
----------
value : scalar
Scalar value to use to fill holes (e.g. 0).
This value cannot be a list-likes.
downcast : dict, default is None
a dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible)
Returns
-------
filled : %(klass)s
"""
@Appender(_index_shared_docs['fillna'])
def fillna(self, value=None, downcast=None):
self._assert_can_do_op(value)
if self.hasnans:
result = self.putmask(self._isnan, value)
if downcast is None:
# no need to preserve metadata other than name, because
# the index can't have a freq if it has NaTs
return Index(result, name=self.name)
return self._shallow_copy()
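# Illustrative behaviour (example values, not part of the original source):
# pd.Index([1.0, np.nan, 3.0]).fillna(2.0) -> Float64Index([1.0, 2.0, 3.0], dtype='float64')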
_index_shared_docs['dropna'] = """
Return Index without NA/NaN values
Parameters
----------
how : {'any', 'all'}, default 'any'
If the Index is a MultiIndex, drop the value when any or all levels
are NaN.
Returns
-------
valid : Index
"""
@ | Appender(_index_shared_docs['dropna']) | pandas.util._decorators.Appender |
import json
import plotly
import re
import pandas as pd
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize, sent_tokenize
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar, Scatter
from sklearn.externals import joblib
from sklearn.base import BaseEstimator, TransformerMixin
from sqlalchemy import create_engine
import nltk
#nltk.download(['punkt', 'wordnet','stopwords'])
app = Flask(__name__)
class StartingVerbExtractor(BaseEstimator, TransformerMixin):
'''
extract whether the text starts with a verb or verbal phrase (or an 'RT' token)
can be used as a transformer in an sklearn pipeline
returns:
0 or 1
'''
def starting_verb(self, text):
sentence_list = nltk.sent_tokenize(text)
for sentence in sentence_list:
pos_tags = nltk.pos_tag(tokenize(sentence))
try:
first_word, first_tag = pos_tags[0]
if first_tag in ['VB', 'VBP'] or first_word == 'RT':
return 1
except:
return 0
return 0
def fit(self, x, y=None):
return self
def transform(self, X):
X_tagged = pd.Series(X).apply(self.starting_verb)
return pd.DataFrame(X_tagged)
def tokenize(text):
'''
simple tokenization: keep only letters and digits, convert to lowercase, then tokenize and lemmatize using nltk
text: str that will be tokenized
returns new_tokens (list of extracted tokens)
'''
#remove punctuation
text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
#get tokens
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
new_tokens = []
for tok in tokens:
new_tokens.append(lemmatizer.lemmatize(tok).strip())
return new_tokens
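# Illustrative output (example sentence; exact lemmas depend on the installed
# NLTK corpora):
# tokenize("Roads are blocked, we need water!")
# -> ['road', 'are', 'blocked', 'we', 'need', 'water']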
# load data
engine = create_engine('sqlite:///data/DisasterResponse.db')
df = | pd.read_sql_table('messages', engine) | pandas.read_sql_table |
"""
Indicator data preprocessing module.
The low-level metrics do not share timestamps with the high-level SLA data,
so the indicators are aligned and merged by nearest-timestamp matching.
"""
import time
import sys
import pandas as pd
import numpy as np
class Preprocess(object):
def __init__(self, metrics, qos, output):
self.metrics = metrics
self.qos = qos
self.output = output
def execute(self) -> None:
"""
Execute preprocessing
"""
self.__load_and_generate_output()
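# A minimal sketch of nearest-timestamp alignment (illustrative only; the
# actual merge logic lives in __load_and_generate_output below):
#   merged = pd.merge_asof(metrics.sort_index().reset_index(),
#                          qos.sort_index().reset_index(),
#                          on='timestamp', direction='nearest')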
def __load_and_generate_output(self):
# timestamp,context-switches,branch-misses,......
metrics = pd.read_table(self.metrics, header=0, sep=',', index_col=0)
# timestamp qos
qos = | pd.read_table(self.qos, header=0, index_col=0) | pandas.read_table |
#coding=utf-8
import tushare as ts
import talib as ta
import numpy as np
import pandas as pd
import os,time,sys,re,datetime
import csv
import scipy
import smtplib
from email.mime.text import MIMEText
#from email.MIMEMultipart import MIMEMultipart
# Get the stock list
# code: ticker, name: name, industry: industry, area: region, pe: P/E ratio, outstanding: tradable shares, totals: total shares (10k), totalAssets: total assets (10k), liquidAssets: liquid assets
# fixedAssets: fixed assets, reserved: capital reserve, reservedPerShare: reserve per share, eps: earnings per share, bvps: book value per share, pb: P/B ratio, timeToMarket: listing date
def Get_Stock_List():
df = ts.get_stock_basics()
return df
# Modified function: analyze using multiple indicators
# Analyze based on MACD, KDJ, etc.
def Get_TA(df_Code,Dist):
operate_array1=[]
operate_array2=[]
operate_array3=[]
count = 0
for code in df_Code.index:
# index, 0-6: date, open, high, close, low, volume, price_change, p_change (pct change)
# 7-12: ma5/ma10/ma20 (5/10/20-day average price), v_ma5/v_ma10/v_ma20 (5/10/20-day average volume)
df = ts.get_hist_data(code,start='2014-11-20')
dflen = df.shape[0]
count = count + 1
if dflen>35:
try:
(df,operate1) = Get_MACD(df)
(df,operate2) = Get_KDJ(df)
(df,operate3) = Get_RSI(df)
except Exception as e:
Write_Blog(e,Dist)
pass
operate_array1.append(operate1) #round(df.iat[(dflen-1),16],2)
operate_array2.append(operate2)
operate_array3.append(operate3)
if count == 0:
Write_Blog(str(count),Dist)
df_Code['MACD']=pd.Series(operate_array1,index=df_Code.index)
df_Code['KDJ']=pd.Series(operate_array2,index=df_Code.index)
df_Code['RSI']=pd.Series(operate_array3,index=df_Code.index)
return df_Code
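# For reference, the standard MACD construction used below (textbook
# definition, not project-specific): DIF = EMA(close, 12) - EMA(close, 26),
# DEA (signal) = EMA(DIF, 9), histogram = DIF - DEA; a DIF/DEA crossover is
# the usual buy/sell trigger.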
# Determine buy/sell signals via MACD
def Get_MACD(df):
# Parameters: fast 12, slow 26, signal 9
macd, macdsignal, macdhist = ta.MACD(np.array(df['close']), fastperiod=12, slowperiod=26, signalperiod=9)
SignalMA5 = ta.MA(macdsignal, timeperiod=5, matype=0)
SignalMA10 = ta.MA(macdsignal, timeperiod=10, matype=0)
SignalMA20 = ta.MA(macdsignal, timeperiod=20, matype=0)
#13-15 DIFF DEA DIFF-DEA
df['macd']= | pd.Series(macd,index=df.index) | pandas.Series |
from dataset import SurgeryDataset
import dataset
import click
from train import get_train_val_data_loaders, run_epoch
from model import get_model_name, save_model, save_results, get_model
import torch.nn as nn
import pandas as pd
from torch.utils.data import DataLoader
import cv2
import utils
def get_test_data_loaders(segments_df, batch_size, data_dir='data/', model='BLV', balance=True, pre_crop_size=256,
aug_method='val', segment_length=5):
df = segments_df.sort_values(by=['video_id', 'start_seconds'])
test_dataset = SurgeryDataset(df, data_dir=data_dir, mode='test', model=model, balance=balance,
pre_crop_size=pre_crop_size, aug_method=aug_method, segment_length=segment_length)
print("Number of segments: %d" % test_dataset.__len__())
test_data_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False,
num_workers=0, pin_memory=False)
return test_data_loader
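# Illustrative usage (hypothetical file name; assumes a segments CSV with
# 'video_id' and 'start_seconds' columns):
# segments = pd.read_csv('data/segments.csv')
# test_loader = get_test_data_loaders(segments, batch_size=64, data_dir='data/')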
def evaluate(net, video_ids, anns_path, batch_size=64, experiment_dir=None, use_anns=False, balance=False):
segments_df = dataframe_from_video_ids(video_ids, anns_path, use_anns=use_anns)
test_data_loader = get_test_data_loaders(segments_df, batch_size, balance=balance)
criterion = nn.BCELoss()
optimizer = None
net.eval()
val_accuracy, y_true, y_score, results = run_epoch(test_data_loader, net, optimizer, criterion, 0)
df = | pd.DataFrame(results) | pandas.DataFrame |
import numpy as np
import pandas as pd
from pathlib import Path
from typing import Dict, List, Union
from fastprogress import progress_bar
from scipy.interpolate import UnivariateSpline
from scipy import integrate
from tsfresh import extract_relevant_features, extract_features
class BasicOperationsOnSpectrum:
def __init__(self, **kwargs):
self.kwargs = kwargs
def fit_transform(self, X: pd.DataFrame):
return self.transform(X)
def transform(self, X: pd.DataFrame):
operations = self.kwargs["operations"]
prefix = self.kwargs["prefix"] + "_"
result_dict: Dict[str, List[Union[int, float]]] = {
op: []
for op in operations
}
base_dir = Path("input/atma5/spectrum")
for _, row in progress_bar(X.iterrows(), total=len(X)):
spectrum = pd.read_csv(
base_dir / row.spectrum_filename, sep="\t", header=None)
for op in operations:
result_dict[op].append(spectrum[1].__getattribute__(op)())
df = pd.DataFrame(result_dict)
df.columns = [prefix + c for c in df.columns]
return df
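# Illustrative usage (hypothetical DataFrame; assumes a 'spectrum_filename'
# column as read above):
# extractor = BasicOperationsOnSpectrum(operations=['min', 'max', 'mean'], prefix='intensity')
# features = extractor.fit_transform(train_df)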
class SpectrumIntegral:
def __init__(self, **kwargs):
self.kwargs = kwargs
def fit_transform(self, X: pd.DataFrame):
return self.transform(X)
def transform(self, X: pd.DataFrame):
unique_filenames = X["spectrum_filename"].unique()
integrals = []
for filename in progress_bar(unique_filenames):
spec = X.query(f"spectrum_filename == '{filename}'")
x = spec["wl"].values
y = spec["intensity"].values
method = self.kwargs["how"]
integrals.append(integrate.__getattribute__(method)(y, x))
return | pd.DataFrame({"spectrum_integral": integrals}) | pandas.DataFrame |
import unittest
import os
import tempfile
from collections import namedtuple
from blotter import blotter
from pandas.util.testing import assert_frame_equal, assert_series_equal, \
assert_dict_equal
import pandas as pd
import numpy as np
class TestBlotter(unittest.TestCase):
def setUp(self):
cdir = os.path.dirname(__file__)
self.prices = os.path.join(cdir, 'data/prices')
self.rates = os.path.join(cdir, 'data/rates/daily_interest_rates.csv')
self.log = os.path.join(cdir, 'data/events.log')
self.meta_log = os.path.join(cdir, 'data/meta_data.log')
def tearDown(self):
pass
def assertEventsEqual(self, evs1, evs2):
if len(evs1) != len(evs2):
raise(ValueError("Event lists length mismatch"))
for ev1, ev2 in zip(evs1, evs2):
self.assertEqual(ev1.type, ev2.type)
assert_dict_equal(ev1.data, ev2.data)
def assertEventTypes(self, evs1, evs2):
msg = "Event lists length mismatch\n\nLeft:\n%s \nRight:\n%s"
left_msg = ""
for ev in evs1:
left_msg += str(ev) + "\n"
right_msg = ""
for ev in evs2:
right_msg += ev.type + "\n"
msg = msg % (left_msg, right_msg)
if len(evs1) != len(evs2):
raise(ValueError(msg))
for ev1, ev2 in zip(evs1, evs2):
if ev1.type is not ev2.type:
raise(ValueError(msg))
def assertDictDataFrameEqual(self, dict1, dict2):
self.assertEqual(dict1.keys(), dict2.keys())
for key in dict1.keys():
try:
assert_frame_equal(dict1[key], dict2[key])
except AssertionError as e:
e.args = (("\nfor key %s\n" % key) + e.args[0],)
raise e
def make_blotter(self):
blt = blotter.Blotter(self.prices, self.rates)
return blt
def test_get_actions(self):
actions = [(pd.Timedelta("16h"), "PNL"),
(pd.Timedelta("16h"), "INTEREST")]
old_ts = pd.Timestamp("2017-01-04T10:30")
new_ts = pd.Timestamp("2017-01-06T10:30")
ac_ts = blotter.Blotter._get_actions(old_ts, new_ts, actions)
idx = pd.DatetimeIndex([pd.Timestamp("2017-01-04T16:00"),
pd.Timestamp("2017-01-04T16:00"),
pd.Timestamp("2017-01-05T16:00"),
pd.Timestamp("2017-01-05T16:00")])
ac_ts_ex = pd.Series(["PNL", "INTEREST", "PNL", "INTEREST"], index=idx)
assert_series_equal(ac_ts, ac_ts_ex)
def test_get_actions_weekend_filter(self):
actions = [(pd.Timedelta("16h"), "PNL"),
(pd.Timedelta("16h"), "INTEREST")]
old_ts = pd.Timestamp("2017-01-06T10:30")
new_ts = pd.Timestamp("2017-01-09T16:30")
ac_ts = blotter.Blotter._get_actions(old_ts, new_ts, actions)
idx = pd.DatetimeIndex([pd.Timestamp("2017-01-06T16:00"),
pd.Timestamp("2017-01-06T16:00"),
pd.Timestamp("2017-01-09T16:00"),
pd.Timestamp("2017-01-09T16:00")])
ac_ts_ex = pd.Series(["PNL", "INTEREST", "PNL", "INTEREST"], index=idx)
assert_series_equal(ac_ts, ac_ts_ex)
def test_trade_undefined_instrument(self):
blt = self.make_blotter()
ts = pd.Timestamp('2016-12-10T08:30:00')
instr = 'CLZ6'
qty = 1
price = 48.56
def make_trade():
blt._trade(ts, instr, qty, price)
self.assertRaises(KeyError, make_trade)
def test_get_meta_data(self):
blt = blotter.Blotter(self.prices, self.rates, base_ccy="USD")
# currency of instrument defaults to base ccy of blotter when not given
blt.define_generic("CL", margin=0.1, multiplier=100, commission=2.5,
isFX=False)
meta = namedtuple('metadata', ['ccy', 'margin', 'multiplier',
'commission', 'isFX'])
metadata_exp = meta("USD", 0.1, 100, 2.5, False)
metadata = blt._gnrc_meta["CL"]
self.assertEqual(metadata, metadata_exp)
def test_get_holdings_empty(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
hlds = blt.get_holdings_value(ts)
assert_series_equal(hlds, pd.Series())
def test_get_holdings_value_no_fx_conversion(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
qty = 1
price = 0
blt.define_generic("SXM", "ZAR", 0.1, 1, 2.5)
blt.map_instrument("SXM", "SXMZ15")
blt._trade(ts, 'SXMZ15', qty, price)
def no_fx():
return blt.get_holdings_value(ts)
self.assertRaises(KeyError, no_fx)
def test_get_holdings_timestamp_before(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-05T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
blt.define_generic("ES", "USD", 0.1, 100, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price)
ts = pd.Timestamp('2015-08-04T00:00:00')
def get_holdings():
blt.get_holdings_value(ts)
self.assertRaises(ValueError, get_holdings)
def test_get_holdings_base_ccy(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
blt.define_generic("ES", "USD", 0.1, 100, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
hlds = blt.get_holdings_value(ts)
hlds_exp = pd.Series([2082.73 * 100], index=['ESZ15'])
assert_series_equal(hlds, hlds_exp)
def test_get_holds_AUD_instr_AUDUSD_fxrate(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'APZ15'
qty = 1
price = 5200
blt.define_generic("AP", "AUD", 0.1, 1, 2.5)
blt.map_instrument("AP", "APZ15")
blt._trade(ts, instr, qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
hlds = blt.get_holdings_value(ts)
hlds_exp = pd.Series([5283 * 0.73457], index=['APZ15'])
assert_series_equal(hlds, hlds_exp)
def test_get_holds_CAD_instr_USDCAD_fxrate(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'SXMZ15'
qty = 1
price = 802.52
blt.define_generic("SXM", "CAD", 0.1, 1, 2.5)
blt.map_instrument("SXM", "SXMZ15")
blt._trade(ts, instr, qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
hlds = blt.get_holdings_value(ts)
hlds_exp = pd.Series([795.95 / 1.3183], index=['SXMZ15'])
assert_series_equal(hlds, hlds_exp)
def test_get_instruments_empty(self):
blt = self.make_blotter()
blt.connect_market_data()
instrs = blt.get_instruments()
assert_series_equal(instrs, pd.Series())
def test_get_instruments_multiplier(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
blt.define_generic("ES", "USD", 0.1, 100, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price)
instrs = blt.get_instruments()
instrs_exp = pd.Series([qty], index=['ESZ15'])
assert_series_equal(instrs, instrs_exp)
def test_get_instruments_two_ccy(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr1 = 'ESZ15'
instr2 = 'CLZ15'
qty = 1
price = 2081
blt.define_generic("ES", "USD", 0.1, 100, 2.5)
blt.map_instrument("ES", "ESZ15")
blt.define_generic("CL", "CAD", 0.1, 1, 2.5)
blt.map_instrument("CL", "CLZ15")
blt._trade(ts, instr1, qty, price)
blt._trade(ts, instr2, qty, price)
instrs = blt.get_instruments()
instrs_exp = pd.Series([qty, qty], index=['CLZ15', 'ESZ15'])
assert_series_equal(instrs, instrs_exp)
def test_get_trades_one_future_base_to_base(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
mid_price = 2080.75
blt.define_generic("ES", "USD", 0.1, 50, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price, mid_price)
trades = blt.get_trades()
cols = ['instrument', 'quantity', 'multiplier', 'price', 'ntc_price',
'ccy', 'fx_to_base']
exp_trades = pd.DataFrame([[instr, 1, 50, price, mid_price,
"USD", 1.0]], index=[ts], columns=cols)
exp_trades.index.name = 'timestamp'
assert_frame_equal(trades, exp_trades)
def test_get_trades_one_future_with_mid_price_fx(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
mid_price = 2080.75
blt.define_generic("ES", "CAD", 0.1, 50, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price, mid_price)
trades = blt.get_trades()
cols = ['instrument', 'quantity', 'multiplier', 'price', 'ntc_price',
'ccy', 'fx_to_base']
exp_trades = pd.DataFrame([[instr, 1, 50, price, mid_price, "CAD",
1 / 1.3125]], index=[ts], columns=cols)
exp_trades.index.name = 'timestamp'
assert_frame_equal(trades, exp_trades)
def test_get_trades_two_futures(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'ESZ15'
qty = 1
price1 = 2081
mid_price1 = 2080.75
price2 = 2083
mid_price2 = 2082.75
blt.define_generic("ES", "USD", 0.1, 50, 2.5)
blt.map_instrument("ES", "ESZ15")
blt.map_instrument("ES", "ESF16")
blt._trade(ts, instr, qty, price1, mid_price1)
blt._trade(ts, instr, qty, price2, mid_price2)
trades = blt.get_trades()
cols = ['instrument', 'quantity', 'multiplier', 'price', 'ntc_price',
'ccy', 'fx_to_base']
data = [[instr, 1, 50, price1, mid_price1, "USD", 1.0],
[instr, 1, 50, price2, mid_price2, "USD", 1.0]]
exp_trades = pd.DataFrame(data, index=[ts, ts], columns=cols)
exp_trades.index.name = 'timestamp'
assert_frame_equal(trades, exp_trades)
def test_create_unknown_event(self):
blt = self.make_blotter()
ts = pd.Timestamp('2015-08-03T00:00:00')
def create_unknown():
return blt.create_events(ts, "NotAllowed")
self.assertRaises(NotImplementedError, create_unknown)
def test_dispatch_unknown_event(self):
blt = self.make_blotter()
ev = blotter._Event("NotAnEvent",
{"timestamp": pd.Timestamp('2015-01-01')})
def dispatch_unknown():
blt.dispatch_events([ev])
self.assertRaises(NotImplementedError, dispatch_unknown)
def test_create_interest_event(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-03T00:00:00')
blt._holdings.update_cash(ts, "AUD", 1000000)
blt._holdings.update_cash(ts, "JPY", 1000000)
ts = pd.Timestamp('2015-08-04T00:00:00')
evs = blt.create_events(ts, "INTEREST")
irates = pd.read_csv(self.rates, index_col=0, parse_dates=True)
aud_int = irates.loc[ts, "AUD"] / 365 * 1000000
jpy_int = irates.loc[ts, "JPY"] / 365 * 1000000
evs_exp = [blotter._Event("INTEREST", {"timestamp": ts, "ccy": "AUD",
"quantity": aud_int}),
blotter._Event("INTEREST", {"timestamp": ts, "ccy": "JPY",
"quantity": jpy_int})]
self.assertEventsEqual(evs, evs_exp)
def test_create_interest_event_no_rate(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-03T00:00:00')
# No ZAR data
blt._holdings.update_cash(ts, "ZAR", 1000000)
ts = pd.Timestamp('2015-08-04T00:00:00')
def get_interest():
return blt.create_events(ts, "INTEREST")
self.assertRaises(KeyError, get_interest)
def test_create_interest_weekend_event(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-06T00:00:00')
blt._holdings.update_cash(ts, "AUD", 1000000)
blt._holdings.update_cash(ts, "JPY", 1000000)
ts = pd.Timestamp('2015-08-07T00:00:00')
evs = blt.create_events(ts, "INTEREST")
irates = pd.read_csv(self.rates, index_col=0, parse_dates=True)
aud_int = irates.loc[ts, "AUD"] / 365 * 3 * 1000000
jpy_int = irates.loc[ts, "JPY"] / 365 * 3 * 1000000
evs_exp = [blotter._Event("INTEREST", {"timestamp": ts, "ccy": "AUD",
"quantity": aud_int}),
blotter._Event("INTEREST", {"timestamp": ts, "ccy": "JPY",
"quantity": jpy_int})]
self.assertEventsEqual(evs, evs_exp)
def test_create_margin_event(self):
blt = blotter.Blotter(self.prices, self.rates, base_ccy="USD",
margin_charge=0.015)
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
qty = 1
price = 0
blt.define_generic("SXM", "CAD", 0.1, 1, 2.5)
blt.map_instrument("SXM", "SXMZ15")
blt.define_generic("ES", "USD", 0.05, 1, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, 'SXMZ15', qty, price)
blt._trade(ts, "ESZ15", qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
ev = blt.create_events(ts, "MARGIN")
rates = pd.read_csv(self.rates, index_col=0, parse_dates=True)
es_fp = os.path.join(self.prices, 'ESZ15.csv')
es = pd.read_csv(es_fp, index_col=0, parse_dates=True)
sxm_fp = os.path.join(self.prices, 'SXMZ15.csv')
sxm = pd.read_csv(sxm_fp, index_col=0, parse_dates=True)
usdcad_fp = os.path.join(self.prices, 'USDCAD.csv')
usdcad = pd.read_csv(usdcad_fp, index_col=0, parse_dates=True)
es_notional = es.loc[ts].values * qty * 0.05
sxm_notional = sxm.loc[ts].values * qty * 0.1 / usdcad.loc[ts].values
notnl = float(es_notional + sxm_notional)
quantity = notnl * (rates.loc[ts, "USD"] + 0.015) / 365
ev_exp = [blotter._Event("INTEREST", {"timestamp": ts, "ccy": "USD",
"quantity": quantity})]
self.assertEventsEqual(ev, ev_exp)
def test_create_short_margin_event(self):
blt = blotter.Blotter(self.prices, self.rates, base_ccy="USD",
margin_charge=0.015)
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
qty = -1
price = 0
blt.define_generic("ES", "USD", 0.05, 1, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, "ESZ15", qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
ev = blt.create_events(ts, "MARGIN")
rates = pd.read_csv(self.rates, index_col=0, parse_dates=True)
es_fp = os.path.join(self.prices, 'ESZ15.csv')
es = pd.read_csv(es_fp, index_col=0, parse_dates=True)
es_notional = float(es.loc[ts].values * np.abs(qty) * 0.05)
quantity = es_notional * (rates.loc[ts, "USD"] + 0.015) / 365
ev_exp = [blotter._Event("INTEREST", {"timestamp": ts, "ccy": "USD",
"quantity": quantity})]
self.assertEventsEqual(ev, ev_exp)
def test_create_pnl_event(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = | pd.Timestamp('2015-08-04T00:00:00') | pandas.Timestamp |
import pandas as pd
import numpy as np
def clean_last_season():
fantasy_data = | pd.read_csv('../resources/1617playerdata.csv', sep=',', encoding='utf-8') | pandas.read_csv |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import quantipy as qp
# from matplotlib import pyplot as plt
# import matplotlib.image as mpimg
import string
import pickle
import warnings
try:
import seaborn as sns
from PIL import Image
except:
pass
from quantipy.core.cache import Cache
from quantipy.core.view import View
from quantipy.core.view_generators.view_mapper import ViewMapper
from quantipy.core.view_generators.view_maps import QuantipyViews
from quantipy.core.helpers.functions import emulate_meta
from quantipy.core.tools.view.logic import (has_any, has_all, has_count,
not_any, not_all, not_count,
is_lt, is_ne, is_gt,
is_le, is_eq, is_ge,
union, intersection, get_logic_index)
from quantipy.core.helpers.functions import (paint_dataframe,
emulate_meta,
get_text,
finish_text_key)
from quantipy.core.tools.dp.prep import recode
from quantipy.core.tools.qp_decorators import lazy_property
from operator import add, sub, mul
from operator import truediv as div
#from scipy.stats.stats import _ttest_finish as get_pval
from scipy.stats._stats_py import _ttest_finish as get_pval
from scipy.stats import chi2 as chi2dist
from scipy.stats import f as fdist
from itertools import combinations, chain, product
from collections import defaultdict, OrderedDict, Counter
import gzip
try:
import dill
except:
pass
import json
import copy
import time
import sys
import re
from quantipy.core.rules import Rules
_TOTAL = '@'
_AXES = ['x', 'y']
class ChainManager(object):
def __init__(self, stack):
self.stack = stack
self.__chains = []
self.source = 'native'
self.build_info = {}
self._hidden = []
def __str__(self):
return '\n'.join([chain.__str__() for chain in self])
def __repr__(self):
return self.__str__()
def __getitem__(self, value):
if isinstance(value, str):
element = self.__chains[self._idx_from_name(value)]
is_folder = isinstance(element, dict)
if is_folder:
return list(element.values())[0]
else:
return element
else:
return self.__chains[value]
def __len__(self):
"""returns the number of cached Chains"""
return len(self.__chains)
def __iter__(self):
self.n = 0
return self
def __next__(self):
if self.n < self.__len__():
obj = self[self.n]
self.n += 1
return obj
else:
raise StopIteration
next = __next__
def add_chain(self, chain):
self.__chains.append(chain)
@property
def folders(self):
"""
Folder indices, names and number of stored ``qp.Chain`` items (as tuples).
"""
return [(self.__chains.index(f), list(f.keys())[0], len(list(f.values())[0]))
for f in self if isinstance(f, dict)]
@property
def singles(self):
"""
The list of all non-folder ``qp.Chain`` indices and names (as tuples).
"""
return list(zip(self.single_idxs, self.single_names))
@property
def chains(self):
"""
The flattened list of all ``qp.Chain`` items of self.
"""
all_chains = []
for c in self:
if isinstance(c, dict):
all_chains.extend(list(c.values())[0])
else:
all_chains.append(c)
return all_chains
@property
def folder_idxs(self):
"""
The folders' index positions in self.
"""
return [f[0] for f in self.folders]
@property
def folder_names(self):
"""
The folders' names from self.
"""
return [f[1] for f in self.folders]
@property
def single_idxs(self):
"""
The ``qp.Chain`` instances' index positions in self.
"""
return [self.__chains.index(c) for c in self if isinstance(c, Chain)]
@property
def single_names(self):
"""
The ``qp.Chain`` instances' names.
"""
return [s.name for s in self if isinstance(s, Chain)]
@property
def hidden(self):
"""
All ``qp.Chain`` elements that are hidden.
"""
return [c.name for c in self.chains if c.hidden]
@property
def hidden_folders(self):
"""
All hidden folders.
"""
return [n for n in self._hidden if n in self.folder_names]
def _content_structure(self):
return ['folder' if isinstance(k, dict) else 'single' for k in self]
def _singles_to_idx(self):
return {name: i for i, name in list(self._idx_to_singles().items())}
def _idx_to_singles(self):
return dict(self.singles)
def _idx_fold(self):
return dict([(f[0], f[1]) for f in self.folders])
def _folders_to_idx(self):
return {name: i for i, name in list(self._idx_fold().items())}
def _names(self, unroll=False):
if not unroll:
return self.folder_names + self.single_names
else:
return [c.name for c in self.chains]
def _idxs_to_names(self):
singles = self.singles
folders = [(f[0], f[1]) for f in self.folders]
return dict(singles + folders)
def _names_to_idxs(self):
return {n: i for i, n in list(self._idxs_to_names().items())}
def _name_from_idx(self, name):
return self._idxs_to_names()[name]
def _idx_from_name(self, idx):
return self._names_to_idxs()[idx]
def _is_folder_ref(self, ref):
return ref in self._folders_to_idx() or ref in self._idx_fold()
def _is_single_ref(self, ref):
return ref in self._singles_to_idx or ref in self._idx_to_singles()
def _uniquify_names(self):
all_names = Counter(self.single_names + self.folder_names)
single_name_occ = Counter(self.single_names)
folder_name_occ = {folder: Counter([c.name for c in self[folder]])
for folder in self.folder_names}
for struct_name in all_names:
if struct_name in folder_name_occ:
iter_over = folder_name_occ[struct_name]
is_folder = struct_name
else:
iter_over = single_name_occ
is_folder = False
for name, occ in list(iter_over.items()):
if occ > 1:
new_names = ['{}_{}'.format(name, i) for i in range(1, occ + 1)]
idx = [s[0] for s in self.singles if s[1] == name]
pairs = list(zip(idx, new_names))
if is_folder:
for idx, c in enumerate(self[is_folder]):
c.name = pairs[idx][1]
else:
for p in pairs:
self.__chains[p[0]].name = p[1]
return None
def _set_to_folderitems(self, folder):
"""
Will keep only the ``values()`` ``qp.Chain`` item list from the named
folder. Use this for within-folder-operations...
"""
if not folder in self.folder_names:
err = "A folder named '{}' does not exist!".format(folder)
raise KeyError(err)
else:
org_chains = self.__chains[:]
org_index = self._idx_from_name(folder)
self.__chains = self[folder]
return org_chains, org_index
def _rebuild_org_folder(self, folder, items, index):
"""
After a within-folder-operation this method is using the returns
of ``_set_to_folderitems`` to rebuild the originating folder.
"""
self.fold(folder)
new_folder = self.__chains[:]
self.__chains = items
self.__chains[index] = new_folder[0]
return None
@staticmethod
def _dupes_in_chainref(chain_refs):
return len(set(chain_refs)) != len(chain_refs)
def _check_equality(self, other, return_diffs=True):
"""
"""
chains1 = self.chains
chains2 = other.chains
diffs = {}
if not len(chains1) == len(chains2):
return False
else:
paired = list(zip(chains1, chains2))
for c1, c2 in paired:
atts1 = c1.__dict__
atts2 = c2.__dict__
for att in list(atts1.keys()):
if isinstance(atts1[att], (pd.DataFrame, pd.Index)):
if not atts1[att].equals(atts2[att]):
diffs[att] = [atts1[att], atts2[att]]
else:
if atts1[att] != atts2[att]:
diffs[att] = [atts1[att], atts2[att]]
return diffs if return_diffs else not diffs
def _test_same_structure(self, other):
"""
"""
folders1 = self.folders
singles1 = self.singles
folders2 = other.folders
singles2 = other.singles
if (folders1 != folders2 or singles1 != singles2):
return False
else:
return True
def equals(self, other):
"""
Test equality of self to another ``ChainManager`` object instance.
.. note::
Only the flattened list of ``Chain`` objects stored are tested, i.e.
any folder structure differences are ignored. Use ``compare()`` for
a more detailed comparison.
Parameters
----------
other : ``qp.ChainManager``
Another ``ChainManager`` object to compare.
Returns
-------
equality : bool
"""
return self._check_equality(other, False)
def compare(self, other, strict=True, full_summary=True):
"""
Compare structure and content of self to another ``ChainManager`` instance.
Parameters
----------
other : ``qp.ChainManager``
Another ``ChainManager`` object to compare.
strict : bool, default True
Test if the structure of folders vs. single Chain objects is the
same in both ChainManager instances.
full_summary : bool, default True
``False`` will disable the detailed comparison ``pd.DataFrame``
that informs about differences between the objects.
Returns
-------
result : str
A brief feedback message about the comparison results.
"""
diffs = []
if strict:
same_structure = self._test_same_structure(other)
if not same_structure:
diffs.append('s')
check = self._check_equality(other)
if isinstance(check, bool):
diffs.append('l')
else:
if check: diffs.append('c')
report_full = ['_frame', '_x_keys', '_y_keys', 'index', '_columns',
'base_descriptions', 'annotations']
diffs_in = ''
if diffs:
if 'l' in diffs:
diffs_in += '\n -Length (number of stored Chain objects)'
if 's' in diffs:
diffs_in += '\n -Structure (folders and/or single Chain order)'
if 'c' in diffs:
diffs_in += '\n -Chain elements (properties and content of Chain objects)'
if diffs_in:
result = 'ChainManagers are not identical:\n'
result += '--------------------------------' + diffs_in
else:
result = 'ChainManagers are identical.'
print(result)
return None
def save(self, path, keep_stack=False):
"""
"""
if not keep_stack:
del self.stack
self.stack = None
f = open(path, 'wb')
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
f.close()
return None
@staticmethod
def load(path):
"""
"""
f = open(path, 'rb')
obj = pickle.load(f)
f.close()
return obj
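# Illustrative usage (hypothetical path): persist the ChainManager without its
# stack and restore it later.
# cm.save('./chains.cm', keep_stack=False)
# cm = ChainManager.load('./chains.cm')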
def _toggle_vis(self, chains, mode='hide'):
if not isinstance(chains, list): chains = [chains]
for chain in chains:
if isinstance(chain, dict):
fname = list(chain.keys())[0]
elements = list(chain.values())[0]
fidx = self._idx_from_name(fname)
folder = self[fidx][fname]
for c in folder:
if c.name in elements:
c.hidden = True if mode == 'hide' else False
if mode == 'hide' and not c.name in self._hidden:
self._hidden.append(c.name)
if mode == 'unhide' and c.name in self._hidden:
self._hidden.remove(c.name)
else:
if chain in self.folder_names:
for c in self[chain]:
c.hidden = True if mode == 'hide' else False
else:
self[chain].hidden = True if mode == 'hide' else False
if mode == 'hide':
if not chain in self._hidden:
self._hidden.append(chain)
else:
if chain in self._hidden:
self._hidden.remove(chain)
return None
def hide(self, chains):
"""
Flag elements as being hidden.
Parameters
----------
chains : (list) of int and/or str or dict
The ``qp.Chain`` item and/or folder names to hide. To hide *within*
a folder use a dict to map the desired Chain names to the belonging
folder name.
Returns
-------
None
"""
self._toggle_vis(chains, 'hide')
return None
def unhide(self, chains=None):
"""
Unhide elements that have been set as ``hidden``.
Parameters
----------
chains : (list) of int and/or str or dict, default None
The ``qp.Chain`` item and/or folder names to unhide. To unhide *within*
a folder use a dict to map the desired Chain names to the belonging
folder name. If not provided, all hidden elements will be unhidden.
Returns
-------
None
"""
if not chains: chains = self.folder_names + self.single_names
self._toggle_vis(chains, 'unhide')
return None
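# Illustrative usage (hypothetical chain and folder names):
# cm.hide(['q1', {'Brand KPIs': ['q2', 'q3']}])  # hide a single item and two folder items
# cm.unhide()                                    # clear all hidden flags again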
def clone(self):
"""
Return a full (deep) copy of self.
"""
return copy.deepcopy(self)
def insert(self, other_cm, index=-1, safe_names=False):
"""
Add elements from another ``ChainManager`` instance to self.
Parameters
----------
other_cm : ``quantipy.ChainManager``
A ChainManager instance to draw the elements from.
index : int, default -1
The positional index after which new elements will be added.
Defaults to -1, i.e. elements are appended at the end.
safe_names : bool, default False
If True and any duplicated element names are found after the
operation, names will be made unique (by appending '_1', '_2', '_3',
etc.).
Returns
------
None
"""
if not isinstance(other_cm, ChainManager):
raise ValueError("other_cm must be a quantipy.ChainManager instance.")
if not index == -1:
before_c = self.__chains[:index+1]
after_c = self.__chains[index+1:]
new_chains = before_c + other_cm.__chains + after_c
self.__chains = new_chains
else:
self.__chains.extend(other_cm.__chains)
if safe_names: self._uniquify_names()
return None
def merge(self, folders, new_name=None, drop=True):
"""
Unite the items of two or more folders, optionally providing a new name.
If duplicated ``qp.Chain`` items are found, the first instance will be
kept. The merged folder will take the place of the first folder named
in ``folders``.
Parameters
----------
folders : list of int and/or str
The folders to merge, referenced by their positional index or by name.
new_name : str, default None
Use this as the merged folder's name. If not provided, the name
of the first folder in ``folders`` will be used instead.
drop : bool, default True
If ``False``, the original folders will be kept alongside the
new merged one.
Returns
-------
None
"""
if not isinstance(folders, list):
err = "'folders' must be a list of folder references!"
raise TypeError(err)
if len(folders) == 1:
err = "'folders' must contain at least two folder names!"
raise ValueError(err)
if not all(self._is_folder_ref(f) for f in folders):
err = "One or more folder names from 'folders' do not exist!"
raise ValueError(err)
folders = [f if isinstance(f, str) else self._name_from_idx(f)
for f in folders]
folder_idx = self._idx_from_name(folders[0])
if not new_name: new_name = folders[0]
merged_items = []
seen_names = []
for folder in folders:
for chain in self[folder]:
if not chain.name in seen_names:
merged_items.append(chain)
seen_names.append(chain.name)
if drop:
self.__chains[folder_idx] = {new_name: merged_items}
remove_folders = folders[1:] if new_name != folders[0] else folders
for r in remove_folders:
self.remove(r)
else:
start = self.__chains[:folder_idx]
end = self.__chains[folder_idx:]
self.__chains = start + [{new_name: merged_items}] + end
return None
def fold(self, folder_name=None, chains=None):
"""
Arrange non-``dict`` structured ``qp.Chain`` items in folders.
All separate ``qp.Chain`` items will be mapped to their ``name``
property being the ``key`` in the transformed ``dict`` structure.
Parameters
----------
folder_name : str, default None
Collect all items in a folder keyed by the provided name. If the
key already exists, the items will be appended to the ``dict``
values.
chains : (list) of int and/or str, default None
Select specific ``qp.Chain`` items by providing their positional
indices or ``name`` property value for moving only a subset to the
folder.
Returns
-------
None
"""
if chains:
if not isinstance(chains, list): chains = [chains]
if any(self._is_folder_ref(c) for c in chains):
err = 'Cannot build folder from other folders!'
raise ValueError(err)
all_chain_names = []
singles = []
for c in chains:
if isinstance(c, str):
all_chain_names.append(c)
elif isinstance(c, int) and c in self._idx_to_singles():
all_chain_names.append(self._idx_to_singles()[c])
for c in all_chain_names:
singles.append(self[self._singles_to_idx()[c]])
else:
singles = [s for s in self if isinstance(s, Chain)]
if self._dupes_in_chainref(singles):
err = "Cannot build folder from duplicate qp.Chain references: {}"
raise ValueError(err.format(singles))
for s in singles:
if folder_name:
if folder_name in self.folder_names:
self[folder_name].append(s)
else:
self.__chains.append({folder_name: [s]})
del self.__chains[self._singles_to_idx()[s.name]]
else:
self.__chains[self._singles_to_idx()[s.name]] = {s.name: [s]}
return None
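# Illustrative usage (hypothetical names): move two single chains into a folder,
# then dissolve that folder again.
# cm.fold(folder_name='Demographics', chains=['gender', 'age'])
# cm.unfold('Demographics')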
def unfold(self, folder=None):
"""
Remove folder but keep the collected items.
The items will be added starting at the old index position of the
original folder.
Parameters
----------
folder : (list of) str, default None
The name of the folder to drop and extract items from. If not
provided all folders will be unfolded.
Returns
-------
None
"""
if not folder:
folder = self.folder_names
else:
if not isinstance(folder, list): folder = [folder]
invalid = [f for f in folder if f not in self.folder_names]
if invalid:
err = "Folder(s) named '{}' not found.".format(invalid)
raise KeyError(err)
for f in folder:
old_pos = self._idx_from_name(f)
items = self[f]
start = self.__chains[: old_pos]
end = self.__chains[old_pos + 1: ]
self.__chains = start + items + end
return None
def remove(self, chains, folder=None, inplace=True):
"""
Remove (folders of) ``qp.Chain`` items by providing a list of indices
or names.
Parameters
----------
chains : (list) of int and/or str
``qp.Chain`` items or folders by provided by their positional
indices or ``name`` property.
folder : str, default None
If a folder name is provided, items will be dropped within that
folder only instead of removing all found instances.
inplace : bool, default True
By default the new order is applied inplace, set to ``False`` to
return a new object instead.
Returns
-------
None
"""
if inplace:
cm = self
else:
cm = self.clone()
if folder:
org_chains, org_index = cm._set_to_folderitems(folder)
if not isinstance(chains, list): chains = [chains]
remove_idxs= [c if isinstance(c, int) else cm._idx_from_name(c)
for c in chains]
if cm._dupes_in_chainref(remove_idxs):
err = "Cannot remove with duplicate chain references: {}"
raise ValueError(err.format(remove_idxs))
new_items = []
for pos, c in enumerate(cm):
if not pos in remove_idxs: new_items.append(c)
cm.__chains = new_items
if folder: cm._rebuild_org_folder(folder, org_chains, org_index)
if inplace:
return None
else:
return cm
def cut(self, values, ci=None, base=False, tests=False):
"""
Isolate selected axis values in the ``Chain.dataframe``.
Parameters
----------
values : (list of) str
The string must indicate the raw (i.e. the unpainted) second level
axis value, e.g. ``'mean'``, ``'net_1'``, etc.
ci : {'counts', 'c%', None}, default None
The cell item version to target if multiple frequency representations
are present.
base : bool, default False
Controls keeping any existing base view aggregations.
tests : bool, default False
Controls keeping any existing significance test view aggregations.
Returns
-------
None
"""
if not isinstance(values, list): values = [values]
if 'cbase' in values:
values[values.index('cbase')] = 'All'
if base and not 'All' in values:
values = ['All'] + values
for c in self.chains:
# force ci parameter for proper targeting on array summaries...
if c.array_style == 0 and ci is None:
_ci = c.cell_items.split('_')[0]
if not _ci.startswith('counts'):
ci = '%'
else:
ci = 'counts'
if c.sig_test_letters: c._remove_letter_header()
idxs, names, order = c._view_idxs(
values, keep_tests=tests, keep_bases=base, names=True, ci=ci)
idxs = [i for _, i in sorted(zip(order, idxs))]
names = [n for _, n in sorted(zip(order, names))]
if c.ci_count > 1: c._non_grouped_axis()
if c.array_style == 0:
c._fill_cells()
start, repeat = c._row_pattern(ci)
c._frame = c._frame.iloc[start::repeat, idxs]
else:
c._frame = c._frame.iloc[idxs, :]
c.index = c._slice_edited_index(c.index, idxs)
new_views = OrderedDict()
for v in c.views.copy():
if not v in names:
del c._views[v]
else:
c._views[v] = names.count(v)
if not c._array_style == 0:
if not tests:
c.sig_test_letters = None
else:
c._frame = c._apply_letter_header(c._frame)
c.edited = True
return None
def join(self, title='Summary'):
"""
Join **all** ``qp.Chain```elements, concatenating along the matching axis.
Parameters
----------
title : {str, 'auto'}, default 'Summary'
The new title for the joined axis' index representation.
Returns
-------
None
"""
custom_views = []
self.unfold()
chains = self.chains
totalmul = len(chains[0]._frame.columns.get_level_values(0).tolist())
concat_dfs = []
new_labels = []
for c in chains:
new_label = []
if c.sig_test_letters:
c._remove_letter_header()
c._frame = c._apply_letter_header(c._frame)
df = c.dataframe
if not c.array_style == 0:
new_label.append(df.index.get_level_values(0).values.tolist()[0])
new_label.extend((len(c.describe()) - 1) * [''])
else:
new_label.extend(df.index.get_level_values(1).values.tolist())
names = ['Question', 'Values']
join_idx = pd.MultiIndex.from_product([[title], new_label], names=names)
df.index = join_idx
df.rename(columns={c._x_keys[0]: 'Total'}, inplace=True)
if not c.array_style == 0:
custom_views.extend(c._views_per_rows())
else:
df.columns.set_levels(levels=[title]*totalmul, level=0, inplace=True)
concat_dfs.append(df)
new_df = pd.concat(concat_dfs, axis=0, join='inner')
self.chains[0]._frame = new_df
self.reorder([0])
self.rename({self.single_names[0]: title})
self.fold()
self.chains[0]._custom_views = custom_views
return None
def reorder(self, order, folder=None, inplace=True):
"""
Reorder (folders of) ``qp.Chain`` items by providing a list of new
indices or names.
Parameters
----------
order : list of int and/or str
The folder or ``qp.Chain`` references to determine the new order
of items. Any items not referenced will be removed from the new
order.
folder : str, default None
If a folder name is provided, items will be sorted within that
folder instead of applying the sorting to the general items
collection.
inplace : bool, default True
By default the new order is applied inplace, set to ``False`` to
return a new object instead.
Returns
-------
None
"""
if inplace:
cm = self
else:
cm = self.clone()
if folder:
org_chains, org_index = self._set_to_folderitems(folder)
if not isinstance(order, list):
err = "'order' must be a list!"
raise ValueError(err)
new_idx_order = []
for o in order:
if isinstance(o, int):
new_idx_order.append(o)
else:
new_idx_order.append(self._idx_from_name(o))
if cm._dupes_in_chainref(new_idx_order):
err = "Cannot reorder from duplicate qp.Chain references: {}"
raise ValueError(err.format(new_idx_order))
items = [self.__chains[idx] for idx in new_idx_order]
cm.__chains = items
if folder: cm._rebuild_org_folder(folder, org_chains, org_index)
if inplace:
return None
else:
return cm
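# Illustrative usage (hypothetical references): keep only the folder at position
# 0 and the chain named 'q5', in that order; anything not referenced is dropped.
# cm.reorder([0, 'q5'])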
def rename(self, names, folder=None):
"""
Rename (folders of) ``qp.Chain`` items by providing a mapping of old
to new keys.
Parameters
----------
names : dict
Maps existing names to the desired new ones, i.e.
{'old name': 'new names'} pairs need to be provided.
folder : str, default None
If a folder name is provided, new names will only be applied
within that folder. This is without effect if all ``qp.Chain.name``
properties across the items are unique.
Returns
-------
None
"""
if not isinstance(names, dict):
err = "''names' must be a dict of old_name: new_name pairs."
raise ValueError(err)
if folder and not folder in self.folder_names:
err = "A folder named '{}' does not exist!".format(folder)
raise KeyError(err)
for old, new in list(names.items()):
no_folder_name = folder and not old in self._names(False)
no_name_across = not folder and not old in self._names(True)
if no_folder_name and no_name_across:
err = "'{}' is not an existing folder or ``qp.Chain`` name!"
raise KeyError(err.format(old))
else:
within_folder = old not in self._names(False)
if not within_folder:
idx = self._idx_from_name(old)
if not isinstance(self.__chains[idx], dict):
self.__chains[idx].name = new
else:
self.__chains[idx] = {new: self[old][:]}
else:
iter_over = self[folder] if folder else self.chains
for c in iter_over:
if c.name == old: c.name = new
return None
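# Illustrative usage (hypothetical names):
# cm.rename({'q1': 'Awareness'})                   # rename across all items
# cm.rename({'q2': 'Usage'}, folder='Brand KPIs')  # rename only within a folder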
def _native_stat_names(self, idxvals_list, text_key=None):
"""
"""
if not text_key: text_key = 'en-GB'
replacements = {
'en-GB': {
'Weighted N': 'Base', # Crunch
'N': 'Base', # Crunch
'Mean': 'Mean', # Dims
'StdDev': 'Std. dev', # Dims
'StdErr': 'Std. err. of mean', # Dims
'SampleVar': 'Sample variance' # Dims
},
}
native_stat_names = []
for val in idxvals_list:
if val in replacements[text_key]:
native_stat_names.append(replacements[text_key][val])
else:
native_stat_names.append(val)
return native_stat_names
def _get_ykey_mapping(self):
ys = []
letters = string.ascii_uppercase + string.ascii_lowercase
for c in self.chains:
if c._y_keys not in ys:
ys.append(c._y_keys)
return list(zip(ys, letters))
def describe(self, by_folder=False, show_hidden=False):
"""
Get a structual summary of all ``qp.Chain`` instances found in self.
Parameters
----------
by_folder : bool, default False
If True, only information on ``dict``-structured (folder-like)
``qp.Chain`` items is shown, multiindexed by folder names and item
enumerations.
show_hidden : bool, default False
If True, the summary will also include elements that have been set
hidden using ``self.hide()``.
Returns
-------
None
"""
folders = []
folder_items = []
variables = []
names = []
array_sum = []
sources = []
banner_ids = []
item_pos = []
hidden = []
bannermap = self._get_ykey_mapping()
for pos, chains in enumerate(self):
is_folder = isinstance(chains, dict)
if is_folder:
folder_name = list(chains.keys())
chains = list(chains.values())[0]
folder_items.extend(list(range(0, len(chains))))
item_pos.extend([pos] * len(chains))
else:
chains = [chains]
folder_name = [None]
folder_items.append(None)
item_pos.append(pos)
if chains[0].structure is None:
variables.extend([c._x_keys[0] for c in chains])
names.extend([c.name for c in chains])
folders.extend(folder_name * len(chains))
array_sum.extend([True if c.array_style > -1 else False
for c in chains])
sources.extend(c.source if not c.edited else 'edited'
for c in chains)
for c in chains:
for m in bannermap:
if m[0] == c._y_keys: banner_ids.append(m[1])
else:
variables.extend([chains[0].name])
names.extend([chains[0].name])
folders.extend(folder_name)
array_sum.extend([False])
sources.extend(c.source for c in chains)
banner_ids.append(None)
for c in chains:
if c.hidden:
hidden.append(True)
else:
hidden.append(False)
df_data = [item_pos,
names,
folders,
folder_items,
variables,
sources,
banner_ids,
array_sum,
hidden]
df_cols = ['Position',
'Name',
'Folder',
'Item',
'Variable',
'Source',
'Banner id',
'Array',
'Hidden']
df = pd.DataFrame(df_data).T
df.columns = df_cols
if by_folder:
df = df.set_index(['Position', 'Folder', 'Item'])
if not show_hidden:
df = df[df['Hidden'] == False][df.columns[:-1]]
return df
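# Illustrative usage: build a structural overview of the stored chains,
# multiindexed by folder position and item number.
# overview = cm.describe(by_folder=True, show_hidden=False)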
def from_mtd(self, mtd_doc, ignore=None, paint=True, flatten=False):
"""
Convert a Dimensions table document (.mtd) into a collection of
quantipy.Chain representations.
Parameters
----------
mtd_doc : (pandified) .mtd
A Dimensions .mtd file or the returned result of ``pandify_mtd()``.
A "pandified" .mtd consists of ``dict`` of ``pandas.DataFrame``
and metadata ``dict``. Additional text here...
ignore : bool, default False
Text
labels : bool, default True
Text
flatten : bool, default False
Text
Returns
-------
self : quantipy.ChainManager
Will consist of Quantipy representations of the pandas-converted
.mtd file.
"""
def relabel_axes(df, meta, sigtested, labels=True):
"""
"""
for axis in ['x', 'y']:
if axis == 'x':
transf_axis = df.index
else:
transf_axis = df.columns
levels = transf_axis.nlevels
axis_meta = 'index-emetas' if axis == 'x' else 'columns-emetas'
for l in range(0, levels):
if not (sigtested and axis == 'y' and l == levels -1):
org_vals = transf_axis.get_level_values(l).tolist()
org_names = [ov.split('|')[0] for ov in org_vals]
org_labs = [ov.split('|')[1] for ov in org_vals]
new_vals = org_labs if labels else org_names
if l > 0:
for no, axmeta in enumerate(meta[axis_meta]):
if axmeta['Type'] != 'Category':
new_vals[no] = axmeta['Type']
new_vals = self._native_stat_names(new_vals)
rename_dict = {old: new for old, new in zip(org_vals, new_vals)}
if axis == 'x':
df.rename(index=rename_dict, inplace=True)
df.index.names = ['Question', 'Values'] * (levels // 2)
else:
df.rename(columns=rename_dict, inplace=True)
if sigtested:
df.columns.names = (['Question', 'Values'] * (levels // 2) +
['Test-IDs'])
else:
df.columns.names = ['Question', 'Values'] * (levels // 2)
return None
def split_tab(tab):
"""
"""
df, meta = tab['df'], tab['tmeta']
mtd_slicer = df.index.get_level_values(0)
meta_limits = list(OrderedDict(
(i, mtd_slicer.tolist().count(i)) for i in mtd_slicer).values())
meta_slices = []
for start, end in enumerate(meta_limits):
if start == 0:
i_0 = 0
else:
i_0 = meta_limits[start-1]
meta_slices.append((i_0, end))
df_slicers = []
for e in mtd_slicer:
if not e in df_slicers:
df_slicers.append(e)
dfs = [df.loc[[s], :].copy() for s in df_slicers]
sub_metas = []
for ms in meta_slices:
all_meta = copy.deepcopy(meta)
idx_meta = all_meta['index-emetas'][ms[0]: ms[1]]
all_meta['index-emetas'] = idx_meta
sub_metas.append(all_meta)
return list(zip(dfs, sub_metas))
def _get_axis_vars(df):
axis_vars = []
for axis in [df.index, df.columns]:
ax_var = [v.split('|')[0] for v in axis.unique().levels[0]]
axis_vars.append(ax_var)
return axis_vars[0][0], axis_vars[1]
def to_chain(basic_chain_defintion, add_chain_meta):
new_chain = Chain(None, basic_chain_defintion[1])
new_chain.source = 'Dimensions MTD'
new_chain.stack = None
new_chain.painted = True
new_chain._meta = add_chain_meta
new_chain._frame = basic_chain_defintion[0]
new_chain._x_keys = [basic_chain_defintion[1]]
new_chain._y_keys = basic_chain_defintion[2]
new_chain._given_views = None
new_chain._grp_text_map = []
new_chain._text_map = None
# new_chain._pad_id = None
# new_chain._array_style = None
new_chain._has_rules = False
# new_chain.double_base = False
# new_chain.sig_test_letters = None
# new_chain.totalize = True
# new_chain._meta['var_meta'] = basic_chain_defintion[-1]
# new_chain._extract_base_descriptions()
new_chain._views = OrderedDict()
            vpr = new_chain._views_per_rows()
            for vk in vpr:
                if vk not in new_chain._views:
                    new_chain._views[vk] = vpr.count(vk)
return new_chain
def mine_mtd(tab_collection, paint, chain_coll, folder=None):
failed = []
unsupported = []
for name, sub_tab in list(tab_collection.items()):
try:
if isinstance(list(sub_tab.values())[0], dict):
mine_mtd(sub_tab, paint, chain_coll, name)
else:
tabs = split_tab(sub_tab)
chain_dfs = []
for tab in tabs:
df, meta = tab[0], tab[1]
                            nested_x = None
nested_y = (df.columns.nlevels % 2 == 0
and df.columns.nlevels > 2)
sigtested = (df.columns.nlevels % 2 != 0
and df.columns.nlevels > 2)
if sigtested:
df = df.swaplevel(0, axis=1).swaplevel(0, 1, 1)
else:
invalid = ['-', '*', '**']
df = df.applymap(
lambda x: float(x.replace(',', '.').replace('%', ''))
if isinstance(x, str) and not x in invalid
else x
)
x, y = _get_axis_vars(df)
df.replace('-', np.NaN, inplace=True)
relabel_axes(df, meta, sigtested, labels=paint)
colbase_l = -2 if sigtested else -1
for base in ['Base', 'UnweightedBase']:
df = df.drop(base, axis=1, level=colbase_l)
chain = to_chain((df, x, y), meta)
chain.name = name
chain_dfs.append(chain)
if not folder:
chain_coll.extend(chain_dfs)
else:
folders = [(i, list(c.keys())[0]) for i, c in
enumerate(chain_coll, 0) if
isinstance(c, dict)]
if folder in [f[1] for f in folders]:
pos = [f[0] for f in folders
if f[1] == folder][0]
chain_coll[pos][folder].extend(chain_dfs)
else:
chain_coll.append({folder: chain_dfs})
                except Exception:
failed.append(name)
return chain_coll
chain_coll = []
chains = mine_mtd(mtd_doc, paint, chain_coll)
self.__chains = chains
return self
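    # ------------------------------------------------------------------------
    # Hedged usage sketch (not executed): converting a "pandified" .mtd table
    # document. ``mtd_doc`` (the dict of DataFrames plus metadata) and ``cm``
    # (a ChainManager instance) are assumed to exist already.
    #
    #   cm.from_mtd(mtd_doc, paint=True)
    #   for chain in cm:          # the manager is iterable, see paint_all()
    #       print(chain)
    # ------------------------------------------------------------------------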
def from_cmt(self, crunch_tabbook, ignore=None, cell_items='c',
array_summaries=True):
"""
Convert a Crunch multitable document (tabbook) into a collection of
quantipy.Chain representations.
Parameters
----------
crunch_tabbook : ``Tabbook`` object instance
            The Crunch tabbook (multitable) export to convert.
        ignore : bool, default None
            Currently unused.
        cell_items : {'c', 'p', 'cp'}, default 'c'
            The cell representation of the converted tables, i.e. counts
            ('c') and/or column percentages ('p').
        array_summaries : bool, default True
            If True, array (grid) variables are additionally represented by
            their summary DataFrame.
Returns
-------
self : quantipy.ChainManager
Will consist of Quantipy representations of the Crunch table
document.
"""
def cubegroups_to_chain_defs(cubegroups, ci, arr_sum):
"""
Convert CubeGroup DataFrame to a Chain.dataframe.
"""
chain_dfs = []
# DataFrame edits to get basic Chain.dataframe rep.
for idx, cubegroup in enumerate(cubegroups):
cubegroup_df = cubegroup.dataframe
array = cubegroup.is_array
# split arrays into separate dfs / convert to summary df...
if array:
ai_aliases = cubegroup.subref_aliases
array_elements = []
dfs = []
if array_summaries:
arr_sum_df = cubegroup_df.copy().unstack()['All']
arr_sum_df.is_summary = True
x_label = arr_sum_df.index.get_level_values(0).tolist()[0]
x_name = cubegroup.rowdim.alias
dfs.append((arr_sum_df, x_label, x_name))
array_elements = cubegroup_df.index.levels[1].values.tolist()
ai_df = cubegroup_df.copy()
idx = cubegroup_df.index.droplevel(0)
ai_df.index = idx
for array_element, alias in zip(array_elements, ai_aliases):
dfs.append((ai_df.loc[[array_element], :].copy(),
array_element, alias))
else:
x_label = cubegroup_df.index.get_level_values(0).tolist()[0]
x_name = cubegroup.rowdim.alias
dfs = [(cubegroup_df, x_label, x_name)]
# Apply QP-style DataFrame conventions (indexing, names, etc.)
for cgdf, x_var_label, x_var_name in dfs:
is_summary = hasattr(cgdf, 'is_summary')
if is_summary:
cgdf = cgdf.T
y_var_names = ['@']
x_names = ['Question', 'Values']
y_names = ['Array', 'Questions']
else:
y_var_names = cubegroup.colvars
x_names = ['Question', 'Values']
y_names = ['Question', 'Values']
cgdf.index = cgdf.index.droplevel(0)
# Compute percentages?
if cell_items == 'p': _calc_pct(cgdf)
# Build x-axis multiindex / rearrange "Base" row
idx_vals = cgdf.index.values.tolist()
cgdf = cgdf.reindex([idx_vals[-1]] + idx_vals[:-1])
idx_vals = cgdf.index.values.tolist()
mi_vals = [[x_var_label], self._native_stat_names(idx_vals)]
row_mi = pd.MultiIndex.from_product(mi_vals, names=x_names)
cgdf.index = row_mi
# Build y-axis multiindex
y_vals = [('Total', 'Total') if y[0] == 'All'
else y for y in cgdf.columns.tolist()]
col_mi = pd.MultiIndex.from_tuples(y_vals, names=y_names)
cgdf.columns = col_mi
if is_summary:
cgdf = cgdf.T
chain_dfs.append((cgdf, x_var_name, y_var_names, cubegroup._meta))
return chain_dfs
def _calc_pct(df):
df.iloc[:-1, :] = df.iloc[:-1, :].div(df.iloc[-1, :]) * 100
return None
def to_chain(basic_chain_defintion, add_chain_meta):
"""
"""
new_chain = Chain(None, basic_chain_defintion[1])
new_chain.source = 'Crunch multitable'
new_chain.stack = None
new_chain.painted = True
new_chain._meta = add_chain_meta
new_chain._frame = basic_chain_defintion[0]
new_chain._x_keys = [basic_chain_defintion[1]]
new_chain._y_keys = basic_chain_defintion[2]
new_chain._given_views = None
new_chain._grp_text_map = []
new_chain._text_map = None
new_chain._pad_id = None
new_chain._array_style = None
new_chain._has_rules = False
new_chain.double_base = False
new_chain.sig_test_letters = None
new_chain.totalize = True
new_chain._meta['var_meta'] = basic_chain_defintion[-1]
new_chain._extract_base_descriptions()
new_chain._views = OrderedDict()
            vpr = new_chain._views_per_rows()
            for vk in vpr:
                if vk not in new_chain._views:
                    new_chain._views[vk] = vpr.count(vk)
return new_chain
# self.name = name OK!
# self._meta = Crunch meta OK!
# self._x_keys = None OK!
# self._y_keys = None OK!
# self._frame = None OK!
# self.totalize = False OK! -> But is True!
# self.stack = stack OK! -> N/A
# self._has_rules = None OK! -> N/A
# self.double_base = False OK! -> N/A
# self.sig_test_letters = None OK! -> N/A
# self._pad_id = None OK! -> N/A
# self._given_views = None OK! -> N/A
# self._grp_text_map = [] OK! -> N/A
# self._text_map = None OK! -> N/A
# self.grouping = None ?
# self._group_style = None ?
# self._transl = qp.core.view.View._metric_name_map() * with CMT/MTD
self.source = 'Crunch multitable'
cubegroups = crunch_tabbook.cube_groups
meta = {'display_settings': crunch_tabbook.display_settings,
'weight': crunch_tabbook.weight}
if cell_items == 'c':
meta['display_settings']['countsOrPercents'] = 'counts'
elif cell_items == 'p':
meta['display_settings']['countsOrPercents'] = 'percent'
chain_defs = cubegroups_to_chain_defs(cubegroups, cell_items,
array_summaries)
self.__chains = [to_chain(c_def, meta) for c_def in chain_defs]
return self
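    # ------------------------------------------------------------------------
    # Hedged usage sketch (not executed): ``tabbook`` is assumed to be a
    # Crunch Tabbook object retrieved elsewhere (e.g. via the Crunch API
    # client); it only needs the ``cube_groups``, ``display_settings`` and
    # ``weight`` attributes used above.
    #
    #   cm.from_cmt(tabbook, cell_items='c', array_summaries=True)
    #   cm.paint_all()
    # ------------------------------------------------------------------------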
# ------------------------------------------------------------------------
def from_cluster(self, clusters):
"""
        Convert old-style ``quantipy.Cluster``/``Chain`` collections into new
        ``Chain`` representations stored in this ``ChainManager``.
        Parameters
        ----------
        clusters : cluster-like ([dict of] quantipy.Cluster)
            The cluster collection(s), keyed by cluster name, whose chains
            will be regenerated via the stack of this ``ChainManager``.
        Returns
        -------
        None
            The new chains are appended to the ``ChainManager`` inplace.
"""
self.source = 'native (old qp.Cluster of qp.Chain)'
qp.set_option('new_chains', True)
def check_cell_items(views):
c = any('counts' in view.split('|')[-1] for view in views)
p = any('c%' in view.split('|')[-1] for view in views)
cp = c and p
if cp:
cell_items = 'counts_colpct'
else:
cell_items = 'counts' if c else 'colpct'
return cell_items
def check_sigtest(views):
"""
"""
levels = []
sigs = [v.split('|')[1] for v in views if v.split('|')[1].startswith('t.')]
for sig in sigs:
l = '0.{}'.format(sig.split('.')[-1])
if not l in levels: levels.append(l)
return levels
def mine_chain_structure(clusters):
cluster_defs = []
for cluster_def_name, cluster in list(clusters.items()):
for name in cluster:
if isinstance(list(cluster[name].items())[0][1], pd.DataFrame):
cluster_def = {'name': name,
'oe': True,
'df': list(cluster[name].items())[0][1],
'filter': chain.filter,
'data_key': chain.data_key}
else:
xs, views, weight = [], [], []
for chain_name, chain in list(cluster[name].items()):
for v in chain.views:
w = v.split('|')[-2]
if w not in weight: weight.append(w)
if v not in views: views.append(v)
xs.append(chain.source_name)
ys = chain.content_of_axis
cluster_def = {'name': '{}-{}'.format(cluster_def_name, name),
'filter': chain.filter,
'data_key': chain.data_key,
'xs': xs,
'ys': ys,
'views': views,
'weight': weight[-1],
'bases': 'both' if len(weight) == 2 else 'auto',
'cell_items': check_cell_items(views),
'tests': check_sigtest(views)}
cluster_defs.append(cluster_def)
return cluster_defs
from quantipy.core.view_generators.view_specs import ViewManager
cluster_specs = mine_chain_structure(clusters)
for cluster_spec in cluster_specs:
oe = cluster_spec.get('oe', False)
if not oe:
vm = ViewManager(self.stack)
vm.get_views(cell_items=cluster_spec['cell_items'],
weight=cluster_spec['weight'],
bases=cluster_spec['bases'],
stats= ['mean', 'stddev', 'median', 'min', 'max'],
tests=cluster_spec['tests'])
self.get(data_key=cluster_spec['data_key'],
filter_key=cluster_spec['filter'],
x_keys = cluster_spec['xs'],
y_keys = cluster_spec['ys'],
views=vm.views,
orient='x',
prioritize=True)
else:
meta = [cluster_spec['data_key'], cluster_spec['filter']]
df, name = cluster_spec['df'], cluster_spec['name']
self.add(df, meta_from=meta, name=name)
return None
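    # ------------------------------------------------------------------------
    # Hedged usage sketch (not executed): ``clusters`` is assumed to be a
    # dict of old-style quantipy.Cluster objects keyed by cluster name, and
    # the manager must have been created with the stack those clusters were
    # aggregated from.
    #
    #   cm.from_cluster(clusters)
    #   cm.paint_all()
    # ------------------------------------------------------------------------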
@staticmethod
def _force_list(obj):
if isinstance(obj, (list, tuple)):
return obj
return [obj]
def _check_keys(self, data_key, keys):
""" Checks given keys exist in meta['columns']
"""
keys = self._force_list(keys)
meta = self.stack[data_key].meta
valid = list(meta['columns'].keys()) + list(meta['masks'].keys())
invalid = ['"%s"' % _ for _ in keys if _ not in valid and _ != _TOTAL]
if invalid:
raise ValueError("Keys %s do not exist in meta['columns'] or "
"meta['masks']." % ", ".join(invalid))
return keys
def add(self, structure, meta_from=None, meta=None, name=None):
""" Add a pandas.DataFrame as a Chain.
Parameters
----------
structure : ``pandas.Dataframe``
            The dataframe to add to the ``ChainManager``.
meta_from : list, list-like, str, default None
The location of the meta in the stack. Either a list-like object with data key and
filter key or a str as the data key
meta : quantipy meta (dict)
External meta used to paint the frame
name : ``str``, default None
The name to give the resulting chain. If not passed, the name will become
the concatenated column names, delimited by a period
Returns
-------
appended : ``quantipy.ChainManager``
"""
name = name or '.'.join(structure.columns.tolist())
chain = Chain(self.stack, name, structure=structure)
chain._frame = chain.structure
chain._index = chain._frame.index
chain._columns = chain._frame.columns
chain._frame_values = chain._frame.values
if meta_from:
if isinstance(meta_from, str):
chain._meta = self.stack[meta_from].meta
else:
data_key, filter_key = meta_from
chain._meta = self.stack[data_key][filter_key].meta
elif meta:
chain._meta = meta
self.__chains.append(chain)
return self
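    # ------------------------------------------------------------------------
    # Hedged usage sketch (not executed): adding a plain DataFrame (e.g. open
    # ended verbatims) as a structure Chain. ``oe_df``, the data key and the
    # filter key are illustrative placeholders.
    #
    #   cm.add(oe_df, meta_from=('mydata', 'no_filter'), name='verbatims')
    # ------------------------------------------------------------------------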
def get(self, data_key, filter_key, x_keys, y_keys, views, orient='x',
rules=True, rules_weight=None, prioritize=True, folder=None):
"""
TODO: Full doc string
Get a (list of) Chain instance(s) in either 'x' or 'y' orientation.
Chain.dfs will be concatenated along the provided 'orient'-axis.
"""
# TODO: VERIFY data_key
# TODO: VERIFY filter_key
# TODO: Add verbose arg to get()
x_keys = self._check_keys(data_key, x_keys)
y_keys = self._check_keys(data_key, y_keys)
if folder and not isinstance(folder, str):
err = "'folder' must be a name provided as string!"
raise ValueError(err)
if orient == 'x':
it, keys = x_keys, y_keys
else:
it, keys = y_keys, x_keys
for key in it:
x_key, y_key = (key, keys) if orient == 'x' else (keys, key)
chain = Chain(self.stack, key)
chain = chain.get(data_key, filter_key, self._force_list(x_key),
self._force_list(y_key), views, rules=rules,
rules_weight=rules_weight, prioritize=prioritize,
orient=orient)
folders = self.folder_names
if folder in folders:
idx = self._idx_from_name(folder)
self.__chains[idx][folder].append(chain)
else:
if folder:
self.__chains.append({folder: [chain]})
else:
self.__chains.append(chain)
return None
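    # ------------------------------------------------------------------------
    # Hedged usage sketch (not executed): all data/filter/x/y keys and view
    # keys below are placeholders and must exist in the stack backing this
    # manager. The view key notation follows the quantipy convention used
    # throughout this module (e.g. 'x|f|x:|||cbase').
    #
    #   views = ['x|f|x:|||cbase', 'x|f|:|y||c%']
    #   cm.get(data_key='mydata', filter_key='no_filter',
    #          x_keys=['q1', 'q2'], y_keys=['@', 'gender'],
    #          views=views, orient='x', folder='Demographics')
    # ------------------------------------------------------------------------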
def paint_all(self, *args, **kwargs):
"""
Apply labels, sig. testing conversion and other post-processing to the
``Chain.dataframe`` property.
Use this to prepare a ``Chain`` for further usage in an Excel or Power-
point Build.
Parameters
----------
text_key : str, default meta['lib']['default text']
The language version of any variable metadata applied.
text_loc_x : str, default None
The key in the 'text' to locate the text_key for the
``pandas.DataFrame.index`` labels
text_loc_y : str, default None
The key in the 'text' to locate the text_key for the
``pandas.DataFrame.columns`` labels
display : {'x', 'y', ['x', 'y']}, default None
Text
axes : {'x', 'y', ['x', 'y']}, default None
Text
view_level : bool, default False
Text
        transform_tests : {False, 'upper', 'lower', 'alternate'}, default 'upper'
            Passed through to ``Chain.paint()``: controls if and how digit-
            based significance test results are converted to column letters.
totalize : bool, default False
Text
Returns
-------
None
The ``.dataframe`` is modified inplace.
"""
for chain in self:
if isinstance(chain, dict):
for c in list(chain.values())[0]:
c.paint(*args, **kwargs)
else:
chain.paint(*args, **kwargs)
return None
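    # ------------------------------------------------------------------------
    # Hedged usage sketch (not executed): painting every chain, including
    # chains collected inside folders. The keyword arguments are forwarded to
    # ``Chain.paint()``.
    #
    #   cm.paint_all(text_key='en-GB', add_base_texts='simple',
    #                transform_tests='upper')
    # ------------------------------------------------------------------------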
HEADERS = ['header-title',
'header-left',
'header-center',
'header-right']
FOOTERS = ['footer-title',
'footer-left',
'footer-center',
'footer-right']
VALID_ANNOT_TYPES = HEADERS + FOOTERS + ['notes']
VALID_ANNOT_CATS = ['header', 'footer', 'notes']
VALID_ANNOT_POS = ['title',
'left',
'center',
'right']
class ChainAnnotations(dict):
def __init__(self):
super(ChainAnnotations, self).__init__()
self.header_title = []
self.header_left = []
self.header_center = []
self.header_right = []
self.footer_title = []
self.footer_left = []
self.footer_center = []
self.footer_right = []
self.notes = []
for v in VALID_ANNOT_TYPES:
self[v] = []
def __setitem__(self, key, value):
self._test_valid_key(key)
return super(ChainAnnotations, self).__setitem__(key, value)
def __getitem__(self, key):
self._test_valid_key(key)
return super(ChainAnnotations, self).__getitem__(key)
def __repr__(self):
headers = [(h.split('-')[1], self[h]) for h in self.populated if
h.split('-')[0] == 'header']
footers = [(f.split('-')[1], self[f]) for f in self.populated if
f.split('-')[0] == 'footer']
notes = self['notes'] if self['notes'] else []
if notes:
ar = 'Notes\n'
ar += '-{:>16}\n'.format(str(notes))
else:
ar = 'Notes: None\n'
if headers:
ar += 'Headers\n'
for pos, text in list(dict(headers).items()):
ar += ' {:>5}: {:>5}\n'.format(str(pos), str(text))
else:
ar += 'Headers: None\n'
if footers:
ar += 'Footers\n'
for pos, text in list(dict(footers).items()):
ar += ' {:>5}: {:>5}\n'.format(str(pos), str(text))
else:
ar += 'Footers: None'
return ar
def _test_valid_key(self, key):
"""
"""
if key not in VALID_ANNOT_TYPES:
splitted = key.split('-')
if len(splitted) > 1:
acat, apos = splitted[0], splitted[1]
else:
acat, apos = key, None
if apos:
if acat == 'notes':
msg = "'{}' annotation type does not support positions!"
msg = msg.format(acat)
elif not acat in VALID_ANNOT_CATS and not apos in VALID_ANNOT_POS:
msg = "'{}' is not a valid annotation type!".format(key)
elif acat not in VALID_ANNOT_CATS:
msg = "'{}' is not a valid annotation category!".format(acat)
elif apos not in VALID_ANNOT_POS:
msg = "'{}' is not a valid annotation position!".format(apos)
else:
msg = "'{}' is not a valid annotation type!".format(key)
raise KeyError(msg)
@property
def header(self):
h_dict = {}
for h in HEADERS:
if self[h]: h_dict[h.split('-')[1]] = self[h]
return h_dict
@property
def footer(self):
f_dict = {}
for f in FOOTERS:
if self[f]: f_dict[f.split('-')[1]] = self[f]
return f_dict
@property
def populated(self):
"""
The annotation fields that are defined.
"""
return sorted([k for k, v in list(self.items()) if v])
@staticmethod
def _annot_key(a_type, a_pos):
if a_pos:
return '{}-{}'.format(a_type, a_pos)
else:
return a_type
def set(self, text, category='header', position='title'):
"""
Add annotation texts defined by their category and position.
Parameters
----------
category : {'header', 'footer', 'notes'}, default 'header'
Defines if the annotation is treated as a *header*, *footer* or
*note*.
position : {'title', 'left', 'center', 'right'}, default 'title'
Sets the placement of the annotation within its category.
Returns
-------
None
"""
if not category: category = 'header'
if not position and category != 'notes': position = 'title'
if category == 'notes': position = None
akey = self._annot_key(category, position)
self[akey].append(text)
self.__dict__[akey.replace('-', '_')].append(text)
return None
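# ----------------------------------------------------------------------------
# Hedged usage sketch (not executed): attaching annotation texts to a chain.
# ``chain`` is assumed to be a populated Chain instance (every Chain carries
# a ChainAnnotations object on its ``.annotations`` attribute).
#
#   chain.annotations.set('Base: all respondents', category='header',
#                         position='left')
#   chain.annotations.set('Fieldwork period: Q1', category='footer')
#   chain.annotations.set('Flag low bases', category='notes')
#   print(chain.annotations.populated)
# ----------------------------------------------------------------------------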
CELL_DETAILS = {'en-GB': {'cc': 'Cell Contents',
'N': 'Counts',
'c%': 'Column Percentages',
'r%': 'Row Percentages',
'str': 'Statistical Test Results',
'cp': 'Column Proportions',
'cm': 'Means',
'stats': 'Statistics',
'mb': 'Minimum Base',
'sb': 'Small Base',
'up': ' indicates result is significantly higher than the result in the Total column',
'down': ' indicates result is significantly lower than the result in the Total column'
},
'fr-FR': {'cc': 'Contenu cellule',
'N': 'Total',
'c%': 'Pourcentage de colonne',
'r%': 'Pourcentage de ligne',
'str': 'Résultats test statistique',
'cp': 'Proportions de colonne',
'cm': 'Moyennes de colonne',
'stats': 'Statistiques',
'mb': 'Base minimum',
'sb': 'Petite base',
'up': ' indique que le résultat est significativement supérieur au résultat de la colonne Total',
'down': ' indique que le résultat est significativement inférieur au résultat de la colonne Total'
}}
class Chain(object):
def __init__(self, stack, name, structure=None):
self.stack = stack
self.name = name
self.structure = structure
self.source = 'native'
self.edited = False
self._custom_views = None
self.double_base = False
self.grouping = None
self.sig_test_letters = None
self.totalize = False
self.base_descriptions = None
self.painted = False
self.hidden = False
self.annotations = ChainAnnotations()
self._array_style = None
self._group_style = None
self._meta = None
self._x_keys = None
self._y_keys = None
self._given_views = None
self._grp_text_map = []
self._text_map = None
self._custom_texts = {}
self._transl = qp.core.view.View._metric_name_map()
self._pad_id = None
self._frame = None
self._has_rules = None
self._flag_bases = None
self._is_mask_item = False
self._shapes = None
class _TransformedChainDF(object):
"""
"""
def __init__(self, chain):
c = chain.clone()
self.org_views = c.views
self.df = c._frame
self._org_idx = self.df.index
self._edit_idx = list(range(0, len(self._org_idx)))
self._idx_valmap = {n: o for n, o in
zip(self._edit_idx,
self._org_idx.get_level_values(1))}
self.df.index = self._edit_idx
self._org_col = self.df.columns
self._edit_col = list(range(0, len(self._org_col)))
self._col_valmap = {n: o for n, o in
zip(self._edit_col,
self._org_col.get_level_values(1))}
self.df.columns = self._edit_col
self.array_mi = c._array_style == 0
self.nested_y = c._nested_y
self._nest_mul = self._nesting_multiplier()
return None
def _nesting_multiplier(self):
"""
"""
levels = self._org_col.nlevels
if levels == 2:
return 1
else:
                return (levels // 2) + 1
def _insert_viewlikes(self, new_index_flat, org_index_mapped):
inserts = [new_index_flat.index(val) for val in new_index_flat
if not val in list(org_index_mapped.values())]
flatviews = []
for name, no in list(self.org_views.items()):
e = [name] * no
flatviews.extend(e)
for vno, i in enumerate(inserts):
flatviews.insert(i, '__viewlike__{}'.format(vno))
new_views = OrderedDict()
no_of_views = Counter(flatviews)
for fv in flatviews:
if not fv in new_views: new_views[fv] = no_of_views[fv]
return new_views
def _updated_index_tuples(self, axis):
"""
"""
if axis == 1:
current = self.df.columns.values.tolist()
mapped = self._col_valmap
org_tuples = self._org_col.tolist()
else:
current = self.df.index.values.tolist()
mapped = self._idx_valmap
org_tuples = self._org_idx.tolist()
merged = [mapped[val] if val in mapped else val for val in current]
# ================================================================
if (self.array_mi and axis == 1) or axis == 0:
self._transf_views = self._insert_viewlikes(merged, mapped)
else:
self._transf_views = self.org_views
# ================================================================
i = d = 0
new_tuples = []
for merged_val in merged:
idx = i-d if i-d != len(org_tuples) else i-d-1
if org_tuples[idx][1] == merged_val:
new_tuples.append(org_tuples[idx])
else:
empties = ['*'] * self._nest_mul
new_tuple = tuple(empties + [merged_val])
new_tuples.append(new_tuple)
d += 1
i += 1
return new_tuples
def _reindex(self):
"""
"""
y_names = ['Question', 'Values']
if not self.array_mi:
x_names = y_names
else:
x_names = ['Array', 'Questions']
if self.nested_y: y_names = y_names * (self._nest_mul - 1)
tuples = self._updated_index_tuples(axis=1)
self.df.columns = pd.MultiIndex.from_tuples(tuples, names=y_names)
tuples = self._updated_index_tuples(axis=0)
self.df.index = pd.MultiIndex.from_tuples(tuples, names=x_names)
return None
def export(self):
"""
"""
return self._TransformedChainDF(self)
def assign(self, transformed_chain_df):
"""
"""
if not isinstance(transformed_chain_df, self._TransformedChainDF):
raise ValueError("Must pass an exported ``Chain`` instance!")
transformed_chain_df._reindex()
self._frame = transformed_chain_df.df
self.views = transformed_chain_df._transf_views
return None
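    # ------------------------------------------------------------------------
    # Hedged usage sketch (not executed): the export()/assign() round trip
    # exposes a flat, integer-indexed copy of the Chain dataframe that a
    # build step can edit before handing it back.
    #
    #   edited = chain.export()              # _TransformedChainDF wrapper
    #   edited.df = edited.df.iloc[:-1, :]   # e.g. drop the last row
    #   chain.assign(edited)                 # restores the MultiIndex labels
    # ------------------------------------------------------------------------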
def __str__(self):
if self.structure is not None:
return '%s...\n%s' % (self.__class__.__name__, str(self.structure.head()))
str_format = ('%s...'
'\nSource: %s'
'\nName: %s'
'\nOrientation: %s'
'\nX: %s'
'\nY: %s'
'\nNumber of views: %s')
return str_format % (self.__class__.__name__,
getattr(self, 'source', 'native'),
getattr(self, 'name', 'None'),
getattr(self, 'orientation', 'None'),
getattr(self, '_x_keys', 'None'),
getattr(self, '_y_keys', 'None'),
getattr(self, 'views', 'None'))
def __repr__(self):
return self.__str__()
def __len__(self):
"""Returns the total number of cells in the Chain.dataframe"""
return (len(getattr(self, 'index', [])) * len(getattr(self, 'columns', [])))
def clone(self):
"""
"""
return copy.deepcopy(self)
@lazy_property
def _default_text(self):
tk = self._meta['lib']['default text']
if tk not in self._transl:
self._transl[tk] = self._transl['en-GB']
return tk
@lazy_property
def orientation(self):
""" TODO: doc string
"""
if len(self._x_keys) == 1 and len(self._y_keys) == 1:
return 'x'
elif len(self._x_keys) == 1:
return 'x'
elif len(self._y_keys) == 1:
return 'y'
if len(self._x_keys) > 1 and len(self._y_keys) > 1:
return None
@lazy_property
def axis(self):
# TODO: name appropriate?
return int(self.orientation=='x')
@lazy_property
def axes(self):
# TODO: name appropriate?
if self.axis == 1:
return self._x_keys, self._y_keys
return self._y_keys, self._x_keys
@property
def dataframe(self):
return self._frame
@property
def index(self):
return self._index
@index.setter
def index(self, index):
self._index = index
@property
def columns(self):
return self._columns
@columns.setter
def columns(self, columns):
self._columns = columns
@property
def frame_values(self):
return self._frame_values
@frame_values.setter
def frame_values(self, frame_values):
self._frame_values = frame_values
@property
def views(self):
return self._views
@views.setter
def views(self, views):
self._views = views
@property
def array_style(self):
return self._array_style
@property
def shapes(self):
if self._shapes is None:
self._shapes = []
return self._shapes
@array_style.setter
def array_style(self, link):
array_style = -1
for view in list(link.keys()):
if link[view].meta()['x']['is_array']:
array_style = 0
if link[view].meta()['y']['is_array']:
array_style = 1
self._array_style = array_style
@property
def pad_id(self):
if self._pad_id is None:
self._pad_id = 0
else:
self._pad_id += 1
return self._pad_id
@property
def sig_levels(self):
sigs = set([v for v in self._valid_views(True)
if v.split('|')[1].startswith('t.')])
tests = [t.split('|')[1].split('.')[1] for t in sigs]
levels = [t.split('|')[1].split('.')[3] for t in sigs]
sig_levels = {}
for m in zip(tests, levels):
l = '.{}'.format(m[1])
t = m[0]
if t in sig_levels:
sig_levels[t].append(l)
else:
sig_levels[t] = [l]
return sig_levels
@property
def cell_items(self):
if self.views:
compl_views = [v for v in self.views if ']*:' in v]
            check_views = compl_views[:] or list(self.views)
            # drop placeholder '__viewlike__' entries before inspecting the
            # remaining view keys
            check_views = [v for v in check_views
                           if not v.startswith('__viewlike__')]
non_freqs = ('d.', 't.')
c = any(v.split('|')[3] == '' and
not v.split('|')[1].startswith(non_freqs) and
not v.split('|')[-1].startswith('cbase')
for v in check_views)
col_pct = any(v.split('|')[3] == 'y' and
not v.split('|')[1].startswith(non_freqs) and
not v.split('|')[-1].startswith('cbase')
for v in check_views)
row_pct = any(v.split('|')[3] == 'x' and
not v.split('|')[1].startswith(non_freqs) and
not v.split('|')[-1].startswith('cbase')
for v in check_views)
c_colpct = c and col_pct
c_rowpct = c and row_pct
c_colrow_pct = c_colpct and c_rowpct
single_ci = not (c_colrow_pct or c_colpct or c_rowpct)
if single_ci:
if c:
return 'counts'
elif col_pct:
return 'colpct'
else:
return 'rowpct'
else:
if c_colrow_pct:
return 'counts_colpct_rowpct'
elif c_colpct:
if self._counts_first():
return 'counts_colpct'
else:
return 'colpct_counts'
else:
return 'counts_rowpct'
@property
def _ci_simple(self):
ci = []
if self.views:
for v in self.views:
if 'significance' in v:
continue
if ']*:' in v:
if v.split('|')[3] == '':
if 'N' not in ci:
ci.append('N')
if v.split('|')[3] == 'y':
if 'c%' not in ci:
ci.append('c%')
if v.split('|')[3] == 'x':
if 'r%' not in ci:
ci.append('r%')
else:
if v.split('|')[-1] == 'counts':
if 'N' not in ci:
ci.append('N')
elif v.split('|')[-1] == 'c%':
if 'c%' not in ci:
ci.append('c%')
elif v.split('|')[-1] == 'r%':
if 'r%' not in ci:
ci.append('r%')
return ci
@property
def ci_count(self):
return len(self.cell_items.split('_'))
@property
def contents(self):
        if self.structure is not None:
return
nested = self._array_style == 0
if nested:
dims = self._frame.shape
contents = {row: {col: {} for col in range(0, dims[1])}
for row in range(0, dims[0])}
else:
contents = dict()
for row, idx in enumerate(self._views_per_rows()):
if nested:
for i, v in list(idx.items()):
contents[row][i] = self._add_contents(v)
else:
contents[row] = self._add_contents(idx)
return contents
@property
def cell_details(self):
lang = self._default_text if self._default_text == 'fr-FR' else 'en-GB'
cd = CELL_DETAILS[lang]
ci = self.cell_items
cd_str = '%s (%s)' % (cd['cc'], ', '.join([cd[_] for _ in self._ci_simple]))
against_total = False
if self.sig_test_letters:
mapped = ''
group = None
i = 0 if (self._frame.columns.nlevels in [2, 3]) else 4
for letter, lab in zip(self.sig_test_letters, self._frame.columns.codes[-i]):
if letter == '@':
continue
if group is not None:
if lab == group:
mapped += '/' + letter
else:
group = lab
mapped += ', ' + letter
else:
group = lab
mapped += letter
test_types = cd['cp']
if self.sig_levels.get('means'):
test_types += ', ' + cd['cm']
levels = []
for key in ('props', 'means'):
for level in self.sig_levels.get(key, iter(())):
l = '%s%%' % int(100. - float(level.split('+@')[0].split('.')[1]))
if l not in levels:
levels.append(l)
if '+@' in level:
against_total = True
cd_str = cd_str[:-1] + ', ' + cd['str'] +'), '
cd_str += '%s (%s, (%s): %s' % (cd['stats'], test_types, ', '.join(levels), mapped)
if self._flag_bases:
flags = ([], [])
            for min_base, small_base in self._flag_bases:
                flags[0].append(min_base)
                flags[1].append(small_base)
cd_str += ', %s: %s (**), %s: %s (*)' % (cd['mb'], ', '.join(map(str, flags[0])),
cd['sb'], ', '.join(map(str, flags[1])))
cd_str += ')'
cd_str = [cd_str]
if against_total:
cd_str.extend([cd['up'], cd['down']])
return cd_str
def describe(self):
def _describe(cell_defs, row_id):
descr = []
for r, m in list(cell_defs.items()):
descr.append(
[k if isinstance(v, bool) else v for k, v in list(m.items()) if v])
if any('is_block' in d for d in descr):
blocks = self._describe_block(descr, row_id)
calc = 'calc' in blocks
for d, b in zip(descr, blocks):
if b:
d.append(b) if not calc else d.extend([b, 'has_calc'])
return descr
if self._array_style == 0:
description = {k: _describe(v, k) for k, v in list(self.contents.items())}
else:
description = _describe(self.contents, None)
return description
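    # ------------------------------------------------------------------------
    # Hedged usage sketch (not executed): ``describe()`` returns one entry per
    # row (or a dict keyed by row for array-style chains) listing the active
    # view properties, e.g. ['is_counts', 'is_weighted', <weight name>].
    #
    #   if chain.array_style != 0:
    #       for row_desc in chain.describe():
    #           print(row_desc)
    # ------------------------------------------------------------------------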
def _fill_cells(self):
"""
"""
self._frame = self._frame.fillna(method='ffill')
return None
# @lazy_property
def _counts_first(self):
for v in self.views:
sname = v.split('|')[-1]
if sname in ['counts', 'c%']:
if sname == 'counts':
return True
else:
return False
#@property
def _views_per_rows(self):
"""
"""
base_vk = 'x|f|x:||{}|cbase'
counts_vk = 'x|f|:||{}|counts'
pct_vk = 'x|f|:|y|{}|c%'
mean_vk = 'x|d.mean|:|y|{}|mean'
stddev_vk = 'x|d.stddev|:|y|{}|stddev'
variance_vk = 'x|d.var|:|y|{}|var'
sem_vk = 'x|d.sem|:|y|{}|sem'
if self.source == 'Crunch multitable':
ci = self._meta['display_settings']['countsOrPercents']
w = self._meta['weight']
if ci == 'counts':
main_vk = counts_vk.format(w if w else '')
else:
main_vk = pct_vk.format(w if w else '')
base_vk = base_vk.format(w if w else '')
metrics = [base_vk] + (len(self.dataframe.index)-1) * [main_vk]
elif self.source == 'Dimensions MTD':
ci = self._meta['cell_items']
w = None
axis_vals = [axv['Type'] for axv in self._meta['index-emetas']]
metrics = []
for axis_val in axis_vals:
if axis_val == 'Base':
metrics.append(base_vk.format(w if w else ''))
if axis_val == 'UnweightedBase':
metrics.append(base_vk.format(w if w else ''))
elif axis_val == 'Category':
metrics.append(counts_vk.format(w if w else ''))
elif axis_val == 'Mean':
metrics.append(mean_vk.format(w if w else ''))
elif axis_val == 'StdDev':
metrics.append(stddev_vk.format(w if w else ''))
elif axis_val == 'StdErr':
metrics.append(sem_vk.format(w if w else ''))
elif axis_val == 'SampleVar':
metrics.append(variance_vk.format(w if w else ''))
return metrics
else:
# Native Chain views
# ----------------------------------------------------------------
if self.edited and (self._custom_views and not self.array_style == 0):
return self._custom_views
else:
if self._array_style != 0:
metrics = []
if self.orientation == 'x':
for view in self._valid_views():
view = self._force_list(view)
initial = view[0]
size = self.views[initial]
metrics.extend(view * size)
else:
for view_part in self.views:
for view in self._valid_views():
view = self._force_list(view)
initial = view[0]
size = view_part[initial]
metrics.extend(view * size)
else:
counts = []
colpcts = []
rowpcts = []
metrics = []
ci = self.cell_items
for v in list(self.views.keys()):
if not v.startswith('__viewlike__'):
parts = v.split('|')
is_completed = ']*:' in v
if not self._is_c_pct(parts):
counts.extend([v]*self.views[v])
if self._is_r_pct(parts):
rowpcts.extend([v]*self.views[v])
if (self._is_c_pct(parts) or self._is_base(parts) or
self._is_stat(parts)):
colpcts.extend([v]*self.views[v])
else:
counts = counts + ['__viewlike__']
colpcts = colpcts + ['__viewlike__']
rowpcts = rowpcts + ['__viewlike__']
dims = self._frame.shape
for row in range(0, dims[0]):
if ci in ['counts_colpct', 'colpct_counts'] and self.grouping:
if row % 2 == 0:
if self._counts_first():
vc = counts
else:
vc = colpcts
else:
if not self._counts_first():
vc = counts
else:
vc = colpcts
else:
vc = counts if ci == 'counts' else colpcts
metrics.append({col: vc[col] for col in range(0, dims[1])})
return metrics
def _valid_views(self, flat=False):
clean_view_list = []
valid = list(self.views.keys())
org_vc = self._given_views
v_likes = [v for v in valid if v.startswith('__viewlike__')]
if isinstance(org_vc, tuple):
v_likes = tuple(v_likes)
view_coll = org_vc + v_likes
for v in view_coll:
if isinstance(v, str):
if v in valid:
clean_view_list.append(v)
else:
new_v = []
for sub_v in v:
if sub_v in valid:
new_v.append(sub_v)
if isinstance(v, tuple):
new_v = list(new_v)
if new_v:
if len(new_v) == 1: new_v = new_v[0]
if not flat:
clean_view_list.append(new_v)
else:
if isinstance(new_v, list):
clean_view_list.extend(new_v)
else:
clean_view_list.append(new_v)
return clean_view_list
def _add_contents(self, viewelement):
"""
"""
if viewelement.startswith('__viewlike__'):
parts = '|||||'
viewlike = True
else:
parts = viewelement.split('|')
viewlike = False
return dict(is_default=self._is_default(parts),
is_c_base=self._is_c_base(parts),
is_r_base=self._is_r_base(parts),
is_e_base=self._is_e_base(parts),
is_c_base_gross=self._is_c_base_gross(parts),
is_counts=self._is_counts(parts),
is_c_pct=self._is_c_pct(parts),
is_r_pct=self._is_r_pct(parts),
is_res_c_pct=self._is_res_c_pct(parts),
is_counts_sum=self._is_counts_sum(parts),
is_c_pct_sum=self._is_c_pct_sum(parts),
is_counts_cumsum=self._is_counts_cumsum(parts),
is_c_pct_cumsum=self._is_c_pct_cumsum(parts),
is_net=self._is_net(parts),
is_block=self._is_block(parts),
is_calc_only = self._is_calc_only(parts),
is_mean=self._is_mean(parts),
is_stddev=self._is_stddev(parts),
is_min=self._is_min(parts),
is_max=self._is_max(parts),
is_median=self._is_median(parts),
is_variance=self._is_variance(parts),
is_sem=self._is_sem(parts),
is_varcoeff=self._is_varcoeff(parts),
is_percentile=self._is_percentile(parts),
is_propstest=self._is_propstest(parts),
is_meanstest=self._is_meanstest(parts),
is_weighted=self._is_weighted(parts),
weight=self._weight(parts),
is_stat=self._is_stat(parts),
stat=self._stat(parts),
siglevel=self._siglevel(parts),
is_viewlike=viewlike)
def _row_pattern(self, target_ci):
"""
"""
cisplit = self.cell_items.split('_')
if target_ci == 'c%':
start = cisplit.index('colpct')
elif target_ci == 'counts':
start = cisplit.index('counts')
repeat = self.ci_count
return (start, repeat)
def _view_idxs(self, view_tags, keep_tests=True, keep_bases=True, names=False, ci=None):
"""
"""
if not isinstance(view_tags, list): view_tags = [view_tags]
rowmeta = self.named_rowmeta
nested = self.array_style == 0
if nested:
if self.ci_count > 1:
rp_idx = self._row_pattern(ci)[0]
rowmeta = rowmeta[rp_idx]
else:
rp_idx = 0
rowmeta = rowmeta[0]
rows = []
for r in rowmeta:
is_code = str(r[0]).isdigit()
if 'is_counts' in r[1] and is_code:
rows.append(('counts', r[1]))
elif 'is_c_pct' in r[1] and is_code:
rows.append(('c%', r[1]))
elif 'is_propstest' in r[1]:
rows.append((r[0], r[1]))
elif 'is_meanstest' in r[1]:
rows.append((r[0], r[1]))
else:
rows.append(r)
invalids = []
if not keep_tests:
invalids.extend(['is_propstest', 'is_meanstest'])
if ci == 'counts':
invalids.append('is_c_pct')
elif ci == 'c%':
invalids.append('is_counts')
idxs = []
names = []
order = []
for i, row in enumerate(rows):
if any([invalid in row[1] for invalid in invalids]):
if not (row[0] == 'All' and keep_bases): continue
if row[0] in view_tags:
order.append(view_tags.index(row[0]))
idxs.append(i)
if nested:
names.append(self._views_per_rows()[rp_idx][i])
else:
names.append(self._views_per_rows()[i])
return (idxs, order) if not names else (idxs, names, order)
@staticmethod
def _remove_grouped_blanks(viewindex_labs):
"""
"""
full = []
for v in viewindex_labs:
if v == '':
full.append(last)
else:
last = v
full.append(last)
return full
def _slice_edited_index(self, axis, positions):
"""
"""
l_zero = axis.get_level_values(0).values.tolist()[0]
l_one = axis.get_level_values(1).values.tolist()
l_one = [l_one[p] for p in positions]
axis_tuples = [(l_zero, lab) for lab in l_one]
if self.array_style == 0:
names = ['Array', 'Questions']
else:
names = ['Question', 'Values']
return pd.MultiIndex.from_tuples(axis_tuples, names=names)
def _non_grouped_axis(self):
"""
"""
axis = self._frame.index
l_zero = axis.get_level_values(0).values.tolist()[0]
l_one = axis.get_level_values(1).values.tolist()
l_one = self._remove_grouped_blanks(l_one)
axis_tuples = [(l_zero, lab) for lab in l_one]
if self.array_style == 0:
names = ['Array', 'Questions']
else:
names = ['Question', 'Values']
self._frame.index = pd.MultiIndex.from_tuples(axis_tuples, names=names)
return None
@property
def named_rowmeta(self):
if self.painted:
self.toggle_labels()
d = self.describe()
if self.array_style == 0:
n = self._frame.columns.get_level_values(1).values.tolist()
n = self._remove_grouped_blanks(n)
mapped = {rowid: list(zip(n, rowmeta)) for rowid, rowmeta in list(d.items())}
else:
n = self._frame.index.get_level_values(1).values.tolist()
n = self._remove_grouped_blanks(n)
mapped = list(zip(n, d))
if not self.painted: self.toggle_labels()
return mapped
@lazy_property
def _nested_y(self):
return any('>' in v for v in self._y_keys)
def _is_default(self, parts):
return parts[-1] == 'default'
def _is_c_base(self, parts):
return parts[-1] == 'cbase'
def _is_r_base(self, parts):
return parts[-1] == 'rbase'
def _is_e_base(self, parts):
return parts[-1] == 'ebase'
def _is_c_base_gross(self, parts):
return parts[-1] == 'cbase_gross'
def _is_base(self, parts):
return (self._is_c_base(parts) or
self._is_c_base_gross(parts) or
self._is_e_base(parts) or
self._is_r_base(parts))
def _is_counts(self, parts):
return parts[1].startswith('f') and parts[3] == ''
def _is_c_pct(self, parts):
return parts[1].startswith('f') and parts[3] == 'y'
def _is_r_pct(self, parts):
return parts[1].startswith('f') and parts[3] == 'x'
def _is_res_c_pct(self, parts):
return parts[-1] == 'res_c%'
def _is_net(self, parts):
return parts[1].startswith(('f', 'f.c:f', 't.props')) and \
len(parts[2]) > 3 and not parts[2] == 'x++'
def _is_calc_only(self, parts):
if self._is_net(parts) and not self._is_block(parts):
return ((self.__has_freq_calc(parts) or
self.__is_calc_only_propstest(parts)) and not
(self._is_counts_sum(parts) or self._is_c_pct_sum(parts)))
else:
return False
def _is_block(self, parts):
if self._is_net(parts):
conditions = parts[2].split('[')
multiple_conditions = len(conditions) > 2
expand = '+{' in parts[2] or '}+' in parts[2]
complete = '*:' in parts[2]
if expand or complete:
return True
if multiple_conditions:
if self.__has_operator_expr(parts):
return True
return False
return False
return False
def _stat(self, parts):
if parts[1].startswith('d.'):
return parts[1].split('.')[-1]
else:
return None
# non-meta relevant helpers
def __has_operator_expr(self, parts):
e = parts[2]
for syntax in [']*:', '[+{', '}+']:
if syntax in e: e = e.replace(syntax, '')
ops = ['+', '-', '*', '/']
return any(len(e.split(op)) > 1 for op in ops)
def __has_freq_calc(self, parts):
return parts[1].startswith('f.c:f')
def __is_calc_only_propstest(self, parts):
return self._is_propstest(parts) and self.__has_operator_expr(parts)
@staticmethod
def _statname(parts):
split = parts[1].split('.')
if len(split) > 1:
return split[1]
return split[-1]
def _is_mean(self, parts):
return self._statname(parts) == 'mean'
def _is_stddev(self, parts):
return self._statname(parts) == 'stddev'
def _is_min(self, parts):
return self._statname(parts) == 'min'
def _is_max(self, parts):
return self._statname(parts) == 'max'
def _is_median(self, parts):
return self._statname(parts) == 'median'
def _is_variance(self, parts):
return self._statname(parts) == 'var'
def _is_sem(self, parts):
return self._statname(parts) == 'sem'
def _is_varcoeff(self, parts):
return self._statname(parts) == 'varcoeff'
def _is_percentile(self, parts):
return self._statname(parts) in ['upper_q', 'lower_q', 'median']
def _is_counts_sum(self, parts):
return parts[-1].endswith('counts_sum')
def _is_c_pct_sum(self, parts):
return parts[-1].endswith('c%_sum')
def _is_counts_cumsum(self, parts):
return parts[-1].endswith('counts_cumsum')
def _is_c_pct_cumsum(self, parts):
return parts[-1].endswith('c%_cumsum')
def _is_weighted(self, parts):
return parts[4] != ''
def _weight(self, parts):
if parts[4] != '':
return parts[4]
else:
return None
def _is_stat(self, parts):
return parts[1].startswith('d.')
def _is_propstest(self, parts):
return parts[1].startswith('t.props')
def _is_meanstest(self, parts):
return parts[1].startswith('t.means')
def _siglevel(self, parts):
if self._is_meanstest(parts) or self._is_propstest(parts):
return parts[1].split('.')[-1]
else:
return None
def _describe_block(self, description, row_id):
if self.painted:
repaint = True
self.toggle_labels()
else:
repaint = False
vpr = self._views_per_rows()
if row_id is not None:
vpr = [v[1] for v in list(vpr[row_id].items())]
idx = self.dataframe.columns.get_level_values(1).tolist()
else:
idx = self.dataframe.index.get_level_values(1).tolist()
idx_view_map = list(zip(idx, vpr))
block_net_vk = [v for v in vpr if len(v.split('|')[2].split('['))>2 or
'[+{' in v.split('|')[2] or '}+]' in v.split('|')[2]]
has_calc = any([v.split('|')[1].startswith('f.c') for v in block_net_vk])
is_tested = any(v.split('|')[1].startswith('t.props') for v in vpr)
if block_net_vk:
expr = block_net_vk[0].split('|')[2]
expanded_codes = set(map(int, re.findall(r'\d+', expr)))
else:
expanded_codes = []
for idx, m in enumerate(idx_view_map):
if idx_view_map[idx][0] == '':
idx_view_map[idx] = (idx_view_map[idx-1][0], idx_view_map[idx][1])
for idx, row in enumerate(description):
if not 'is_block' in row:
idx_view_map[idx] = None
blocks_len = len(expr.split('],')) * (self.ci_count + is_tested)
if has_calc: blocks_len -= (self.ci_count + is_tested)
block_net_def = []
described_nets = 0
for e in idx_view_map:
if e:
if isinstance(e[0], str):
if has_calc and described_nets == blocks_len:
block_net_def.append('calc')
else:
block_net_def.append('net')
described_nets += 1
else:
code = int(e[0])
if code in expanded_codes:
block_net_def.append('expanded')
else:
block_net_def.append('normal')
else:
block_net_def.append(e)
if repaint: self.toggle_labels()
return block_net_def
def get(self, data_key, filter_key, x_keys, y_keys, views, rules=False,
rules_weight=None, orient='x', prioritize=True):
""" Get the concatenated Chain.DataFrame
"""
self._meta = self.stack[data_key].meta
self._given_views = views
self._x_keys = x_keys
self._y_keys = y_keys
concat_axis = 0
if rules:
if not isinstance(rules, list):
self._has_rules = ['x', 'y']
else:
self._has_rules = rules
# use_views = views[:]
# for first in self.axes[0]:
# for second in self.axes[1]:
# link = self._get_link(data_key, filter_key, first, second)
# for v in use_views:
# if v not in link:
# use_views.remove(v)
for first in self.axes[0]:
found = []
x_frames = []
for second in self.axes[1]:
if self.axis == 1:
link = self._get_link(data_key, filter_key, first, second)
else:
link = self._get_link(data_key, filter_key, second, first)
if link is None:
continue
if prioritize: link = self._drop_substituted_views(link)
found_views, y_frames = self._concat_views(
link, views, rules_weight)
found.append(found_views)
try:
if self._meta['columns'][link.x].get('parent'):
self._is_mask_item = True
except KeyError:
pass
# TODO: contains arrary summ. attr.
# TODO: make this work y_frames = self._pad_frames(y_frames)
self.array_style = link
if self.array_style > -1:
concat_axis = 1 if self.array_style == 0 else 0
y_frames = self._pad_frames(y_frames)
x_frames.append(pd.concat(y_frames, axis=concat_axis))
self.shapes.append(x_frames[-1].shape)
self._frame = pd.concat(self._pad(x_frames), axis=self.axis)
        if self._group_style == 'reduced' and self.array_style > -1:
scan_views = [v if isinstance(v, (tuple, list)) else [v]
for v in self._given_views]
scan_views = [v for v in scan_views if len(v) > 1]
no_tests = []
for scan_view in scan_views:
new_views = []
for view in scan_view:
if not view.split('|')[1].startswith('t.'):
new_views.append(view)
no_tests.append(new_views)
cond = any(len(v) >= 2 for v in no_tests)
if cond:
self._frame = self._reduce_grouped_index(self._frame, 2, self._array_style)
if self.axis == 1:
self.views = found[-1]
else:
self.views = found
self.double_base = len([v for v in self.views
if v.split('|')[-1] == 'cbase']) > 1
self._index = self._frame.index
self._columns = self._frame.columns
self._extract_base_descriptions()
del self.stack
return self
def _toggle_bases(self, keep_weighted=True):
df = self._frame
is_array = self._array_style == 0
contents = self.contents[0] if is_array else self.contents
has_wgt_b = [k for k, v in list(contents.items())
if v['is_c_base'] and v['is_weighted']]
has_unwgt_b = [k for k, v in list(contents.items())
if v['is_c_base'] and not v['is_weighted']]
if not (has_wgt_b and has_unwgt_b):
return None
if keep_weighted:
drop_rows = has_unwgt_b
names = ['x|f|x:|||cbase']
else:
drop_rows = has_wgt_b
names = ['x|f|x:||{}|cbase'.format(list(contents.values())[0]['weight'])]
for v in self.views.copy():
if v in names:
del self._views[v]
df = self._frame
if is_array:
cols = [col for x, col in enumerate(df.columns.tolist())
if not x in drop_rows]
df = df.loc[:, cols]
else:
rows = [row for x, row in enumerate(df.index.tolist())
if not x in drop_rows]
df = df.loc[rows, :]
self._frame = df
self._index = df.index
self._columns = df.columns
return None
def _slice_edited_index(self, axis, positions):
"""
"""
l_zero = axis.get_level_values(0).values.tolist()[0]
l_one = axis.get_level_values(1).values.tolist()
l_one = [l_one[p] for p in positions]
axis_tuples = [(l_zero, lab) for lab in l_one]
if self.array_style == 0:
names = ['Array', 'Questions']
else:
names = ['Question', 'Values']
return pd.MultiIndex.from_tuples(axis_tuples, names=names)
def _drop_substituted_views(self, link):
if any(isinstance(sect, (list, tuple)) for sect in self._given_views):
chain_views = list(chain.from_iterable(self._given_views))
else:
chain_views = self._given_views
has_compl = any(']*:' in vk for vk in link)
req_compl = any(']*:' in vk for vk in chain_views)
has_cumsum = any('++' in vk for vk in link)
req_cumsum = any('++' in vk for vk in chain_views)
if (has_compl and req_compl) or (has_cumsum and req_cumsum):
new_link = copy.copy(link)
views = []
for vk in link:
vksplit = vk.split('|')
method, cond, name = vksplit[1], vksplit[2], vksplit[-1]
full_frame = name in ['counts', 'c%']
basic_sigtest = method.startswith('t.') and cond == ':'
if not full_frame and not basic_sigtest: views.append(vk)
for vk in link:
if vk not in views: del new_link[vk]
return new_link
else:
return link
def _pad_frames(self, frames):
""" TODO: doc string
"""
empty_frame = lambda f: pd.DataFrame(index=f.index, columns=f.columns)
max_lab = max(f.axes[self.array_style].size for f in frames)
for e, f in enumerate(frames):
size = f.axes[self.array_style].size
if size < max_lab:
f = pd.concat([f, empty_frame(f)], axis=self.array_style)
order = [None] * (size * 2)
order[::2] = list(range(size))
order[1::2] = list(range(size, size * 2))
if self.array_style == 0:
frames[e] = f.iloc[order, :]
else:
frames[e] = f.iloc[:, order]
return frames
def _get_link(self, data_key, filter_key, x_key, y_key):
"""
"""
base = self.stack[data_key][filter_key]
if x_key in base:
base = base[x_key]
if y_key in base:
return base[y_key]
else:
if self._array_style == -1:
self._y_keys.remove(y_key)
else:
self._x_keys.remove(x_key)
return None
def _index_switch(self, axis):
""" Returns self.dataframe/frame index/ columns based on given x/ y
"""
return dict(x=self._frame.index, y=self._frame.columns).get(axis)
def _pad(self, frames):
""" Pad index/ columns when nlevels is less than the max nlevels
in list of dataframes.
"""
indexes = []
max_nlevels = [max(f.axes[i].nlevels for f in frames) for i in (0, 1)]
for e, f in enumerate(frames):
indexes = []
for i in (0, 1):
if f.axes[i].nlevels < max_nlevels[i]:
indexes.append(self._pad_index(f.axes[i], max_nlevels[i]))
else:
indexes.append(f.axes[i])
frames[e].index, frames[e].columns = indexes
return frames
def _pad_index(self, index, size):
""" Add levels to columns MultiIndex so the nlevels matches
the biggest columns MultiIndex in DataFrames to be concatenated.
"""
pid = self.pad_id
pad = ((size - index.nlevels) // 2)
fill = int((pad % 2) == 1)
names = list(index.names)
names[0:0] = names[:2] * pad
arrays = self._lzip(index.values)
arrays[0:0] = [tuple('#pad-%s' % pid for _ in arrays[i])
for i in range(pad + fill)] * pad
return pd.MultiIndex.from_arrays(arrays, names=names)
@staticmethod
def _reindx_source(df, varname, total):
"""
"""
df.index = df.index.set_levels([varname], level=0, inplace=False)
if df.columns.get_level_values(0).tolist()[0] != varname and total:
df.columns = df.columns.set_levels([varname], level=0, inplace=False)
return df
def _concat_views(self, link, views, rules_weight, found=None):
""" Concatenates the Views of a Chain.
"""
frames = []
totals = [[_TOTAL]] * 2
if found is None:
found = OrderedDict()
if self._text_map is None:
self._text_map = dict()
for view in views:
try:
self.array_style = link
if isinstance(view, (list, tuple)):
if not self.grouping:
self.grouping = True
if isinstance(view, tuple):
self._group_style = 'reduced'
else:
self._group_style = 'normal'
if self.array_style > -1:
use_grp_type = 'normal'
else:
use_grp_type = self._group_style
found, grouped = self._concat_views(link, view, rules_weight, found=found)
if grouped:
frames.append(self._group_views(grouped, use_grp_type))
else:
agg = link[view].meta()['agg']
is_descriptive = agg['method'] == 'descriptives'
is_base = agg['name'] in ['cbase', 'rbase', 'ebase', 'cbase_gross']
is_sum = agg['name'] in ['counts_sum', 'c%_sum']
is_net = link[view].is_net()
oth_src = link[view].has_other_source()
no_total_sign = is_descriptive or is_base or is_sum or is_net
if link[view]._custom_txt and is_descriptive:
statname = agg['fullname'].split('|')[1].split('.')[1]
if not statname in self._custom_texts:
self._custom_texts[statname] = []
self._custom_texts[statname].append(link[view]._custom_txt)
if is_descriptive:
text = agg['name']
try:
self._text_map.update({agg['name']: text})
except AttributeError:
self._text_map = {agg['name']: text}
if agg['text']:
name = dict(cbase='All').get(agg['name'], agg['name'])
try:
self._text_map.update({name: agg['text']})
except AttributeError:
self._text_map = {name: agg['text'],
_TOTAL: 'Total'}
if agg['grp_text_map']:
# try:
if not agg['grp_text_map'] in self._grp_text_map:
self._grp_text_map.append(agg['grp_text_map'])
# except AttributeError:
# self._grp_text_map = [agg['grp_text_map']]
frame = link[view].dataframe
if oth_src:
frame = self._reindx_source(frame, link.x, link.y == _TOTAL)
# RULES SECTION
# ========================================================
# TODO: DYNAMIC RULES:
# - all_rules_axes, rules_weight must be provided not hardcoded
# - Review copy/pickle in original version!!!
rules_weight = None
if self._has_rules:
rules = Rules(link, view, self._has_rules, rules_weight)
# print rules.show_rules()
# rules.get_slicer()
# print rules.show_slicers()
rules.apply()
frame = rules.rules_df()
# ========================================================
if not no_total_sign and (link.x == _TOTAL or link.y == _TOTAL):
if link.x == _TOTAL:
level_names = [[link.y], ['@']]
elif link.y == _TOTAL:
level_names = [[link.x], ['@']]
try:
frame.columns.set_levels(level_names, level=[0, 1],
inplace=True)
except ValueError:
pass
frames.append(frame)
if view not in found:
if self._array_style != 0:
found[view] = len(frame.index)
else:
found[view] = len(frame.columns)
if link[view]._kwargs.get('flag_bases'):
flag_bases = link[view]._kwargs['flag_bases']
try:
if flag_bases not in self._flag_bases:
self._flag_bases.append(flag_bases)
except TypeError:
self._flag_bases = [flag_bases]
except KeyError:
pass
return found, frames
@staticmethod
def _temp_nest_index(df):
"""
Flatten the nested MultiIndex for easier handling.
"""
# Build flat column labels
flat_cols = []
order_idx = []
i = -1
for col in df.columns.values:
flat_col_lab = ''.join(str(col[:-1])).strip()
if not flat_col_lab in flat_cols:
i += 1
order_idx.append(i)
flat_cols.append(flat_col_lab)
else:
order_idx.append(i)
# Drop unwanted levels (keep last Values Index-level in that process)
levels = list(range(0, df.columns.nlevels-1))
drop_levels = levels[:-2]+ [levels[-1]]
df.columns = df.columns.droplevel(drop_levels)
# Apply the new flat labels and resort the columns
df.columns.set_levels(levels=flat_cols, level=0, inplace=True)
df.columns.set_codes(order_idx, level=0, inplace=True)
return df, flat_cols
@staticmethod
def _replace_test_results(df, replacement_map, char_repr):
"""
Swap all digit-based results with letters referencing the column header.
.. note:: The modified df will be stripped of all indexing on both rows
and columns.
"""
all_dfs = []
ignore = False
for col in list(replacement_map.keys()):
target_col = df.columns[0] if col == '@' else col
value_df = df[[target_col]].copy()
if not col == '@':
value_df.drop('@', axis=1, level=1, inplace=True)
values = value_df.replace(np.NaN, '-').values.tolist()
r = replacement_map[col]
new_values = []
case = None
for v in values:
if isinstance(v[0], str):
if char_repr == 'upper':
case = 'up'
elif char_repr == 'lower':
case = 'low'
elif char_repr == 'alternate':
if case == 'up':
case = 'low'
else:
case = 'up'
for no, l in sorted(list(r.items()), reverse=True):
v = [char.replace(str(no), l if case == 'up' else l.lower())
if isinstance(char, str)
else char for char in v]
new_values.append(v)
else:
new_values.append(v)
part_df = pd.DataFrame(new_values)
all_dfs.append(part_df)
letter_df = pd.concat(all_dfs, axis=1)
# Clean it up
letter_df.replace('-', np.NaN, inplace=True)
for signs in [('[', ''), (']', ''), (', ', '.')]:
letter_df = letter_df.applymap(lambda x: x.replace(signs[0], signs[1])
if isinstance(x, str) else x)
return letter_df
@staticmethod
def _get_abc_letters(no_of_cols, incl_total):
"""
Get the list of letter replacements depending on the y-axis length.
"""
repeat_alphabet = int(no_of_cols / 26)
abc = list(string.ascii_uppercase)
letters = list(string.ascii_uppercase)
if repeat_alphabet:
for r in range(0, repeat_alphabet):
letter = abc[r]
extend_abc = ['{}{}'.format(letter, l) for l in abc]
letters.extend(extend_abc)
if incl_total:
letters = ['@'] + letters[:no_of_cols-1]
else:
letters = letters[:no_of_cols]
return letters
def _any_tests(self):
vms = [v.split('|')[1] for v in list(self._views.keys())]
return any('t.' in v for v in vms)
def _no_of_tests(self):
tests = [v for v in list(self._views.keys())
if v.split('|')[1].startswith('t.')]
levels = [v.split('|')[1].split('.')[-1] for v in tests]
return len(set(levels))
def _siglevel_on_row(self):
"""
"""
vpr = self._views_per_rows()
tests = [(no, v) for no, v in enumerate(vpr)
if v.split('|')[1].startswith('t.')]
s = [(t[0],
float(int(t[1].split('|')[1].split('.')[3].split('+')[0]))/100.0)
for t in tests]
return s
def transform_tests(self, char_repr='upper', display_level=True):
"""
Transform column-wise digit-based test representation to letters.
Adds a new row that is applying uppercase letters to all columns (A,
B, C, ...) and maps any significance test's result cells to these column
indicators.
"""
if not self._any_tests(): return None
# Preparation of input dataframe and dimensions of y-axis header
df = self.dataframe.copy()
number_codes = df.columns.get_level_values(-1).tolist()
number_header_row = copy.copy(df.columns)
if self._no_of_tests() != 2 and char_repr == 'alternate':
char_repr = 'upper'
has_total = '@' in self._y_keys
if self._nested_y:
df, questions = self._temp_nest_index(df)
else:
questions = self._y_keys
all_num = number_codes if not has_total else [0] + number_codes[1:]
# Set the new column header (ABC, ...)
column_letters = self._get_abc_letters(len(number_codes), has_total)
vals = df.columns.get_level_values(0).tolist()
mi = pd.MultiIndex.from_arrays(
(vals,
column_letters))
df.columns = mi
self.sig_test_letters = df.columns.get_level_values(1).tolist()
# Build the replacements dict and build list of unique column indices
test_dict = OrderedDict()
for num_idx, col in enumerate(df.columns):
if col[1] == '@':
question = col[1]
else:
question = col[0]
if not question in test_dict: test_dict[question] = {}
number = all_num[num_idx]
letter = col[1]
test_dict[question][number] = letter
letter_df = self._replace_test_results(df, test_dict, char_repr)
# Re-apply indexing & finalize the new crossbreak column header
if display_level:
levels = self._siglevel_on_row()
index = df.index.get_level_values(1).tolist()
for i, l in levels:
index[i] = '#Level: {}'.format(l)
l0 = df.index.get_level_values(0).tolist()[0]
tuples = [(l0, i) for i in index]
index = pd.MultiIndex.from_tuples(
tuples, names=['Question', 'Values'])
letter_df.index = index
else:
letter_df.index = df.index
letter_df.columns = number_header_row
letter_df = self._apply_letter_header(letter_df)
self._frame = letter_df
return self
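# Hedged usage sketch (assumed objects, not part of the original module):
# `chain` stands for an aggregated Chain that carries column test views.
#
#     chain.transform_tests(char_repr='upper', display_level=True)
#     chain.sig_test_letters    # e.g. ['@', 'A', 'B', 'C', ...]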
def _remove_letter_header(self):
self._frame.columns = self._frame.columns.droplevel(level=-1)
return None
def _apply_letter_header(self, df):
"""
Append the significance test letters as a 'Test-IDs' level to the column index.
"""
new_tuples = []
org_names = [n for n in df.columns.names]
idx = df.columns
for i, l in zip(idx, self.sig_test_letters):
new_tuples.append(i + (l, ))
if not 'Test-IDs' in org_names:
org_names.append('Test-IDs')
mi = pd.MultiIndex.from_tuples(new_tuples, names=org_names)
df.columns = mi
return df
def _extract_base_descriptions(self):
"""
Collect base_text meta properties for later use in base labelling.
"""
if self.source == 'Crunch multitable':
self.base_descriptions = self._meta['var_meta'].get('notes', None)
else:
base_texts = OrderedDict()
arr_style = self.array_style
if arr_style != -1:
var = self._x_keys[0] if arr_style == 0 else self._y_keys[0]
masks = self._meta['masks']
columns = self._meta['columns']
item = masks[var]['items'][0]['source'].split('@')[-1]
test_item = columns[item]
test_mask = masks[var]
if 'properties' in test_mask:
base_text = test_mask['properties'].get('base_text', None)
elif 'properties' in test_item:
base_text = test_item['properties'].get('base_text', None)
else:
base_text = None
self.base_descriptions = base_text
else:
for x in self._x_keys:
if 'properties' in self._meta['columns'][x]:
bt = self._meta['columns'][x]['properties'].get('base_text', None)
if bt:
base_texts[x] = bt
if base_texts:
if self.orientation == 'x':
self.base_descriptions = list(base_texts.values())[0]
else:
self.base_descriptions = list(base_texts.values())
return None
def _ensure_indexes(self):
if self.painted:
self._frame.index, self._frame.columns = self.index, self.columns
if self.structure is not None:
self._frame.loc[:, :] = self.frame_values
else:
self.index, self.columns = self._frame.index, self._frame.columns
if self.structure is not None:
self.frame_values = self._frame.values
def _finish_text_key(self, text_key, text_loc_x, text_loc_y):
text_keys = dict()
text_key = text_key or self._default_text
if text_loc_x:
text_keys['x'] = (text_loc_x, text_key)
else:
text_keys['x'] = text_key
if text_loc_y:
text_keys['y'] = (text_loc_y, text_key)
else:
text_keys['y'] = text_key
return text_keys
def paint(self, text_key=None, text_loc_x=None, text_loc_y=None, display=None,
axes=None, view_level=False, transform_tests='upper', display_level=True,
add_test_ids=True, add_base_texts='simple', totalize=False,
sep=None, na_rep=None, transform_column_names=None,
exclude_mask_text=False):
"""
Apply labels, sig. testing conversion and other post-processing to the
``Chain.dataframe`` property.
Use this to prepare a ``Chain`` for further usage in an Excel or Power-
point Build.
Parameters
----------
text_key : str, default None
The text key used to select translated question and value texts from the meta.
text_loc_x : str, default None
The key in the 'text' to locate the text_key for the x-axis.
text_loc_y : str, default None
The key in the 'text' to locate the text_key for the y-axis.
display : {'x', 'y', ['x', 'y']}, default None
The axes for which the variable name is prepended to the painted label.
axes : {'x', 'y', ['x', 'y']}, default None
The axes the painting is applied to.
view_level : bool, default False
If True, a third Index level containing the view keys is added to the rows.
transform_tests : {False, 'upper', 'lower', 'alternate'}, default 'upper'
Defines how column test results are converted into their letter representation.
display_level : bool, default True
If True, the tested significance level is shown on the test result rows.
add_test_ids : bool, default True
If True, the letter header row ('Test-IDs') is re-added to the column index.
add_base_texts : {False, 'all', 'simple', 'simple-no-items'}, default 'simple'
Whether or not to include existing ``.base_descriptions`` str
in the label of the appropriate base view. Selecting ``'simple'``
will inject the base texts to non-array type Chains only.
totalize : bool, default False
If True, the '@' total column is labelled 'Total' instead of the x-variable text.
sep : str, default None
The separator used for painting ``pandas.DataFrame`` columns
na_rep : str, default None
numpy.NaN will be replaced with na_rep if passed
transform_column_names : dict, default None
Transformed column_names are added to the label texts.
exclude_mask_text : bool, default False
Exclude mask text from mask-item texts.
Returns
-------
None
The ``.dataframe`` is modified inplace.
"""
self._ensure_indexes()
text_keys = self._finish_text_key(text_key, text_loc_x, text_loc_y)
if self.structure is not None:
self._paint_structure(text_key, sep=sep, na_rep=na_rep)
else:
self.totalize = totalize
if transform_tests: self.transform_tests(transform_tests, display_level)
# Remove any letter header row from transformed tests...
if self.sig_test_letters:
self._remove_letter_header()
if display is None:
display = _AXES
if axes is None:
axes = _AXES
self._paint(text_keys, display, axes, add_base_texts,
transform_column_names, exclude_mask_text)
# Re-build the full column index (labels + letter row)
if self.sig_test_letters and add_test_ids:
self._frame = self._apply_letter_header(self._frame)
if view_level:
self._add_view_level()
self.painted = True
return None
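# Hedged usage sketch (assumed objects and text keys, not part of the module):
# a typical build step paints labels and converts sig. tests in one call.
#
#     chain.paint(text_key='en-GB', transform_tests='upper',
#                 add_base_texts='simple', totalize=True)
#     chain.dataframe    # now carries painted labels plus the 'Test-IDs' row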
def _paint_structure(self, text_key=None, sep=None, na_rep=None):
""" Paint the dataframe-type Chain.
"""
if not text_key:
text_key = self._meta['lib']['default text']
str_format = '%%s%s%%s' % sep
column_mapper = dict()
na_rep = na_rep or ''
pattern = r'\, (?=\W|$)'
for column in self.structure.columns:
if not column in self._meta['columns']: continue
meta = self._meta['columns'][column]
if sep:
column_mapper[column] = str_format % (column, meta['text'][text_key])
else:
column_mapper[column] = meta['text'][text_key]
if meta.get('values'):
values = meta['values']
if isinstance(values, str):
pointers = values.split('@')
values = self._meta[pointers.pop(0)]
while pointers:
values = values[pointers.pop(0)]
if meta['type'] == 'delimited set':
value_mapper = {
str(item['value']): item['text'][text_key]
for item in values
}
series = self.structure[column]
try:
series = (series.str.split(';')
.apply(pd.Series, 1)
.stack(dropna=False)
.map(value_mapper.get) #, na_action='ignore')
.unstack())
first = series[series.columns[0]]
rest = [series[c] for c in series.columns[1:]]
self.structure[column] = (
first
.str.cat(rest, sep=', ', na_rep='')
.str.slice(0, -2)
.replace(to_replace=pattern, value='', regex=True)
.replace(to_replace='', value=na_rep)
)
except AttributeError:
continue
else:
value_mapper = {
item['value']: item['text'][text_key]
for item in values
}
self.structure[column] = (self.structure[column]
.map(value_mapper.get,
na_action='ignore')
)
self.structure[column].fillna(na_rep, inplace=True)
self.structure.rename(columns=column_mapper, inplace=True)
def _paint(self, text_keys, display, axes, bases, transform_column_names,
exclude_mask_text):
""" Paint the Chain.dataframe
"""
indexes = []
for axis in _AXES:
index = self._index_switch(axis)
if axis in axes:
index = self._paint_index(index, text_keys, display, axis,
bases, transform_column_names,
exclude_mask_text)
indexes.append(index)
self._frame.index, self._frame.columns = indexes
def _paint_index(self, index, text_keys, display, axis, bases,
transform_column_names, exclude_mask_text):
""" Paint the Chain.dataframe index. """
error = "No text keys from {} found in {}"
level_0_text, level_1_text = [], []
nlevels = index.nlevels
if nlevels > 2:
arrays = []
for i in range(0, nlevels, 2):
index_0 = index.get_level_values(i)
index_1 = index.get_level_values(i+1)
tuples = list(zip(index_0.values, index_1.values))
names = (index_0.name, index_1.name)
sub = pd.MultiIndex.from_tuples(tuples, names=names)
sub = self._paint_index(sub, text_keys, display, axis, bases,
transform_column_names, exclude_mask_text)
arrays.extend(self._lzip(sub.ravel()))
tuples = self._lzip(arrays)
return pd.MultiIndex.from_tuples(tuples, names=index.names)
levels = self._lzip(index.values)
arrays = (self._get_level_0(levels[0], text_keys, display, axis,
transform_column_names, exclude_mask_text),
self._get_level_1(levels, text_keys, display, axis, bases))
new_index = pd.MultiIndex.from_arrays(arrays, names=index.names)
return new_index
def _get_level_0(self, level, text_keys, display, axis,
transform_column_names, exclude_mask_text):
"""
Paint the first (question) level of an axis index.
"""
level_0_text = []
for value in level:
if str(value).startswith('#pad'):
pass
elif pd.notnull(value):
if value in list(self._text_map.keys()):
value = self._text_map[value]
else:
text = self._get_text(value, text_keys[axis], exclude_mask_text)
if axis in display:
if transform_column_names:
value = transform_column_names.get(value, value)
value = '{}. {}'.format(value, text)
else:
value = text
level_0_text.append(value)
if '@' in self._y_keys and self.totalize and axis == 'y':
level_0_text = ['Total'] + level_0_text[1:]
return list(map(str, level_0_text))
def _get_level_1(self, levels, text_keys, display, axis, bases):
"""
Paint the second (values) level of an axis index.
"""
level_1_text = []
if text_keys[axis] in self._transl:
tk_transl = text_keys[axis]
else:
tk_transl = self._default_text
c_text = copy.deepcopy(self._custom_texts) if self._custom_texts else {}
for i, value in enumerate(levels[1]):
if str(value).startswith('#pad'):
level_1_text.append(value)
elif pd.isnull(value):
level_1_text.append(value)
elif str(value) == '':
level_1_text.append(value)
elif str(value).startswith('#Level: '):
level_1_text.append(value.replace('#Level: ', ''))
else:
translate = list(self._transl[list(self._transl.keys())[0]].keys())
if value in list(self._text_map.keys()) and value not in translate:
level_1_text.append(self._text_map[value])
elif value in translate:
if value == 'All':
text = self._specify_base(i, text_keys[axis], bases)
else:
text = self._transl[tk_transl][value]
if value in c_text:
add_text = c_text[value].pop(0)
text = '{} {}'.format(text, add_text)
level_1_text.append(text)
elif value == 'All (eff.)':
text = self._specify_base(i, text_keys[axis], bases)
level_1_text.append(text)
else:
if any(self.array_style == a and axis == x for a, x in ((0, 'x'), (1, 'y'))):
text = self._get_text(value, text_keys[axis], True)
level_1_text.append(text)
else:
try:
values = self._get_values(levels[0][i])
if not values:
level_1_text.append(value)
else:
for item in self._get_values(levels[0][i]):
if int(value) == item['value']:
text = self._get_text(item, text_keys[axis])
level_1_text.append(text)
except (ValueError, UnboundLocalError):
if self._grp_text_map:
for gtm in self._grp_text_map:
if value in list(gtm.keys()):
text = self._get_text(gtm[value], text_keys[axis])
level_1_text.append(text)
return list(map(str, level_1_text))
@staticmethod
def _unwgt_label(views, base_vk):
valid = ['cbase', 'cbase_gross', 'rbase', 'ebase']
basetype = base_vk.split('|')[-1]
views_split = [v.split('|') for v in views]
multibase = len([v for v in views_split if v[-1] == basetype]) > 1
weighted = base_vk.split('|')[-2]
w_diff = len([v for v in views_split
if not v[-1] in valid and not v[-2] == weighted]) > 0
if weighted:
return False
elif multibase or w_diff:
return True
else:
return False
def _add_base_text(self, base_val, tk, bases):
if self._array_style == 0 and bases != 'all':
return base_val
else:
bt = self.base_descriptions
if isinstance(bt, dict):
bt_by_key = bt[tk]
else:
bt_by_key = bt
if bt_by_key:
if bt_by_key.startswith('%s:' % base_val):
bt_by_key = bt_by_key.replace('%s:' % base_val, '')
return '{}: {}'.format(base_val, bt_by_key)
else:
return base_val
def _specify_base(self, view_idx, tk, bases):
tk_transl = tk if tk in self._transl else self._default_text
base_vk = self._valid_views()[view_idx]
basetype = base_vk.split('|')[-1]
unwgt_label = self._unwgt_label(list(self._views.keys()), base_vk)
if unwgt_label:
if basetype == 'cbase_gross':
base_value = self._transl[tk_transl]['no_w_gross_All']
elif basetype == 'ebase':
base_value = 'Unweighted effective base'
else:
base_value = self._transl[tk_transl]['no_w_All']
else:
if basetype == 'cbase_gross':
base_value = self._transl[tk_transl]['gross All']
elif basetype == 'ebase':
base_value = 'Effective base'
elif not bases or (bases == 'simple-no-items' and self._is_mask_item):
base_value = self._transl[tk_transl]['All']
else:
key = tk
if isinstance(tk, tuple):
_, key = tk
base_value = self._add_base_text(self._transl[tk_transl]['All'],
key, bases)
return base_value
def _get_text(self, value, text_key, item_text=False):
"""
Resolve the 'text' meta object of a column, mask or value element.
"""
if value in list(self._meta['columns'].keys()):
col = self._meta['columns'][value]
if item_text and col.get('parent'):
parent = list(col['parent'].keys())[0].split('@')[-1]
items = self._meta['masks'][parent]['items']
for i in items:
if i['source'].split('@')[-1] == value:
obj = i['text']
break
else:
obj = col['text']
elif value in list(self._meta['masks'].keys()):
obj = self._meta['masks'][value]['text']
elif 'text' in value:
obj = value['text']
else:
obj = value
return self._get_text_from_key(obj, text_key)
def _get_text_from_key(self, text, text_key):
""" Find the first value in a meta object's "text" key that matches a
text_key for its axis.
"""
if isinstance(text_key, tuple):
loc, key = text_key
if loc in text:
if key in text[loc]:
return text[loc][key]
elif self._default_text in text[loc]:
return text[loc][self._default_text]
if key in text:
return text[key]
for key in (text_key, self._default_text):
if key in text:
return text[key]
return '<label>'
def _get_values(self, column):
""" Returns values from self._meta["columns"] or
self._meta["lib"]["values"][<mask name>] if parent is "array"
"""
if column in self._meta['columns']:
values = self._meta['columns'][column].get('values', [])
elif column in self._meta['masks']:
values = self._meta['lib']['values'].get(column, [])
if isinstance(values, str):
keys = values.split('@')
values = self._meta[keys.pop(0)]
while keys:
values = values[keys.pop(0)]
return values
def _add_view_level(self, shorten=False):
""" Insert a third Index level containing View keys into the DataFrame.
"""
vnames = self._views_per_rows()
if shorten:
vnames = [v.split('|')[-1] for v in vnames]
self._frame['View'] = pd.Series(vnames, index=self._frame.index)
self._frame.set_index('View', append=True, inplace=True)
def toggle_labels(self):
""" Restore the unpainted/painted Index and Columns appearance.
"""
if self.painted:
self.painted = False
else:
self.painted = True
attrs = ['index', 'columns']
if self.structure is not None:
attrs.append('_frame_values')
for attr in attrs:
vals = attr[6:] if attr.startswith('_frame') else attr
frame_val = getattr(self._frame, vals)
setattr(self._frame, attr, getattr(self, attr))
setattr(self, attr, frame_val)
if self.structure is not None:
values = self._frame.values
self._frame.loc[:, :] = self.frame_values
self.frame_values = values
return self
@staticmethod
def _single_column(*levels):
""" Returns True if multiindex level 0 has one unique value
"""
return all(len(level) == 1 for level in levels)
def _group_views(self, frame, group_type):
""" Re-sort rows so that they appear as being grouped inside the
Chain.dataframe.
"""
grouped_frame = []
len_of_frame = len(frame)
frame = pd.concat(frame, axis=0)
index_order = frame.index.get_level_values(1).tolist()
index_order = index_order[:int(len(index_order) / len_of_frame)]
gb_df = frame.groupby(level=1, sort=False)
for i in index_order:
grouped_df = gb_df.get_group(i)
if group_type == 'reduced':
grouped_df = self._reduce_grouped_index(grouped_df, len_of_frame-1)
grouped_frame.append(grouped_df)
grouped_frame = pd.concat(grouped_frame, verify_integrity=False)
return grouped_frame
@staticmethod
def _reduce_grouped_index(grouped_df, view_padding, array_summary=-1):
idx = grouped_df.index
q = idx.get_level_values(0).tolist()[0]
if array_summary == 0:
val = idx.get_level_values(1).tolist()
for index in range(1, len(val), 2):
val[index] = ''
grp_vals = val
elif array_summary == 1:
grp_vals = []
indexed = []
val = idx.get_level_values(1).tolist()
for v in val:
if not v in indexed or v == 'All':
grp_vals.append(v)
indexed.append(v)
else:
grp_vals.append('')
else:
val = idx.get_level_values(1).tolist()[0]
grp_vals = [val] + [''] * view_padding
mi = pd.MultiIndex.from_product([[q], grp_vals], names=idx.names)
grouped_df.index = mi
return grouped_df
@staticmethod
def _lzip(arr):
"""
Return list(zip(*arr)), i.e. the transposed pairing of the input sequences.
"""
return list(zip(*arr))
@staticmethod
def _force_list(obj):
if isinstance(obj, (list, tuple)):
return obj
return [obj]
@classmethod
def __pad_id(cls):
cls._pad_id += 1
return cls._pad_id
# class MTDChain(Chain):
# def __init__(self, mtd_doc, name=None):
# super(MTDChain, self).__init__(stack=None, name=name, structure=None)
# self.mtd_doc = mtd_doc
# self.source = 'Dimensions MTD'
# self.get = self._get
# def _get(self, ignore=None, labels=True):
# per_folder = OrderedDict()
# failed = []
# unsupported = []
# for name, tab_def in self.mtd_doc.items():
# try:
# if isinstance(tab_def.values()[0], dict):
# unsupported.append(name)
# else:
# tabs = split_tab(tab_def)
# chain_dfs = []
# for tab in tabs:
# df, meta = tab[0], tab[1]
# # SOME DFs HAVE TOO MANY / UNUSED LEVELS...
# if len(df.columns.levels) > 2:
# df.columns = df.columns.droplevel(0)
# x, y = _get_axis_vars(df)
# df.replace('-', np.NaN, inplace=True)
# relabel_axes(df, meta, labels=labels)
# df = df.drop('Base', axis=1, level=1)
# try:
# df = df.applymap(lambda x: float(x.replace(',', '.')
# if isinstance(x, (str, unicode)) else x))
# except:
# msg = "Could not convert df values to float for table '{}'!"
# # warnings.warn(msg.format(name))
# chain_dfs.append(to_chain((df, x, y), meta))
# per_folder[name] = chain_dfs
# except:
# failed.append(name)
# print 'Conversion failed for:\n{}\n'.format(failed)
# print 'Subfolder conversion unsupported for:\n{}'.format(unsupported)
# return per_folder
##############################################################################
class Quantity(object):
"""
The Quantity object is the main Quantipy aggregation engine.
Consists of a link's data matrix representation and sectional defintion
of weight vector (wv), x-codes section (xsect) and y-codes section
(ysect). The instance methods handle creation, retrieval and manipulation
of the data input matrices and section definitions as well as the majority
of statistical calculations.
"""
# -------------------------------------------------
# Instance initialization
# -------------------------------------------------
def __init__(self, link, weight=None, use_meta=False, base_all=False):
# Collect information on wv, x- and y-section
self._uses_meta = use_meta
self.ds = self._convert_to_dataset(link)
self.d = self._data
self.base_all = base_all
self._dataidx = link.get_data().index
if self._uses_meta:
self.meta = self._meta
if list(self.meta().values()) == [None] * len(list(self.meta().values())):
self._uses_meta = False
self.meta = None
else:
self.meta = None
self._cache = link.get_cache()
self.f = link.filter
self.x = link.x
self.y = link.y
self.w = weight if weight is not None else '@1'
self.is_weighted = False
self.type = self._get_type()
if self.type == 'nested':
self.nest_def = Nest(self.y, self.d(), self.meta()).nest()
self._squeezed = False
self.idx_map = None
self.xdef = self.ydef = None
self.matrix = self._get_matrix()
self.is_empty = self.matrix.sum() == 0
self.switched = False
self.factorized = None
self.result = None
self.logical_conditions = []
self.cbase = self.rbase = None
self.comb_x = self.comb_y = None
self.miss_x = self.miss_y = None
self.calc_x = self.calc_y = None
self._has_x_margin = self._has_y_margin = False
def __repr__(self):
if self.result is not None:
return '%s' % (self.result)
else:
return 'Quantity - x: {}, xdef: {} y: {}, ydef: {}, w: {}'.format(
self.x, self.xdef, self.y, self.ydef, self.w)
# -------------------------------------------------
# Matrix creation and retrieval
# -------------------------------------------------
def _convert_to_dataset(self, link):
ds = qp.DataSet('')
ds._data = link.stack[link.data_key].data
ds._meta = link.get_meta()
return ds
def _data(self):
return self.ds._data
def _meta(self):
return self.ds._meta
def _get_type(self):
"""
Determine the variable type, which can be "simple", "nested" or "array".
"""
if self._uses_meta:
masks = [self.x, self.y]
if any(mask in list(self.meta()['masks'].keys()) for mask in masks):
mask = {
True: self.x,
False: self.y}.get(self.x in list(self.meta()['masks'].keys()))
if self.meta()['masks'][mask]['type'] == 'array':
if self.x == '@':
self.x, self.y = self.y, self.x
return 'array'
elif '>' in self.y:
return 'nested'
else:
return 'simple'
else:
return 'simple'
def _is_multicode_array(self, mask_element):
return (
self.d()[mask_element].dtype == 'str'
)
def _get_wv(self):
"""
Returns the weight vector of the matrix.
"""
return self.d()[[self.w]].values
def weight(self):
"""
Weight by multiplying the indicator entries with the weight vector.
"""
self.matrix *= np.atleast_3d(self.wv)
# if self.is_weighted:
# self.matrix[:, 1:, 1:] *= np.atleast_3d(self.wv)
# else:
# self.matrix *= np.atleast_3d(self.wv)
# self.is_weighted = True
return None
def unweight(self):
"""
Remove any weighting by dividing the matrix by itself.
"""
self.matrix /= self.matrix
# self.matrix[:, 1:, 1:] /= self.matrix[:, 1:, 1:]
# self.is_weighted = False
return None
def _get_total(self):
"""
Return a vector of 1s for the matrix.
"""
return self.d()[['@1']].values
def _copy(self):
"""
Copy the Quantity instance, i.e. its data matrix, into a new object.
"""
m_copy = np.empty_like(self.matrix)
m_copy[:] = self.matrix
c = copy.copy(self)
c.matrix = m_copy
return c
def _get_response_codes(self, var):
"""
Query the meta-specified code values for a meta-using Quantity.
"""
if self.type == 'array':
rescodes = [v['value'] for v in self.meta()['lib']['values'][var]]
else:
values = emulate_meta(
self.meta(), self.meta()['columns'][var].get('values', None))
rescodes = [v['value'] for v in values]
return rescodes
def _get_response_texts(self, var, text_key=None):
"""
Query the meta-specified text values for a meta-using Quantity.
"""
if text_key is None: text_key = 'main'
if self.type == 'array':
restexts = [v[text_key] for v in self.meta()['lib']['values'][var]]
else:
values = emulate_meta(
self.meta(), self.meta()['columns'][var].get('values', None))
restexts = [v['text'][text_key] for v in values]
return restexts
def _switch_axes(self):
"""
Swap the x- and y-sections of the matrix and all related sectional info.
"""
if self.switched:
self.switched = False
self.matrix = self.matrix.swapaxes(1, 2)
else:
self.switched = True
self.matrix = self.matrix.swapaxes(2, 1)
self.xdef, self.ydef = self.ydef, self.xdef
self._x_indexers, self._y_indexers = self._y_indexers, self._x_indexers
self.comb_x, self.comb_y = self.comb_y, self.comb_x
self.miss_x, self.miss_y = self.miss_y, self.miss_x
return self
def _reset(self):
for prop in list(self.__dict__.keys()):
if prop in ['_uses_meta', 'base_all', '_dataidx', 'meta', '_cache',
'd', 'idx_map']:
pass
elif prop in ['_squeezed', 'switched']:
self.__dict__[prop] = False
else:
self.__dict__[prop] = None
self.result = None
return None
def swap(self, var, axis='x', inplace=True):
"""
Change the Quantity's x- or y-axis keeping filter and weight setup.
All edits and aggregation results will be removed during the swap.
Parameters
----------
var : str
New variable's name used in axis swap.
axis : {'x', 'y'}, default ``'x'``
The axis to swap.
inplace : bool, default ``True``
Whether to modify the Quantity inplace or return a new instance.
Returns
-------
swapped : New Quantity instance with exchanged x- or y-axis.
"""
if axis == 'x':
x = var
y = self.y
else:
x = self.x
y = var
f, w = self.f, self.w
if inplace:
swapped = self
else:
swapped = self._copy()
swapped._reset()
swapped.x, swapped.y = x, y
swapped.f, swapped.w = f, w
swapped.type = swapped._get_type()
swapped._get_matrix()
if not inplace:
return swapped
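# Hedged usage sketch ('age' is a hypothetical variable name):
#
#     q_by_age = q.swap('age', axis='y', inplace=False)
#     q_by_age.count(axis='x')    # column bases against the new y-axis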
def rescale(self, scaling, drop=False):
"""
Modify the object's ``xdef`` property reflecting new value definitions.
Parameters
----------
scaling : dict
Mapping of old_code: new_code, given as int or float values.
drop : bool, default False
If True, codes not included in the scaling dict will be excluded.
Returns
-------
self
"""
proper_scaling = {old_code: new_code for old_code, new_code
in list(scaling.items()) if old_code in self.xdef}
xdef_ref = [proper_scaling[code] if code in list(proper_scaling.keys())
else code for code in self.xdef]
if drop:
to_drop = [code for code in self.xdef if code not in
list(proper_scaling.keys())]
self.exclude(to_drop, axis='x')
self.xdef = xdef_ref
return self
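# Hedged usage sketch (assumed 5-point scale codes): rescale to 0-100 factors
# before computing a mean, dropping any codes not covered by the mapping.
#
#     q.rescale({1: 0, 2: 25, 3: 50, 4: 75, 5: 100}, drop=True)
#     q.summarize('mean')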
def exclude(self, codes, axis='x'):
"""
Wrapper for _missingfy(...keep_codes=False, ..., keep_base=False, ...)
Excludes specified codes from aggregation.
"""
self._missingfy(codes, axis=axis, keep_base=False, inplace=True)
return self
def limit(self, codes, axis='x'):
"""
Wrapper for _missingfy(...keep_codes=True, ..., keep_base=True, ...)
Restrict the data matrix entries to contain the specified codes only.
"""
self._missingfy(codes, axis=axis, keep_codes=True, keep_base=True,
inplace=True)
return self
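# Hedged usage sketch (98/99 are hypothetical 'don't know'/'no answer' codes):
#
#     q.exclude([98, 99], axis='x')        # drop DK/NA from the aggregation
#     q.limit([1, 2, 3, 4, 5], axis='x')   # or: keep the scale codes only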
def filter(self, condition, keep_base=True, inplace=False):
"""
Use a Quantipy conditional expression to filter the data matrix entries.
"""
if inplace:
filtered = self
else:
filtered = self._copy()
qualified_rows = self._get_logic_qualifiers(condition)
valid_rows = self.idx_map[self.idx_map[:, 0] == 1][:, 1]
filter_idx = np.in1d(valid_rows, qualified_rows)
if keep_base:
filtered.matrix[~filter_idx, 1:, :] = np.NaN
else:
filtered.matrix[~filter_idx, :, :] = np.NaN
if not inplace:
return filtered
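# Hedged usage sketch: `has_any` is assumed to come from Quantipy's logic
# helpers (quantipy.core.tools.view.logic); 'gender' is a hypothetical column.
#
#     males_only = q.filter({'gender': has_any([1])}, keep_base=True,
#                           inplace=False)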
def _get_logic_qualifiers(self, condition):
if not isinstance(condition, dict):
column = self.x
logic = condition
else:
column = list(condition.keys())[0]
logic = list(condition.values())[0]
idx, logical_expression = get_logic_index(self.d()[column], logic, self.d())
logical_expression = logical_expression.split(':')[0]
if not column == self.x:
logical_expression = logical_expression.replace('x[', column+'[')
self.logical_conditions.append(logical_expression)
return idx
def _missingfy(self, codes, axis='x', keep_codes=False, keep_base=True,
indices=False, inplace=True):
"""
Clean matrix from entries preserving or modifying the weight vector.
Parameters
----------
codes : list
A list of codes to be considered in cleaning.
axis : {'x', 'y'}, default 'x'
The axis to clean codes on. Refers to the Link object's x- and y-
axes.
keep_codes : bool, default False
Controls whether the passed codes are kept or erased from the
Quantity matrix data entries.
keep_base: bool, default True
Controls whether the weight vector is set to np.NaN alongside
the x-section rows or remains unmodified.
indices: bool, default False
If ``True``, the data matrix indices of the corresponding codes
will be returned as well.
inplace : bool, default True
Will overwrite self.matrix with the missingfied matrix by default.
If ``False``, the method will return a new np.array with the
modified entries.
Returns
-------
self or numpy.array (and optionally a list of int when ``indices=True``)
Either a new matrix is returned as numpy.array or the ``matrix``
property is modified inplace.
"""
if inplace:
missingfied = self
else:
missingfied = self._copy()
if axis == 'y' and self.y == '@' and not self.type == 'array':
return self
elif axis == 'y' and self.type == 'array':
ni_err = 'Cannot missingfy array mask element sections!'
raise NotImplementedError(ni_err)
else:
if axis == 'y':
missingfied._switch_axes()
mis_ix = missingfied._get_drop_idx(codes, keep_codes)
mis_ix = [code + 1 for code in mis_ix]
if mis_ix is not None:
for ix in mis_ix:
np.place(missingfied.matrix[:, ix],
missingfied.matrix[:, ix] > 0, np.NaN)
if not keep_base:
if axis == 'x':
self.miss_x = codes
else:
self.miss_y = codes
if self.type == 'array':
mask = np.nansum(missingfied.matrix[:, missingfied._x_indexers],
axis=1, keepdims=True)
mask /= mask
mask = mask > 0
else:
mask = np.nansum(np.sum(missingfied.matrix,
axis=1, keepdims=False),
axis=1, keepdims=True) > 0
missingfied.matrix[~mask] = np.NaN
if axis == 'y':
missingfied._switch_axes()
if inplace:
self.matrix = missingfied.matrix
if indices:
return mis_ix
else:
if indices:
return missingfied, mis_ix
else:
return missingfied
def _organize_global_missings(self, missings):
hidden = [c for c in list(missings.keys()) if missings[c] == 'hidden']
excluded = [c for c in list(missings.keys()) if missings[c] == 'excluded']
shown = [c for c in list(missings.keys()) if missings[c] == 'shown']
return hidden, excluded, shown
def _organize_stats_missings(self, missings):
excluded = [c for c in list(missings.keys())
if missings[c] in ['d.excluded', 'excluded']]
return excluded
def _autodrop_stats_missings(self):
if self.x == '@':
pass
elif self.ds._has_missings(self.x):
missings = self.ds._get_missings(self.x)
to_drop = self._organize_stats_missings(missings)
self.exclude(to_drop)
else:
pass
return None
def _clean_from_global_missings(self):
if self.x == '@':
pass
elif self.ds._has_missings(self.x):
missings = self.ds._get_missings(self.x)
hidden, excluded, shown = self._organize_global_missings(missings)
if excluded:
excluded_codes = excluded
excluded_idxer = self._missingfy(excluded, keep_base=False,
indices=True)
else:
excluded_codes, excluded_idxer = [], []
if hidden:
hidden_codes = hidden
hidden_idxer = self._get_drop_idx(hidden, keep=False)
hidden_idxer = [code + 1 for code in hidden_idxer]
else:
hidden_codes, hidden_idxer = [], []
dropped_codes = excluded_codes + hidden_codes
dropped_codes_idxer = excluded_idxer + hidden_idxer
self._x_indexers = [x_idx for x_idx in self._x_indexers
if x_idx not in dropped_codes_idxer]
self.matrix = self.matrix[:, [0] + self._x_indexers]
self.xdef = [x_c for x_c in self.xdef if x_c not in dropped_codes]
else:
pass
return None
def _get_drop_idx(self, codes, keep):
"""
Produces a list of indices referring to the given input matrix's axes
sections in order to erase data entries.
Parameters
----------
codes : list
Data codes that should be dropped from or kept in the matrix.
keep : boolean
Controls whether the passed code definition is interpreted as
"codes to keep" or "codes to drop".
Returns
-------
drop_idx : list
List of x section matrix indices.
"""
if codes is None:
return None
else:
if keep:
return [self.xdef.index(code) for code in self.xdef
if code not in codes]
else:
return [self.xdef.index(code) for code in codes
if code in self.xdef]
def group(self, groups, axis='x', expand=None, complete=False):
"""
Build simple or logical net vectors, optionally keeping originating codes.
Parameters
----------
groups : list, dict of lists or logic expression
The group/net code definition(s) in the form of...
* a simple list: ``[1, 2, 3]``
* a dict of list: ``{'grp A': [1, 2, 3], 'grp B': [4, 5, 6]}``
* a logical expression: ``not_any([1, 2])``
axis : {``'x'``, ``'y'``}, default ``'x'``
The axis to group codes on.
expand : {None, ``'before'``, ``'after'``}, default ``None``
If ``'before'``, the codes that are grouped will be kept and placed
before the grouped aggregation; vice versa for ``'after'``. Ignored
on logical expressions found in ``groups``.
complete : bool, default False
If True, codes that define the Link on the given ``axis`` but are
not present in the ``groups`` definition(s) will be placed in their
natural position within the aggregation, respecting the value of
``expand``.
Returns
-------
None
"""
# check validity and clean combine instructions
if axis == 'y' and self.type == 'array':
ni_err_array = 'Array mask element sections cannot be combined.'
raise NotImplementedError(ni_err_array)
elif axis == 'y' and self.y == '@':
val_err = 'Total link has no y-axis codes to combine.'
raise ValueError(val_err)
grp_def = self._organize_grp_def(groups, expand, complete, axis)
combines = []
names = []
# generate the net vectors (+ possible expanded originating codes)
for grp in grp_def:
name, group, exp, logical = grp[0], grp[1], grp[2], grp[3]
one_code = len(group) == 1
if one_code and not logical:
vec = self._slice_vec(group[0], axis=axis)
elif not logical and not one_code:
vec, idx = self._grp_vec(group, axis=axis)
else:
vec = self._logic_vec(group)
if axis == 'y':
self._switch_axes()
if exp is not None:
m_idx = [ix for ix in self._x_indexers if ix not in idx]
m_idx = self._sort_indexer_as_codes(m_idx, group)
if exp == 'after':
names.extend(name)
names.extend([c for c in group])
combines.append(
np.concatenate([vec, self.matrix[:, m_idx]], axis=1))
else:
names.extend([c for c in group])
names.extend(name)
combines.append(
np.concatenate([self.matrix[:, m_idx], vec], axis=1))
else:
names.extend(name)
combines.append(vec)
if axis == 'y':
self._switch_axes()
# re-construct the combined data matrix
combines = np.concatenate(combines, axis=1)
if axis == 'y':
self._switch_axes()
combined_matrix = np.concatenate([self.matrix[:, [0]],
combines], axis=1)
if axis == 'y':
combined_matrix = combined_matrix.swapaxes(1, 2)
self._switch_axes()
# update the sectional information
new_sect_def = list(range(0, combined_matrix.shape[1] - 1))
if axis == 'x':
self.xdef = new_sect_def
self._x_indexers = self._get_x_indexers()
self.comb_x = names
else:
self.ydef = new_sect_def
self._y_indexers = self._get_y_indexers()
self.comb_y = names
self.matrix = combined_matrix
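# Hedged usage sketch (assumed 5-point scale codes): build Top-2 / Bottom-2
# nets while keeping the originating codes in place after each net.
#
#     q.group([{'Top 2': [4, 5]}, {'Bottom 2': [1, 2]}],
#             axis='x', expand='after', complete=True)
#     q.count()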
def _slice_vec(self, code, axis='x'):
'''
Return the single matrix section (vector) belonging to the given code.
'''
if axis == 'x':
code_idx = self.xdef.index(code) + 1
else:
code_idx = self.ydef.index(code) + 1
if axis == 'x':
m_slice = self.matrix[:, [code_idx]]
else:
self._switch_axes()
m_slice = self.matrix[:, [code_idx]]
self._switch_axes()
return m_slice
def _grp_vec(self, codes, axis='x'):
netted, idx = self._missingfy(codes=codes, axis=axis,
keep_codes=True, keep_base=True,
indices=True, inplace=False)
if axis == 'y':
netted._switch_axes()
net_vec = np.nansum(netted.matrix[:, netted._x_indexers],
axis=1, keepdims=True)
net_vec /= net_vec
return net_vec, idx
def _logic_vec(self, condition):
"""
Create net vector of qualified rows based on passed condition.
"""
filtered = self.filter(condition=condition, inplace=False)
net_vec = np.nansum(filtered.matrix[:, self._x_indexers], axis=1,
keepdims=True)
net_vec /= net_vec
return net_vec
def _grp_type(self, grp_def):
if isinstance(grp_def, list):
if not isinstance(grp_def[0], (int, float)):
return 'block'
else:
return 'list'
elif isinstance(grp_def, tuple):
return 'logical'
elif isinstance(grp_def, dict):
return 'wildcard'
def _add_unused_codes(self, grp_def_list, axis):
'''
Insert axis codes missing from the group definitions at their natural position.
'''
query_codes = self.xdef if axis == 'x' else self.ydef
frame_lookup = {c: [[c], [c], None, False] for c in query_codes}
frame = [[code] for code in query_codes]
for grpdef_idx, grpdef in enumerate(grp_def_list):
for code in grpdef[1]:
if [code] in frame:
if grpdef not in frame:
frame[frame.index([code])] = grpdef
else:
frame[frame.index([code])] = '-'
frame = [code for code in frame if not code == '-']
for code in frame:
if code[0] in list(frame_lookup.keys()):
frame[frame.index([code[0]])] = frame_lookup[code[0]]
return frame
def _organize_grp_def(self, grp_def, method_expand, complete, axis):
"""
Sanitize a combine instruction list (of dicts): names, codes, expands.
"""
organized_def = []
codes_used = []
any_extensions = complete
any_logical = False
if method_expand is None and complete:
method_expand = 'before'
if not self._grp_type(grp_def) == 'block':
grp_def = [{'net': grp_def, 'expand': method_expand}]
for grp in grp_def:
if any(isinstance(val, (tuple, dict)) for val in list(grp.values())):
if complete:
ni_err = ('Logical expr. unsupported when complete=True. '
'Only list-type nets/groups can be completed.')
raise NotImplementedError(ni_err)
if 'expand' in list(grp.keys()):
del grp['expand']
expand = None
logical = True
else:
if 'expand' in list(grp.keys()):
grp = copy.deepcopy(grp)
expand = grp['expand']
if expand is None and complete:
expand = 'before'
del grp['expand']
else:
expand = method_expand
logical = False
organized_def.append([list(grp.keys()), list(grp.values())[0], expand, logical])
if expand:
any_extensions = True
if logical:
any_logical = True
codes_used.extend(list(grp.values())[0])
if not any_logical:
if len(set(codes_used)) != len(codes_used) and any_extensions:
ni_err_extensions = ('Same codes in multiple groups unsupported '
'with expand and/or complete =True.')
raise NotImplementedError(ni_err_extensions)
if complete:
return self._add_unused_codes(organized_def, axis)
else:
return organized_def
def _force_to_nparray(self):
"""
Convert the aggregation result into its numpy array equivalent.
"""
if isinstance(self.result, pd.DataFrame):
self.result = self.result.values
return True
else:
return False
def _attach_margins(self):
"""
Force margins back into the current Quantity.result if none are found.
"""
if not self._res_is_stat():
values = self.result
if not self._has_y_margin and not self.y == '@':
margins = False
values = np.concatenate([self.rbase[1:, :], values], 1)
else:
margins = True
if not self._has_x_margin:
margins = False
values = np.concatenate([self.cbase, values], 0)
else:
margins = True
self.result = values
return margins
else:
return False
def _organize_expr_def(self, expression, axis):
"""
Parse a calc expression into its operands, operator and result index lookups.
"""
# Prepare expression parts and lookups for indexing the agg. result
val1, op, val2 = expression[0], expression[1], expression[2]
if self._res_is_stat():
idx_c = [self.current_agg]
offset = 0
else:
if axis == 'x':
idx_c = self.xdef if not self.comb_x else self.comb_x
else:
idx_c = self.ydef if not self.comb_y else self.comb_y
offset = 1
# Test expression validity and find np.array indices / prepare scalar
# values of the expression
idx_err = '"{}" not found in {}-axis.'
# [1] input is 1. scalar, 2. vector from the agg. result
if isinstance(val1, list):
if not val2 in idx_c:
raise IndexError(idx_err.format(val2, axis))
val1 = val1[0]
val2 = idx_c.index(val2) + offset
expr_type = 'scalar_1'
# [2] input is 1. vector from the agg. result, 2. scalar
elif isinstance(val2, list):
if not val1 in idx_c:
raise IndexError(idx_err.format(val1, axis))
val1 = idx_c.index(val1) + offset
val2 = val2[0]
expr_type = 'scalar_2'
# [3] input is two vectors from the agg. result
elif not any(isinstance(val, list) for val in [val1, val2]):
if not val1 in idx_c:
raise IndexError(idx_err.format(val1, axis))
if not val2 in idx_c:
raise IndexError(idx_err.format(val2, axis))
val1 = idx_c.index(val1) + offset
val2 = idx_c.index(val2) + offset
expr_type = 'vectors'
return val1, op, val2, expr_type, idx_c
@staticmethod
def constant(num):
return [num]
def calc(self, expression, axis='x', result_only=False):
"""
Compute (simple) aggregation level arithmetic.
"""
unsupported = ['cbase', 'rbase', 'summary', 'x_sum', 'y_sum']
if self.result is None:
raise ValueError('No aggregation to base calculation on.')
elif self.current_agg in unsupported:
ni_err = 'Aggregation type "{}" not supported.'
raise NotImplementedError(ni_err.format(self.current_agg))
elif axis not in ['x', 'y']:
raise ValueError('Invalid axis parameter: {}'.format(axis))
is_df = self._force_to_nparray()
has_margin = self._attach_margins()
values = self.result
expr_name = list(expression.keys())[0]
if axis == 'x':
self.calc_x = expr_name
else:
self.calc_y = expr_name
values = values.T
expr = list(expression.values())[0]
v1, op, v2, exp_type, index_codes = self._organize_expr_def(expr, axis)
# ====================================================================
# TODO: generalize this calculation part so that it can "parse"
# arbitrary calculation rules given as nested or concatenated
# operators/codes sequences.
if exp_type == 'scalar_1':
val1, val2 = v1, values[[v2], :]
elif exp_type == 'scalar_2':
val1, val2 = values[[v1], :], v2
elif exp_type == 'vectors':
val1, val2 = values[[v1], :], values[[v2], :]
calc_res = op(val1, val2)
# ====================================================================
if axis == 'y':
calc_res = calc_res.T
ap_axis = 0 if axis == 'x' else 1
if result_only:
if not self._res_is_stat():
self.result = np.concatenate([self.result[[0], :], calc_res],
ap_axis)
else:
self.result = calc_res
else:
self.result = np.concatenate([self.result, calc_res], ap_axis)
if axis == 'x':
self.calc_x = index_codes + [self.calc_x]
else:
self.calc_y = index_codes + [self.calc_y]
self.cbase = self.result[[0], :]
if self.type in ['simple', 'nested']:
self.rbase = self.result[:, [0]]
else:
self.rbase = None
if not self._res_is_stat():
self.current_agg = 'calc'
self._organize_margins(has_margin)
else:
self.current_agg = 'calc'
if is_df:
self.to_df()
return self
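# Hedged usage sketch: `sub` is assumed to be a two-argument callable such as
# operator.sub; codes 5 and 1 are hypothetical x-axis codes of the Link.
#
#     from operator import sub
#     q.count()
#     q.calc({'Top minus Bottom': (5, sub, 1)}, axis='x')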
def count(self, axis=None, raw_sum=False, margin=True, as_df=True):
"""
Count entries over all cells or per axis margin.
Parameters
----------
axis : {None, 'x', 'y'}, default None
When axis is None, the frequency of all cells from the uni- or
multivariate distribution is presented. If the axis is specified
to be either 'x' or 'y' the margin per axis becomes the resulting
aggregation.
raw_sum : bool, default False
If True will perform a simple summation over the cells given the
axis parameter. This ignores net counting of qualifying answers in
favour of summing over all answers given when considering margins.
margin : bool, default True
Controls whether the margins of the aggregation result are shown.
This also applies to margin aggregations themselves, since they
contain a margin (in the form of the total number of cases) as well.
as_df : bool, default True
Controls whether the aggregation is transformed into a Quantipy-
multiindexed (following the Question/Values convention)
pandas.DataFrame or will be left in its numpy.array format.
Returns
-------
self
Passes a pandas.DataFrame or numpy.array of cell or margin counts
to the ``result`` property.
"""
if axis is None and raw_sum:
raise ValueError('Cannot calculate raw sum without axis.')
if axis is None:
self.current_agg = 'freq'
elif axis == 'x':
self.current_agg = 'cbase' if not raw_sum else 'x_sum'
elif axis == 'y':
self.current_agg = 'rbase' if not raw_sum else 'y_sum'
if not self.w == '@1':
self.weight()
if not self.is_empty or self._uses_meta:
counts = np.nansum(self.matrix, axis=0)
else:
counts = self._empty_result()
self.cbase = counts[[0], :]
if self.type in ['simple', 'nested']:
self.rbase = counts[:, [0]]
else:
self.rbase = None
if axis is None:
self.result = counts
elif axis == 'x':
if not raw_sum:
self.result = counts[[0], :]
else:
self.result = np.nansum(counts[1:, :], axis=0, keepdims=True)
elif axis == 'y':
if not raw_sum:
self.result = counts[:, [0]]
else:
if self.x == '@' or self.y == '@':
self.result = counts[:, [0]]
else:
self.result = np.nansum(counts[:, 1:], axis=1, keepdims=True)
self._organize_margins(margin)
if as_df:
self.to_df()
self.unweight()
return self
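# Hedged usage sketch (assumed Link and weight setup; names are hypothetical):
#
#     q = Quantity(link, weight='weight_a')
#     q.count()                         # full cell frequencies incl. margins
#     q.count(axis='x')                 # column bases (cbase) only
#     q.count(axis='x', raw_sum=True)   # plain column sums (x_sum)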
def _empty_result(self):
if self._res_is_stat() or self.current_agg == 'summary':
self.factorized = 'x'
xdim = 1 if self._res_is_stat() else 8
if self.ydef is None:
ydim = 1
elif self.ydef is not None and len(self.ydef) == 0:
ydim = 2
else:
ydim = len(self.ydef) + 1
else:
if self.xdef is not None:
if len(self.xdef) == 0:
xdim = 2
else:
xdim = len(self.xdef) + 1
if self.ydef is None:
ydim = 1
elif self.ydef is not None and len(self.ydef) == 0:
ydim = 2
else:
ydim = len(self.ydef) + 1
elif self.xdef is None:
xdim = 2
if self.ydef is None:
ydim = 1
elif self.ydef is not None and len(self.ydef) == 0:
ydim = 2
else:
ydim = len(self.ydef) + 1
return np.zeros((xdim, ydim))
def _effective_n(self, axis=None, margin=True):
self.weight()
effective = (np.nansum(self.matrix, axis=0)**2 /
np.nansum(self.matrix**2, axis=0))
self.unweight()
start_on = 0 if margin else 1
if axis is None:
return effective[start_on:, start_on:]
elif axis == 'x':
return effective[[0], start_on:]
else:
return effective[start_on:, [0]]
def summarize(self, stat='summary', axis='x', margin=True, as_df=True):
"""
Calculate distribution statistics across the given axis.
Parameters
----------
stat : {'summary', 'mean', 'median', 'var', 'stddev', 'sem', 'varcoeff',
'min', 'lower_q', 'upper_q', 'max'}, default 'summary'
The measure to calculate. Defaults to a summary output of the most
important sample statistics.
axis : {'x', 'y'}, default 'x'
The axis which is reduced in the aggregation, e.g. column vs. row
means.
margin : bool, default True
Controls whether statistic(s) of the marginal distribution are
shown.
as_df : bool, default True
Controls whether the aggregation is transformed into a Quantipy-
multiindexed (following the Question/Values convention)
pandas.DataFrame or will be left in its numpy.array format.
Returns
-------
self
Passes a pandas.DataFrame or numpy.array of the descriptive (summary)
statistic(s) to the ``result`` property.
"""
self.current_agg = stat
if self.is_empty:
self.result = self._empty_result()
else:
self._autodrop_stats_missings()
if stat == 'summary':
stddev, mean, base = self._dispersion(axis, measure='sd',
_return_mean=True,
_return_base=True)
self.result = np.concatenate([
base, mean, stddev,
self._min(axis),
self._percentile(perc=0.25),
self._percentile(perc=0.50),
self._percentile(perc=0.75),
self._max(axis)
], axis=0)
elif stat == 'mean':
self.result = self._means(axis)
elif stat == 'var':
self.result = self._dispersion(axis, measure='var')
elif stat == 'stddev':
self.result = self._dispersion(axis, measure='sd')
elif stat == 'sem':
self.result = self._dispersion(axis, measure='sem')
elif stat == 'varcoeff':
self.result = self._dispersion(axis, measure='varcoeff')
elif stat == 'min':
self.result = self._min(axis)
elif stat == 'lower_q':
self.result = self._percentile(perc=0.25)
elif stat == 'median':
self.result = self._percentile(perc=0.5)
elif stat == 'upper_q':
self.result = self._percentile(perc=0.75)
elif stat == 'max':
self.result = self._max(axis)
self._organize_margins(margin)
if as_df:
self.to_df()
return self
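# Hedged usage sketch: descriptive stats over the x-axis of an assumed
# numeric or single-coded Link.
#
#     q.summarize('mean')      # column means
#     q.summarize('stddev')    # column standard deviations
#     q.summarize()            # full block: base, mean, sd, min, quartiles, max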
def _factorize(self, axis='x', inplace=True):
self.factorized = axis
if inplace:
factorized = self
else:
factorized = self._copy()
if axis == 'y':
factorized._switch_axes()
np.copyto(factorized.matrix[:, 1:, :],
np.atleast_3d(factorized.xdef),
where=factorized.matrix[:, 1:, :]>0)
if not inplace:
return factorized
def _means(self, axis, _return_base=False):
fact = self._factorize(axis=axis, inplace=False)
if not self.w == '@1':
fact.weight()
fact_prod = np.nansum(fact.matrix, axis=0)
fact_prod_sum = np.nansum(fact_prod[1:, :], axis=0, keepdims=True)
bases = fact_prod[[0], :]
means = fact_prod_sum/bases
if axis == 'y':
self._switch_axes()
means = means.T
bases = bases.T
if _return_base:
return means, bases
else:
return means
def _dispersion(self, axis='x', measure='sd', _return_mean=False,
_return_base=False):
"""
Extracts measures of dispersion from the incoming distribution of
X vs. Y. Can return the arithm. mean by request as well. Dispersion
measures supported are standard deviation, variance, coefficient of
variation and standard error of the mean.
"""
means, bases = self._means(axis, _return_base=True)
unbiased_n = bases - 1
self.unweight()
factorized = self._factorize(axis, inplace=False)
factorized.matrix[:, 1:] -= means
factorized.matrix[:, 1:] *= factorized.matrix[:, 1:, :]
if not self.w == '@1':
factorized.weight()
diff_sqrt = np.nansum(factorized.matrix[:, 1:], axis=1)
disp = np.nansum(diff_sqrt/unbiased_n, axis=0, keepdims=True)
disp[disp <= 0] = np.NaN
disp[np.isinf(disp)] = np.NaN
if measure == 'sd':
disp = np.sqrt(disp)
elif measure == 'sem':
disp = np.sqrt(disp) / np.sqrt((unbiased_n + 1))
elif measure == 'varcoeff':
disp = np.sqrt(disp) / means
self.unweight()
if _return_mean and _return_base:
return disp, means, bases
elif _return_mean:
return disp, means
elif _return_base:
return disp, bases
else:
return disp
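# Formula sketch of the measures computed above (weighted, column-wise):
#     var      = sum(w * (x - mean)^2) / (cbase - 1)
#     sd       = sqrt(var)
#     sem      = sd / sqrt(cbase)
#     varcoeff = sd / mean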
def _max(self, axis='x'):
factorized = self._factorize(axis, inplace=False)
vals = np.nansum(factorized.matrix[:, 1:, :], axis=1)
return np.nanmax(vals, axis=0, keepdims=True)
def _min(self, axis='x'):
factorized = self._factorize(axis, inplace=False)
vals = np.nansum(factorized.matrix[:, 1:, :], axis=1)
if 0 not in factorized.xdef: np.place(vals, vals == 0, np.inf)
return np.nanmin(vals, axis=0, keepdims=True)
def _percentile(self, axis='x', perc=0.5):
"""
Computes percentiles from the incoming distribution of X vs. Y and the
requested percentile value. The implementation mirrors the algorithm
used in SPSS Dimensions and the EXAMINE procedure in SPSS Statistics.
It is based on percentile definition #6 (adjusted for survey weights)
in:
Hyndman, <NAME>. and <NAME> (1996) -
"Sample Quantiles in Statistical Packages",
The American Statistician, 50, No. 4, 361-365.
Parameters
----------
axis : {'x', 'y'}, default 'x'
The axis which is reduced in the aggregation, i.e. column vs. row
medians.
perc : float, default 0.5
Defines the percentile to be computed. Defaults to 0.5,
the sample median.
Returns
-------
percs : np.array
Numpy array storing percentile values.
"""
percs = []
factorized = self._factorize(axis, inplace=False)
vals = np.nansum(np.nansum(factorized.matrix[:, 1:, :], axis=1,
keepdims=True), axis=1)
weights = (vals/vals)*self.wv
for shape_i in range(0, vals.shape[1]):
iter_weights = weights[:, shape_i]
iter_vals = vals[:, shape_i]
mask = ~np.isnan(iter_weights)
iter_weights = iter_weights[mask]
iter_vals = iter_vals[mask]
sorter = np.argsort(iter_vals)
iter_vals = np.take(iter_vals, sorter)
iter_weights = np.take(iter_weights, sorter)
iter_wsum = np.nansum(iter_weights, axis=0)
iter_wcsum = np.cumsum(iter_weights, axis=0)
k = (iter_wsum + 1.0) * perc
if iter_vals.shape[0] == 0:
percs.append(0.00)
elif iter_vals.shape[0] == 1:
percs.append(iter_vals[0])
elif iter_wcsum[0] > k:
wcsum_k = iter_wcsum[0]
percs.append(iter_vals[0])
elif iter_wcsum[-1] <= k:
percs.append(iter_vals[-1])
else:
wcsum_k = iter_wcsum[iter_wcsum <= k][-1]
p_k_idx = np.searchsorted(np.ndarray.flatten(iter_wcsum), wcsum_k)
p_k = iter_vals[p_k_idx]
p_k1 = iter_vals[p_k_idx+1]
w_k1 = iter_weights[p_k_idx+1]
excess = k - wcsum_k
if excess >= 1.0:
percs.append(p_k1)
else:
if w_k1 >= 1.0:
percs.append((1.0-excess)*p_k + excess*p_k1)
else:
percs.append((1.0-(excess/w_k1))*p_k +
(excess/w_k1)*p_k1)
return np.array(percs)[None, :]
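# Worked sketch of the interpolation used above (weighted definition 6):
#     k = (sum_of_weights + 1) * perc
#     find the largest cumulative weight W_j <= k, with sorted value x_j
#     excess = k - W_j
#     percentile = (1 - excess) * x_j + excess * x_{j+1}   (if w_{j+1} >= 1,
#                  otherwise excess is scaled by w_{j+1} before interpolating)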
def _organize_margins(self, margin):
if self._res_is_stat():
if self.type == 'array' or self.y == '@' or self.x == '@':
self._has_y_margin = self._has_x_margin = False
else:
if self.factorized == 'x':
if not margin:
self._has_x_margin = False
self._has_y_margin = False
self.result = self.result[:, 1:]
else:
self._has_x_margin = False
self._has_y_margin = True
else:
if not margin:
self._has_x_margin = False
self._has_y_margin = False
self.result = self.result[1:, :]
else:
self._has_x_margin = True
self._has_y_margin = False
if self._res_is_margin():
if self.y == '@' or self.x == '@':
if self.current_agg in ['cbase', 'x_sum']:
self._has_y_margin = self._has_x_margin = False
if self.current_agg in ['rbase', 'y_sum']:
if not margin:
self._has_y_margin = self._has_x_margin = False
self.result = self.result[1:, :]
else:
self._has_x_margin = True
self._has_y_margin = False
else:
if self.current_agg in ['cbase', 'x_sum']:
if not margin:
self._has_y_margin = self._has_x_margin = False
self.result = self.result[:, 1:]
else:
self._has_x_margin = False
self._has_y_margin = True
if self.current_agg in ['rbase', 'y_sum']:
if not margin:
self._has_y_margin = self._has_x_margin = False
self.result = self.result[1:, :]
else:
self._has_x_margin = True
self._has_y_margin = False
elif self.current_agg in ['freq', 'summary', 'calc']:
if self.type == 'array' or self.y == '@' or self.x == '@':
if not margin:
self.result = self.result[1:, :]
self._has_x_margin = False
self._has_y_margin = False
else:
self._has_x_margin = True
self._has_y_margin = False
else:
if not margin:
self.result = self.result[1:, 1:]
self._has_x_margin = False
self._has_y_margin = False
else:
self._has_x_margin = True
self._has_y_margin = True
else:
pass
def _sort_indexer_as_codes(self, indexer, codes):
mapping = sorted(zip(indexer, codes), key=lambda l: l[1])
return [i[0] for i in mapping]
def _get_y_indexers(self):
if self._squeezed or self.type in ['simple', 'nested']:
if self.ydef is not None:
idxs = list(range(1, len(self.ydef)+1))
return self._sort_indexer_as_codes(idxs, self.ydef)
else:
return [1]
else:
y_indexers = []
xdef_len = len(self.xdef)
zero_based_ys = [idx for idx in range(0, xdef_len)]
for y_no in range(0, len(self.ydef)):
if y_no == 0:
y_indexers.append(zero_based_ys)
else:
y_indexers.append([idx + y_no * xdef_len
for idx in zero_based_ys])
return y_indexers
def _get_x_indexers(self):
if self._squeezed or self.type in ['simple', 'nested']:
idxs = list(range(1, len(self.xdef)+1))
return self._sort_indexer_as_codes(idxs, self.xdef)
else:
x_indexers = []
upper_x_idx = len(self.ydef)
start_x_idx = [len(self.xdef) * offset
for offset in range(0, upper_x_idx)]
for x_no in range(0, len(self.xdef)):
x_indexers.append([idx + x_no for idx in start_x_idx])
return x_indexers
def _squeeze_dummies(self):
"""
Reshape and replace initial 2D dummy matrix into its 3D equivalent.
"""
self.wv = self.matrix[:, [-1]]
sects = []
if self.type == 'array':
x_sections = self._get_x_indexers()
y_sections = self._get_y_indexers()
y_total = np.nansum(self.matrix[:, x_sections], axis=1)
y_total /= y_total
y_total = y_total[:, None, :]
for sect in y_sections:
sect = self.matrix[:, sect]
sects.append(sect)
sects = np.dstack(sects)
self._squeezed = True
sects = np.concatenate([y_total, sects], axis=1)
self.matrix = sects
self._x_indexers = self._get_x_indexers()
self._y_indexers = []
elif self.type in ['simple', 'nested']:
x = self.matrix[:, :len(self.xdef)+1]
y = self.matrix[:, len(self.xdef)+1:-1]
for i in range(0, y.shape[1]):
sects.append(x * y[:, [i]])
sects = np.dstack(sects)
self._squeezed = True
self.matrix = sects
self._x_indexers = self._get_x_indexers()
self._y_indexers = self._get_y_indexers()
#=====================================================================
#THIS CAN SPEED UP PERFORMANCE BY A GOOD AMOUNT BUT STACK-SAVING
#TIME & SIZE WILL SUFFER. WE CAN DEL THE "SQUEEZED" COLLECTION AT
#SAVE STAGE.
#=====================================================================
# self._cache.set_obj(collection='squeezed',
# key=self.f+self.w+self.x+self.y,
# obj=(self.xdef, self.ydef,
# self._x_indexers, self._y_indexers,
# self.wv, self.matrix, self.idx_map))
def _get_matrix(self):
wv = self._cache.get_obj('weight_vectors', self.w)
if wv is None:
wv = self._get_wv()
self._cache.set_obj('weight_vectors', self.w, wv)
total = self._cache.get_obj('weight_vectors', '@1')
if total is None:
total = self._get_total()
self._cache.set_obj('weight_vectors', '@1', total)
if self.type == 'array':
xm, self.xdef, self.ydef = self._dummyfy()
self.matrix = np.concatenate((xm, wv), 1)
else:
if self.y == '@' or self.x == '@':
section = self.x if self.y == '@' else self.y
xm, self.xdef = self._cache.get_obj('matrices', section)
if xm is None:
xm, self.xdef = self._dummyfy(section)
self._cache.set_obj('matrices', section, (xm, self.xdef))
self.ydef = None
self.matrix = np.concatenate((total, xm, total, wv), 1)
else:
xm, self.xdef = self._cache.get_obj('matrices', self.x)
if xm is None:
xm, self.xdef = self._dummyfy(self.x)
self._cache.set_obj('matrices', self.x, (xm, self.xdef))
ym, self.ydef = self._cache.get_obj('matrices', self.y)
if ym is None:
ym, self.ydef = self._dummyfy(self.y)
self._cache.set_obj('matrices', self.y, (ym, self.ydef))
self.matrix = np.concatenate((total, xm, total, ym, wv), 1)
self.matrix = self.matrix[self._dataidx]
self.matrix = self._clean()
self._squeeze_dummies()
self._clean_from_global_missings()
return self.matrix
def _dummyfy(self, section=None):
if section is not None:
# i.e. Quantipy multicode data
if self.d()[section].dtype == 'str' or self.d()[section].dtype == 'object':
section_data = self.d()[section].astype('str').str.get_dummies(';')
if self._uses_meta:
res_codes = self._get_response_codes(section)
section_data.columns = [int(col) for col in section_data.columns]
section_data = section_data.reindex(columns=res_codes)
section_data.replace(np.NaN, 0, inplace=True)
if not self._uses_meta:
section_data.sort_index(axis=1, inplace=True)
# i.e. Quantipy single-coded/numerical data
else:
section_data = pd.get_dummies(self.d()[section])
if self._uses_meta and not self._is_raw_numeric(section):
res_codes = self._get_response_codes(section)
section_data = section_data.reindex(columns=res_codes)
section_data.replace(np.NaN, 0, inplace=True)
section_data.rename(
columns={
col: int(col)
if float(col).is_integer()
else col
for col in section_data.columns
},
inplace=True)
return section_data.values, section_data.columns.tolist()
elif section is None and self.type == 'array':
a_i = [i['source'].split('@')[-1] for i in
self.meta()['masks'][self.x]['items']]
a_res = self._get_response_codes(self.x)
dummies = []
if self._is_multicode_array(a_i[0]):
for i in a_i:
i_dummy = self.d()[i].str.get_dummies(';')
i_dummy.columns = [int(col) for col in i_dummy.columns]
dummies.append(i_dummy.reindex(columns=a_res))
else:
for i in a_i:
dummies.append(pd.get_dummies(self.d()[i]).reindex(columns=a_res))
a_data = pd.concat(dummies, axis=1)
return a_data.values, a_res, a_i
def _clean(self):
"""
Drop empty sectional rows from the matrix.
"""
mat = self.matrix.copy()
mat_indexer = np.expand_dims(self._dataidx, 1)
if not self.type == 'array':
xmask = (np.nansum(mat[:, 1:len(self.xdef)+1], axis=1) > 0)
if self.ydef is not None:
if self.base_all:
ymask = (np.nansum(mat[:, len(self.xdef)+1:-1], axis=1) > 0)
else:
ymask = (np.nansum(mat[:, len(self.xdef)+2:-1], axis=1) > 0)
self.idx_map = np.concatenate(
[np.expand_dims(xmask & ymask, 1), mat_indexer], axis=1)
return mat[xmask & ymask]
else:
self.idx_map = np.concatenate(
[np.expand_dims(xmask, 1), mat_indexer], axis=1)
return mat[xmask]
else:
mask = (np.nansum(mat[:, :-1], axis=1) > 0)
self.idx_map = np.concatenate(
[np.expand_dims(mask, 1), mat_indexer], axis=1)
return mat[mask]
def _is_raw_numeric(self, var):
return self.meta()['columns'][var]['type'] in ['int', 'float']
def _res_from_count(self):
return self._res_is_margin() or self.current_agg == 'freq'
def _res_from_summarize(self):
return self._res_is_stat() or self.current_agg == 'summary'
def _res_is_margin(self):
return self.current_agg in ['tbase', 'cbase', 'rbase', 'x_sum', 'y_sum']
def _res_is_stat(self):
return self.current_agg in ['mean', 'min', 'max', 'varcoeff', 'sem',
'stddev', 'var', 'median', 'upper_q',
'lower_q']
def to_df(self):
if self.current_agg == 'freq':
if not self.comb_x:
self.x_agg_vals = self.xdef
else:
self.x_agg_vals = self.comb_x
if not self.comb_y:
self.y_agg_vals = self.ydef
else:
self.y_agg_vals = self.comb_y
elif self.current_agg == 'calc':
if self.calc_x:
self.x_agg_vals = self.calc_x
self.y_agg_vals = self.ydef if not self.comb_y else self.comb_y
else:
self.x_agg_vals = self.xdef if not self.comb_x else self.comb_x
self.y_agg_vals = self.calc_y
elif self.current_agg == 'summary':
summary_vals = ['mean', 'stddev', 'min', '25%',
'median', '75%', 'max']
self.x_agg_vals = summary_vals
self.y_agg_vals = self.ydef
elif self.current_agg in ['x_sum', 'cbase']:
self.x_agg_vals = 'All' if self.current_agg == 'cbase' else 'sum'
self.y_agg_vals = self.ydef
elif self.current_agg in ['y_sum', 'rbase']:
self.x_agg_vals = self.xdef
self.y_agg_vals = 'All' if self.current_agg == 'rbase' else 'sum'
elif self._res_is_stat():
if self.factorized == 'x':
self.x_agg_vals = self.current_agg
self.y_agg_vals = self.ydef if not self.comb_y else self.comb_y
else:
self.x_agg_vals = self.xdef if not self.comb_x else self.comb_x
self.y_agg_vals = self.current_agg
        # can this be made smarter WITHOUT 1000000 IF-ELSEs above?:
if ((self.current_agg in ['freq', 'cbase', 'x_sum', 'summary', 'calc'] or
self._res_is_stat()) and not self.type == 'array'):
if self.y == '@' or self.x == '@':
self.y_agg_vals = '@'
df = pd.DataFrame(self.result)
idx, cols = self._make_multiindex()
df.index = idx
df.columns = cols
self.result = df if not self.x == '@' else df.T
if self.type == 'nested':
self._format_nested_axis()
return self
def _make_multiindex(self):
x_grps = self.x_agg_vals
y_grps = self.y_agg_vals
if not isinstance(x_grps, list):
x_grps = [x_grps]
if not isinstance(y_grps, list):
y_grps = [y_grps]
if not x_grps: x_grps = [None]
if not y_grps: y_grps = [None]
if self._has_x_margin:
x_grps = ['All'] + x_grps
if self._has_y_margin:
y_grps = ['All'] + y_grps
if self.type == 'array':
x_unit = y_unit = self.x
x_names = ['Question', 'Values']
y_names = ['Array', 'Questions']
else:
x_unit = self.x if not self.x == '@' else self.y
y_unit = self.y if not self.y == '@' else self.x
x_names = y_names = ['Question', 'Values']
x = [x_unit, x_grps]
y = [y_unit, y_grps]
index = pd.MultiIndex.from_product(x, names=x_names)
columns = pd.MultiIndex.from_product(y, names=y_names)
return index, columns
def _format_nested_axis(self):
nest_mi = self._make_nest_multiindex()
if not len(self.result.columns) > len(nest_mi.values):
self.result.columns = nest_mi
else:
total_mi_values = []
for var in self.nest_def['variables']:
total_mi_values += [var, -1]
total_mi = pd.MultiIndex.from_product(total_mi_values,
names=nest_mi.names)
full_nest_mi = nest_mi.union(total_mi)
for lvl, c in zip(list(range(1, len(full_nest_mi)+1, 2)),
self.nest_def['level_codes']):
full_nest_mi.set_levels(['All'] + c, level=lvl, inplace=True)
self.result.columns = full_nest_mi
return None
def _make_nest_multiindex(self):
values = []
names = ['Question', 'Values'] * (self.nest_def['levels'])
for lvl_var, lvl_c in zip(self.nest_def['variables'],
self.nest_def['level_codes']):
values.append(lvl_var)
values.append(lvl_c)
mi = pd.MultiIndex.from_product(values, names=names)
return mi
def normalize(self, on='y'):
"""
Convert a raw cell count result to its percentage representation.
Parameters
----------
on : {'y', 'x'}, default 'y'
Defines the base to normalize the result on. ``'y'`` will
produce column percentages, ``'x'`` will produce row
percentages.
Returns
-------
self
            Updates a count-based aggregation in the ``result`` property.
"""
if self.x == '@':
on = 'y' if on == 'x' else 'x'
if on == 'y':
if self._has_y_margin or self.y == '@' or self.x == '@':
base = self.cbase
else:
if self._get_type() == 'array':
base = self.cbase
else:
base = self.cbase[:, 1:]
else:
if self._has_x_margin:
base = self.rbase
else:
base = self.rbase[1:, :]
if isinstance(self.result, pd.DataFrame):
if self.x == '@':
self.result = self.result.T
if on == 'y':
base = np.repeat(base, self.result.shape[0], axis=0)
else:
base = np.repeat(base, self.result.shape[1], axis=1)
self.result = self.result / base * 100
if self.x == '@':
self.result = self.result.T
return self
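    # Illustrative usage sketch (hedged; `link` and `weight` are assumptions
    # about the calling context, not part of this class):
    #
    #   q = qp.Quantity(link, weight)
    #   q.count()                 # cell counts as a Quantipy-indexed DataFrame
    #   q.normalize(on='y')       # convert the counts to column percentages
    #   col_pct = q.result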
def rebase(self, reference, on='counts', overwrite_margins=True):
"""
"""
val_err = 'No frequency aggregation to rebase.'
if self.result is None:
raise ValueError(val_err)
elif self.current_agg != 'freq':
raise ValueError(val_err)
is_df = self._force_to_nparray()
has_margin = self._attach_margins()
ref = self.swap(var=reference, inplace=False)
if self._sects_identical(self.xdef, ref.xdef):
pass
elif self._sects_different_order(self.xdef, ref.xdef):
ref.xdef = self.xdef
ref._x_indexers = ref._get_x_indexers()
ref.matrix = ref.matrix[:, ref._x_indexers + [0]]
elif self._sect_is_subset(self.xdef, ref.xdef):
ref.xdef = [code for code in ref.xdef if code in self.xdef]
ref._x_indexers = ref._sort_indexer_as_codes(ref._x_indexers,
self.xdef)
ref.matrix = ref.matrix[:, [0] + ref._x_indexers]
else:
            idx_err = 'Axis definition is not a subset of rebase reference.'
raise IndexError(idx_err)
ref_freq = ref.count(as_df=False)
self.result = (self.result/ref_freq.result) * 100
if overwrite_margins:
self.rbase = ref_freq.rbase
self.cbase = ref_freq.cbase
self._organize_margins(has_margin)
if is_df: self.to_df()
return self
@staticmethod
def _sects_identical(axdef1, axdef2):
return axdef1 == axdef2
@staticmethod
def _sects_different_order(axdef1, axdef2):
if not len(axdef1) == len(axdef2):
return False
else:
            # same codes, possibly in a different order
            return set(axdef1) == set(axdef2)
@staticmethod
def _sect_is_subset(axdef1, axdef2):
        return len(set(axdef1).intersection(set(axdef2))) > 0
class Test(object):
"""
    The Quantipy Test object is defined by a Link and the view name notation
string of a counts or means view. All auxiliary figures needed to arrive
at the test results are computed inside the instance of the object.
"""
def __init__(self, link, view_name_notation, test_total=False):
super(Test, self).__init__()
# Infer whether a mean or proportion test is being performed
view = link[view_name_notation]
if view.meta()['agg']['method'] == 'descriptives':
self.metric = 'means'
else:
self.metric = 'proportions'
self.invalid = None
self.no_pairs = None
self.no_diffs = None
self.parameters = None
self.test_total = test_total
self.mimic = None
self.level = None
# Calculate the required baseline measures for the test using the
# Quantity instance
self.Quantity = qp.Quantity(link, view.weights(), use_meta=True,
base_all=self.test_total)
self._set_baseline_aggregates(view)
# Set information about the incoming aggregation
# to be able to route correctly through the algorithms
# and re-construct a Quantipy-indexed pd.DataFrame
self.is_weighted = view.meta()['agg']['is_weighted']
self.has_calc = view.has_calc()
self.x = view.meta()['x']['name']
self.xdef = view.dataframe.index.get_level_values(1).tolist()
self.y = view.meta()['y']['name']
self.ydef = view.dataframe.columns.get_level_values(1).tolist()
columns_to_pair = ['@'] + self.ydef if self.test_total else self.ydef
self.ypairs = list(combinations(columns_to_pair, 2))
self.y_is_multi = view.meta()['y']['is_multi']
self.multiindex = (view.dataframe.index, view.dataframe.columns)
def __repr__(self):
return ('%s, total included: %s, test metric: %s, parameters: %s, '
'mimicked: %s, level: %s ')\
% (Test, self.test_total, self.metric, self.parameters,
self.mimic, self.level)
def _set_baseline_aggregates(self, view):
"""
Derive or recompute the basic values required by the ``Test`` instance.
"""
grps, exp, compl, calc, exclude, rescale = view.get_edit_params()
if exclude is not None:
self.Quantity.exclude(exclude)
if self.metric == 'proportions' and self.test_total and view._has_code_expr():
self.Quantity.group(grps, expand=exp, complete=compl)
if self.metric == 'means':
aggs = self.Quantity._dispersion(_return_mean=True,
_return_base=True)
self.sd, self.values, self.cbases = aggs[0], aggs[1], aggs[2]
if not self.test_total:
self.sd = self.sd[:, 1:]
self.values = self.values[:, 1:]
self.cbases = self.cbases[:, 1:]
elif self.metric == 'proportions':
if not self.test_total:
self.values = view.dataframe.values.copy()
self.cbases = view.cbases[:, 1:]
self.rbases = view.rbases[1:, :]
self.tbase = view.cbases[0, 0]
else:
agg = self.Quantity.count(margin=True, as_df=False)
if calc is not None:
calc_only = view._kwargs.get('calc_only', False)
self.Quantity.calc(calc, axis='x', result_only=calc_only)
self.values = agg.result[1:, :]
self.cbases = agg.cbase
self.rbases = agg.rbase[1:, :]
self.tbase = agg.cbase[0, 0]
def set_params(self, test_total=False, level='mid', mimic='Dim', testtype='pooled',
use_ebase=True, ovlp_correc=True, cwi_filter=False,
flag_bases=None):
"""
Sets the test algorithm parameters and defines the type of test.
This method sets the test's global parameters and derives the
necessary measures for the computation of the test statistic.
The default values correspond to the SPSS Dimensions Column Tests
algorithms that control for bias introduced by weighting and
overlapping samples in the column pairs of multi-coded questions.
.. note:: The Dimensions implementation uses variance pooling.
Parameters
----------
test_total : bool, default False
If set to True, the test algorithms will also include an existent
total (@-) version of the original link and test against the
            unconditional data distribution.
level : str or float, default 'mid'
            The level of significance given either as 'low' (0.1),
            'mid' (0.05), 'high' (0.01) or as a specific float, e.g. 0.15.
mimic : {'askia', 'Dim'} default='Dim'
Will instruct the mimicking of a software specific test.
testtype : str, default 'pooled'
Global definition of the tests.
use_ebase : bool, default True
            If True, will use the effective sample sizes instead of the
            simple weighted ones when testing a weighted aggregation.
ovlp_correc : bool, default True
If True, will consider and correct for respondent overlap when
testing between multi-coded column pairs.
cwi_filter : bool, default False
If True, will check an incoming count aggregation for cells that
            fall below a threshold comparison aggregation that assumes counts
to be independent.
flag_bases : list of two int, default None
If provided, the output dataframe will replace results that have
been calculated on (eff.) bases below the first int with ``'**'``
and mark results in columns with bases below the second int with
``'*'``
Returns
-------
self
"""
# Check if the aggregation is non-empty
# and that there are >1 populated columns
if np.nansum(self.values) == 0 or len(self.ydef) == 1:
self.invalid = True
if np.nansum(self.values) == 0:
self.no_diffs = True
if len(self.ydef) == 1:
self.no_pairs = True
self.mimic = mimic
self.comparevalue, self.level = self._convert_level(level)
else:
# Set global test algorithm parameters
self.invalid = False
self.no_diffs = False
self.no_pairs = False
valid_mimics = ['Dim', 'askia']
if mimic not in valid_mimics:
raise ValueError('Failed to mimic: "%s". Select from: %s\n'
% (mimic, valid_mimics))
else:
self.mimic = mimic
if self.mimic == 'askia':
self.parameters = {'testtype': 'unpooled',
'use_ebase': False,
'ovlp_correc': False,
'cwi_filter': True,
'base_flags': None}
self.test_total = False
elif self.mimic == 'Dim':
self.parameters = {'testtype': 'pooled',
'use_ebase': True,
'ovlp_correc': True,
'cwi_filter': False,
'base_flags': flag_bases}
self.level = level
self.comparevalue, self.level = self._convert_level(level)
# Get value differences between column pairings
if self.metric == 'means':
self.valdiffs = np.array(
[m1 - m2 for m1, m2 in combinations(self.values[0], 2)])
if self.metric == 'proportions':
# special to askia testing: counts-when-independent filtering
if cwi_filter:
self.values = self._cwi()
props = (self.values / self.cbases).T
self.valdiffs = np.array([p1 - p2 for p1, p2
in combinations(props, 2)]).T
# Set test specific measures for Dimensions-like testing:
# [1] effective base usage
if use_ebase and self.is_weighted:
if not self.test_total:
self.ebases = self.Quantity._effective_n(axis='x', margin=False)
else:
self.ebases = self.Quantity._effective_n(axis='x', margin=True)
else:
self.ebases = self.cbases
# [2] overlap correction
if self.y_is_multi and self.parameters['ovlp_correc']:
self.overlap = self._overlap()
else:
self.overlap = np.zeros(self.valdiffs.shape)
# [3] base flags
if flag_bases:
self.flags = {'min': flag_bases[0],
'small': flag_bases[1]}
self.flags['flagged_bases'] = self._get_base_flags()
else:
self.flags = None
return self
# -------------------------------------------------
# Main algorithm methods to compute test statistics
# -------------------------------------------------
def run(self):
"""
Performs the testing algorithm and creates an output pd.DataFrame.
The output is indexed according to Quantipy's Questions->Values
convention. Significant results between columns are presented as
        lists of integer y-axis codes: the column with the higher value
        holds the codes of the columns with the lower values. NaN
        indicates that a cell does not hold any sig. higher values
        compared to the others.
"""
if not self.invalid:
sigs = self.get_sig()
return self._output(sigs)
else:
return self._empty_output()
def get_sig(self):
"""
TODO: implement returning tstats only.
"""
stat = self.get_statistic()
stat = self._convert_statistic(stat)
if self.metric == 'means':
diffs = pd.DataFrame(self.valdiffs, index=self.ypairs, columns=self.xdef).T
elif self.metric == 'proportions':
stat = pd.DataFrame(stat, index=self.xdef, columns=self.ypairs)
diffs = pd.DataFrame(self.valdiffs, index=self.xdef, columns=self.ypairs)
if self.mimic == 'Dim':
return diffs[(diffs != 0) & (stat < self.comparevalue)]
elif self.mimic == 'askia':
return diffs[(diffs != 0) & (stat > self.comparevalue)]
def get_statistic(self):
"""
Returns the test statistic of the algorithm.
"""
return self.valdiffs / self.get_se()
def get_se(self):
"""
Compute the standard error (se) estimate of the tested metric.
The calculation of the se is defined by the parameters of the setup.
The main difference is the handling of variances. **unpooled**
        implicitly assumes variance inhomogeneity between the column pairing's
samples. **pooled** treats variances effectively as equal.
"""
if self.metric == 'means':
if self.parameters['testtype'] == 'unpooled':
return self._se_mean_unpooled()
elif self.parameters['testtype'] == 'pooled':
return self._se_mean_pooled()
elif self.metric == 'proportions':
if self.parameters['testtype'] == 'unpooled':
return self._se_prop_unpooled()
if self.parameters['testtype'] == 'pooled':
return self._se_prop_pooled()
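    # For two column proportions p1, p2 with bases n1, n2 the two variants
    # roughly correspond to the classic two-sample formulas (ignoring the
    # effective-base and overlap corrections applied in the pooled variant
    # below):
    #
    #   unpooled: se = sqrt(p1*(1-p1)/n1 + p2*(1-p2)/n2)
    #   pooled:   se = sqrt(p*(1-p) * (1/n1 + 1/n2)),  p = (x1+x2)/(n1+n2)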
# -------------------------------------------------
# Conversion methods for levels and statistics
# -------------------------------------------------
def _convert_statistic(self, teststat):
"""
Convert test statistics to match the decision rule of the test logic.
Either transforms to p-values or returns the absolute value of the
statistic, depending on the decision rule of the test.
This is used to mimic other software packages as some tests'
        decision rules check the test statistic against pre-defined thresholds
while others check sig. level against p-value.
"""
if self.mimic == 'Dim':
ebases_pairs = [eb1 + eb2 for eb1, eb2
in combinations(self.ebases[0], 2)]
dof = ebases_pairs - self.overlap - 2
dof[dof <= 1] = np.NaN
return get_pval(dof, teststat)[1]
elif self.mimic == 'askia':
return abs(teststat)
def _convert_level(self, level):
"""
Determines the comparison value for the test's decision rule.
Checks whether the level of test is a string that defines low, medium,
or high significance or an "actual" level of significance and
converts it to a comparison level/significance level tuple.
This is used to mimic other software packages as some test's
        decision rules check the test statistic against pre-defined thresholds
while others check sig. level against p-value.
"""
if isinstance(level, str):
if level == 'low':
if self.mimic == 'Dim':
comparevalue = siglevel = 0.10
elif self.mimic == 'askia':
comparevalue = 1.65
siglevel = 0.10
elif level == 'mid':
if self.mimic == 'Dim':
comparevalue = siglevel = 0.05
elif self.mimic == 'askia':
comparevalue = 1.96
siglevel = 0.05
elif level == 'high':
if self.mimic == 'Dim':
comparevalue = siglevel = 0.01
elif self.mimic == 'askia':
comparevalue = 2.576
siglevel = 0.01
else:
if self.mimic == 'Dim':
comparevalue = siglevel = level
elif self.mimic == 'askia':
comparevalue = 1.65
siglevel = 0.10
return comparevalue, siglevel
# -------------------------------------------------
# Standard error estimates calculation methods
# -------------------------------------------------
def _se_prop_unpooled(self):
"""
Estimated standard errors of prop. diff. (unpool. var.) per col. pair.
"""
props = self.values/self.cbases
unp_sd = ((props*(1-props))/self.cbases).T
return np.array([np.sqrt(cat1 + cat2)
for cat1, cat2 in combinations(unp_sd, 2)]).T
def _se_mean_unpooled(self):
"""
Estimated standard errors of mean diff. (unpool. var.) per col. pair.
"""
sd_base_ratio = self.sd / self.cbases
return np.array([np.sqrt(sd_b_r1 + sd_b_r2)
for sd_b_r1, sd_b_r2
in combinations(sd_base_ratio[0], 2)])[None, :]
def _se_prop_pooled(self):
"""
Estimated standard errors of prop. diff. (pooled var.) per col. pair.
        Controlling for effective base sizes and overlapping responses is
supported and applied as defined by the test's parameters setup.
"""
ebases_correc_pairs = np.array([1 / x + 1 / y
for x, y
in combinations(self.ebases[0], 2)])
if self.y_is_multi and self.parameters['ovlp_correc']:
ovlp_correc_pairs = ((2 * self.overlap) /
[x * y for x, y
in combinations(self.ebases[0], 2)])
else:
ovlp_correc_pairs = self.overlap
counts_sum_pairs = np.array(
[c1 + c2 for c1, c2 in combinations(self.values.T, 2)])
bases_sum_pairs = np.expand_dims(
[b1 + b2 for b1, b2 in combinations(self.cbases[0], 2)], 1)
pooled_props = (counts_sum_pairs/bases_sum_pairs).T
return (np.sqrt(pooled_props * (1 - pooled_props) *
(np.array(ebases_correc_pairs - ovlp_correc_pairs))))
def _se_mean_pooled(self):
"""
Estimated standard errors of mean diff. (pooled var.) per col. pair.
        Controlling for effective base sizes and overlapping responses is
supported and applied as defined by the test's parameters setup.
"""
ssw_base_ratios = self._sum_sq_w(base_ratio=True)
enum = np.nan_to_num((self.sd ** 2) * (self.cbases-1))
denom = self.cbases-ssw_base_ratios
enum_pairs = np.array([enum1 + enum2
for enum1, enum2
in combinations(enum[0], 2)])
denom_pairs = np.array([denom1 + denom2
for denom1, denom2
in combinations(denom[0], 2)])
ebases_correc_pairs = np.array([1/x + 1/y
for x, y
in combinations(self.ebases[0], 2)])
if self.y_is_multi and self.parameters['ovlp_correc']:
ovlp_correc_pairs = ((2*self.overlap) /
[x * y for x, y
in combinations(self.ebases[0], 2)])
else:
ovlp_correc_pairs = self.overlap[None, :]
return (np.sqrt((enum_pairs/denom_pairs) *
(ebases_correc_pairs - ovlp_correc_pairs)))
# -------------------------------------------------
# Specific algorithm values & test option measures
# -------------------------------------------------
def _sum_sq_w(self, base_ratio=True):
"""
"""
if not self.Quantity.w == '@1':
self.Quantity.weight()
if not self.test_total:
ssw = np.nansum(self.Quantity.matrix ** 2, axis=0)[[0], 1:]
else:
ssw = np.nansum(self.Quantity.matrix ** 2, axis=0)[[0], :]
if base_ratio:
return ssw/self.cbases
else:
return ssw
def _cwi(self, threshold=5, as_df=False):
"""
Derives the count distribution assuming independence between columns.
"""
c_col_n = self.cbases
c_cell_n = self.values
t_col_n = self.tbase
if self.rbases.shape[1] > 1:
t_cell_n = self.rbases[1:, :]
else:
t_cell_n = self.rbases[0]
np.place(t_col_n, t_col_n == 0, np.NaN)
np.place(t_cell_n, t_cell_n == 0, np.NaN)
np.place(c_col_n, c_col_n == 0, np.NaN)
np.place(c_cell_n, c_cell_n == 0, np.NaN)
cwi = (t_cell_n * c_col_n) / t_col_n
cwi[cwi < threshold] = np.NaN
if as_df:
return pd.DataFrame(c_cell_n + cwi - cwi,
index=self.xdef, columns=self.ydef)
else:
return c_cell_n + cwi - cwi
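    # Small worked example of the counts-when-independent figures (numbers
    # are made up): with row totals [30, 70], column bases [40, 60] and a
    # total base of 100, the expected cell counts under independence are
    #
    #   cwi = np.outer([30, 70], [40, 60]) / 100.
    #       = [[12., 18.],
    #          [28., 42.]]
    #
    # Cells whose cwi falls below `threshold` are blanked out (set to NaN).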
def _overlap(self):
if self.is_weighted:
self.Quantity.weight()
m = self.Quantity.matrix.copy()
m = np.nansum(m, 1) if self.test_total else np.nansum(m[:, 1:, 1:], 1)
if not self.is_weighted:
m /= m
m[m == 0] = np.NaN
col_pairs = list(combinations(list(range(0, m.shape[1])), 2))
if self.parameters['use_ebase'] and self.is_weighted:
# Overlap computation when effective base is being used
w_sum_sq = np.array([np.nansum(m[:, [c1]] + m[:, [c2]], axis=0)**2
for c1, c2 in col_pairs])
w_sq_sum = np.array([np.nansum(m[:, [c1]]**2 + m[:, [c2]]**2, axis=0)
for c1, c2 in col_pairs])
return np.nan_to_num((w_sum_sq/w_sq_sum)/2).T
else:
# Overlap with simple weighted/unweighted base size
ovlp = np.array([np.nansum(m[:, [c1]] + m[:, [c2]], axis=0)
for c1, c2 in col_pairs])
return (np.nan_to_num(ovlp)/2).T
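    # For a pair of (multi-coded) columns the weighted branch above works out
    # to the effective sample size of the respondents present in both columns,
    # n_eff = (sum of their weights)^2 / (sum of their squared weights), while
    # the unweighted branch returns the plain count of overlapping respondents.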
def _get_base_flags(self):
bases = self.ebases[0]
small = self.flags['small']
minimum = self.flags['min']
flags = []
for base in bases:
if base >= small:
flags.append('')
elif base < small and base >= minimum:
flags.append('*')
else:
flags.append('**')
return flags
# -------------------------------------------------
# Output creation
# -------------------------------------------------
def _output(self, sigs):
res = {y: {x: [] for x in self.xdef} for y in self.ydef}
test_columns = ['@'] + self.ydef if self.test_total else self.ydef
for col, val in sigs.items():
if self._flags_exist():
b1ix, b2ix = test_columns.index(col[0]), test_columns.index(col[1])
b1_ok = self.flags['flagged_bases'][b1ix] != '**'
b2_ok = self.flags['flagged_bases'][b2ix] != '**'
else:
b1_ok, b2_ok = True, True
for row, v in val.items():
if v > 0:
if b2_ok:
if col[0] == '@':
res[col[1]][row].append('@H')
else:
res[col[0]][row].append(col[1])
if v < 0:
if b1_ok:
if col[0] == '@':
res[col[1]][row].append('@L')
else:
res[col[1]][row].append(col[0])
test = pd.DataFrame(res).applymap(lambda x: str(x))
test = test.reindex(index=self.xdef, columns=self.ydef)
if self._flags_exist():
test = self._apply_base_flags(test)
test.replace('[]*', '*', inplace=True)
test.replace('[]', np.NaN, inplace=True)
# removing test results on post-aggregation rows [calc()]
if self.has_calc:
if len(test.index) > 1:
test.iloc[-1:, :] = np.NaN
else:
test.iloc[:, :] = np.NaN
test.index, test.columns = self.multiindex[0], self.multiindex[1]
return test
def _empty_output(self):
"""
"""
values = self.values
if self.metric == 'proportions':
if self.no_pairs or self.no_diffs:
values[:] = np.NaN
if values.shape == (1, 1) or values.shape == (1, 0):
values = [np.NaN]
if self.metric == 'means':
if self.no_pairs:
values = [np.NaN]
if self.no_diffs and not self.no_pairs:
values[:] = np.NaN
return pd.DataFrame(values,
index=self.multiindex[0],
columns=self.multiindex[1])
def _flags_exist(self):
return (self.flags is not None and
                not all(f == '' for f in self.flags['flagged_bases']))
def _apply_base_flags(self, sigres, replace=True):
flags = self.flags['flagged_bases']
if self.test_total: flags = flags[1:]
for res_col, flag in zip(sigres.columns, flags):
if flag == '**':
if replace:
sigres[res_col] = flag
else:
sigres[res_col] = sigres[res_col] + flag
elif flag == '*':
sigres[res_col] = sigres[res_col] + flag
return sigres
class Nest(object):
"""
    Creates an interlocked (nested) variable from a '>'-separated definition
    of source variables and adds it to the case data and meta.
"""
def __init__(self, nest, data, meta):
self.data = data
self.meta = meta
self.name = nest
self.variables = nest.split('>')
self.levels = len(self.variables)
self.level_codes = []
self.code_maps = None
self._needs_multi = self._any_multicoded()
def nest(self):
self._get_nested_meta()
self._get_code_maps()
interlocked = self._interlock_codes()
if not self.name in self.data.columns:
recode_map = {code: intersection(code_pair) for code, code_pair
in enumerate(interlocked, start=1)}
self.data[self.name] = np.NaN
self.data[self.name] = recode(self.meta, self.data,
target=self.name, mapper=recode_map)
nest_info = {'variables': self.variables,
'level_codes': self.level_codes,
'levels': self.levels}
return nest_info
def _any_multicoded(self):
return any(self.data[self.variables].dtypes == 'str')
def _get_code_maps(self):
code_maps = []
for level, var in enumerate(self.variables):
mapping = [{var: [int(code)]} for code
in self.level_codes[level]]
code_maps.append(mapping)
self.code_maps = code_maps
return None
def _interlock_codes(self):
return list(product(*self.code_maps))
def _get_nested_meta(self):
meta_dict = {}
qtext, valtexts = self._interlock_texts()
meta_dict['type'] = 'delimited set' if self._needs_multi else 'single'
meta_dict['text'] = {'en-GB': '>'.join(qtext[0])}
meta_dict['values'] = [{'text' : {'en-GB': '>'.join(valtext)},
'value': c}
for c, valtext
in enumerate(valtexts, start=1)]
self.meta['columns'][self.name] = meta_dict
return None
def _interlock_texts(self):
all_valtexts = []
all_qtexts = []
for var in self.variables:
var_valtexts = []
values = self.meta['columns'][var]['values']
all_qtexts.append(list(self.meta['columns'][var]['text'].values()))
for value in values:
var_valtexts.append(list(value['text'].values())[0])
all_valtexts.append(var_valtexts)
self.level_codes.append([code['value'] for code in values])
interlocked_valtexts = list(product(*all_valtexts))
interlocked_qtexts = list(product(*all_qtexts))
return interlocked_qtexts, interlocked_valtexts
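# Illustrative sketch of the interlocking step used by Nest (variable names
# and codes are hypothetical):
#
#   from itertools import product
#   gender_maps = [{'gender': [1]}, {'gender': [2]}]
#   region_maps = [{'region': [1]}, {'region': [2]}, {'region': [3]}]
#   interlocked = list(product(gender_maps, region_maps))
#   # 6 combinations; enumerate(interlocked, start=1) assigns the new
#   # nested codes 1..6, each recoded via intersection() of its code pair.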
##############################################################################
class Multivariate(object):
def __init__(self):
pass
def _select_variables(self, x, y=None, w=None, drop_listwise=False):
x_vars, y_vars = [], []
if not isinstance(x, list): x = [x]
if not isinstance(y, list) and not y=='@': y = [y]
if w is None: w = '@1'
wrong_var_sel_1_on_1 = 'Can only analyze 1-to-1 relationships.'
if self.analysis == 'Reduction' and (not (len(x) == 1 and len(y) == 1) or y=='@'):
raise AttributeError(wrong_var_sel_1_on_1)
for var in x:
if self.ds._is_array(var):
if self.analysis == 'Reduction': raise AttributeError(wrong_var_sel_1_on_1)
x_a_items = self.ds._get_itemmap(var, non_mapped='items')
x_vars += x_a_items
else:
x_vars.append(var)
if y and not y == '@':
for var in y:
if self.ds._is_array(var):
if self.analysis == 'Reduction': raise AttributeError(wrong_var_sel_1_on_1)
y_a_items = self.ds._get_itemmap(var, non_mapped='items')
y_vars += y_a_items
else:
y_vars.append(var)
elif y == '@':
y_vars = x_vars
if x_vars == y_vars or y is None:
data_slice = x_vars + [w]
else:
data_slice = x_vars + y_vars + [w]
if self.analysis == 'Relations' and y != '@':
self.x = self.y = x_vars + y_vars
self._org_x, self._org_y = x_vars, y_vars
else:
self.x = self._org_x = x_vars
self.y = self._org_y = y_vars
self.w = w
self._analysisdata = self.ds[data_slice]
self._drop_missings()
if drop_listwise:
self._analysisdata.dropna(inplace=True)
valid = self._analysisdata.index
self.ds._data = self.ds._data.loc[valid, :]
return None
def _drop_missings(self):
data = self._analysisdata.copy()
for var in data.columns:
if self.ds._has_missings(var):
drop = self.ds._get_missing_list(var, globally=False)
data[var].replace(drop, np.NaN, inplace=True)
self._analysisdata = data
return None
def _has_analysis_data(self):
if not hasattr(self, '_analysisdata'):
raise AttributeError('No analysis variables assigned!')
def _has_yvar(self):
if self.y is None:
raise AttributeError('Must select at least one y-variable or '
'"@"-matrix indicator!')
def _get_quantities(self, create='all'):
crossed_quantities = []
single_quantities = []
helper_stack = qp.Stack()
helper_stack.add_data(self.ds.name, self.ds._data, self.ds._meta)
w = self.w if self.w != '@1' else None
for x, y in product(self.x, self.y):
helper_stack.add_link(x=x, y=y)
l = helper_stack[self.ds.name]['no_filter'][x][y]
crossed_quantities.append(qp.Quantity(l, weight=w))
for x in self._org_x+self._org_y:
helper_stack.add_link(x=x, y='@')
l = helper_stack[self.ds.name]['no_filter'][x]['@']
single_quantities.append(qp.Quantity(l, weight=w))
self.single_quantities = single_quantities
self.crossed_quantities = crossed_quantities
return None
class Reductions(Multivariate):
def __init__(self, dataset):
self.ds = dataset
self.single_quantities = None
self.crossed_quantities = None
self.analysis = 'Reduction'
def plot(self, type, point_coords):
plt.set_autoscale_on = False
plt.figure(figsize=(5, 5))
plt.xlim([-1, 1])
plt.ylim([-1, 1])
#plt.axvline(x=0.0, c='grey', ls='solid', linewidth=0.9)
#plt.axhline(y=0.0, c='grey', ls='solid', linewidth=0.9)
x = plt.scatter(point_coords['x'][0], point_coords['x'][1],
edgecolor='w', marker='o', c='red', s=20)
y = plt.scatter(point_coords['y'][0], point_coords['y'][1],
edgecolor='k', marker='^', c='lightgrey', s=20)
fig = x.get_figure()
# print fig.get_axes()[0].grid()
fig.get_axes()[0].tick_params(labelsize=6)
fig.get_axes()[0].patch.set_facecolor('w')
fig.get_axes()[0].grid(which='major', linestyle='solid', color='grey',
linewidth=0.6)
fig.get_axes()[0].xaxis.get_major_ticks()[0].label1.set_visible(False)
x0 = fig.get_axes()[0].get_position().x0
y0 = fig.get_axes()[0].get_position().y0
x1 = fig.get_axes()[0].get_position().x1
y1 = fig.get_axes()[0].get_position().y1
text = 'Correspondence map'
plt.figtext(x0+0.015, 1.09-y0, text, fontsize=12, color='w',
fontweight='bold', verticalalignment='top',
bbox={'facecolor':'red', 'alpha': 0.8, 'edgecolor': 'w',
'pad': 10})
label_map = self._get_point_label_map('CA', point_coords)
for axis in list(label_map.keys()):
for lab, coord in list(label_map[axis].items()):
plt.annotate(lab, coord, ha='left', va='bottom',
fontsize=6)
plt.legend((x, y), (self.x[0], self.y[0]),
loc='best', bbox_to_anchor=(1.325, 1.07),
ncol=2, fontsize=6, title=' ')
x_codes, x_texts = self.ds._get_valuemap(self.x[0], non_mapped='lists')
y_codes, y_texts = self.ds._get_valuemap(self.y[0], non_mapped='lists')
text = ' '*80
for var in zip(x_codes, x_texts):
text += '\n{}: {}\n'.format(var[0], var[1])
fig.text(1.06-x0, 0.85, text, fontsize=5, verticalalignment='top',
bbox={'facecolor':'red',
'edgecolor': 'w', 'pad': 10})
x_len = len(x_codes)
text = ' '*80
for var in zip(y_codes, y_texts):
text += '\n{}: {}\n'.format(var[0], var[1])
test = fig.text(1.06-x0, 0.85-((x_len)*0.0155)-((x_len)*0.0155)-0.05, text, fontsize=5, verticalalignment='top',
bbox={'facecolor': 'lightgrey', 'alpha': 0.65,
'edgecolor': 'w', 'pad': 10})
logo = Image.open('C:/Users/alt/Documents/IPython Notebooks/Designs/Multivariate class/__resources__/YG_logo.png')
newax = fig.add_axes([x0+0.005, y0-0.25, 0.1, 0.1], anchor='NE', zorder=-1)
newax.imshow(logo)
newax.axis('off')
fig.savefig(self.ds.path + 'correspond.png', bbox_inches='tight', dpi=300)
def correspondence(self, x, y, w=None, norm='sym', diags=True, plot=True):
"""
Perform a (multiple) correspondence analysis.
Parameters
----------
        norm : {'sym', 'princ'}, default 'sym'
            Normalization of the row and column scores: 'sym' applies
            symmetric (0.5) scaling of the singular values, 'princ'
            principal (1.0) scaling.
        diags : bool, default True
            If True, the output will contain a dataframe that summarizes core
            information about the Inertia decomposition.
        plot : bool, default True
            If set to True, a correspondence map plot will be saved in the
            Stack's data path location.
Returns
-------
results: pd.DataFrame
Summary of analysis results.
"""
self._select_variables(x, y, w)
self._get_quantities()
# 1. Chi^2 analysis
obs, exp = self.expected_counts(x=x, y=y, return_observed=True)
chisq, sig = self.chi_sq(x=x, y=y, sig=True)
inertia = chisq / np.nansum(obs)
# 2. svd on standardized residuals
std_residuals = ((obs - exp) / np.sqrt(exp)) / np.sqrt(np.nansum(obs))
sv, row_eigen_mat, col_eigen_mat, ev = self._svd(std_residuals)
# 3. row and column coordinates
a = 0.5 if norm == 'sym' else 1.0
row_mass = self.mass(x=x, y=y, margin='x')
col_mass = self.mass(x=x, y=y, margin='y')
dim = min(row_mass.shape[0]-1, col_mass.shape[0]-1)
row_sc = (row_eigen_mat * sv[:, 0] ** a) / np.sqrt(row_mass)
col_sc = (col_eigen_mat.T * sv[:, 0] ** a) / np.sqrt(col_mass)
if plot:
# prep coordinates for plot
item_sep = len(self.single_quantities[0].xdef)
dim1_c = [r_s[0] for r_s in row_sc] + [c_s[0] for c_s in col_sc]
# dim2_c = [r_s[1]*(-1) for r_s in row_sc] + [c_s[1]*(-1) for c_s in col_sc]
dim2_c = [r_s[1] for r_s in row_sc] + [c_s[1] for c_s in col_sc]
dim1_xitem, dim2_xitem = dim1_c[:item_sep], dim2_c[:item_sep]
dim1_yitem, dim2_yitem = dim1_c[item_sep:], dim2_c[item_sep:]
coords = {'x': [dim1_xitem, dim2_xitem],
'y': [dim1_yitem, dim2_yitem]}
self.plot('CA', coords)
plt.show()
if diags:
_dim = range(1, dim+1)
chisq_stats = [chisq, 'sig: {}'.format(sig),
'dof: {}'.format((obs.shape[0] - 1)*(obs.shape[1] - 1))]
_chisq = ([np.NaN] * (dim-3)) + chisq_stats
_sig = ([np.NaN] * (dim-2)) + [chisq]
_sv, _ev = sv[:dim, 0], ev[:dim, 0]
_expl_inertia = 100 * (ev[:dim, 0] / inertia)
_cumul_expl_inertia = np.cumsum(_expl_inertia)
_perc_chisq = _expl_inertia / 100 * chisq
labels = ['Dimension', 'Singular values', 'Eigen values',
'explained % of Inertia', 'cumulative % explained',
'explained Chi^2', 'Total Chi^2']
results = pd.DataFrame([_dim, _sv, _ev, _expl_inertia,
_cumul_expl_inertia,_perc_chisq, _chisq]).T
results.columns = labels
results.set_index('Dimension', inplace=True)
return results
def _get_point_label_map(self, type, point_coords):
if type == 'CA':
xcoords = list(zip(point_coords['x'][0],point_coords['x'][1]))
xlabels = self.crossed_quantities[0].xdef
x_point_map = {lab: coord for lab, coord in zip(xlabels, xcoords)}
ycoords = list(zip(point_coords['y'][0], point_coords['y'][1]))
ylabels = self.crossed_quantities[0].ydef
y_point_map = {lab: coord for lab, coord in zip(ylabels, ycoords)}
return {'x': x_point_map, 'y': y_point_map}
def mass(self, x, y, w=None, margin=None):
"""
        Compute rel. margins or total cell frequencies of a contingency table.
"""
counts = self.crossed_quantities[0].count(margin=False)
total = counts.cbase[0, 0]
if margin is None:
return counts.result.values / total
elif margin == 'x':
return counts.rbase[1:, :] / total
elif margin == 'y':
return (counts.cbase[:, 1:] / total).T
def expected_counts(self, x, y, w=None, return_observed=False):
"""
Compute expected cell distribution given observed absolute frequencies.
"""
#self.single_quantities, self.crossed_quantities = self._get_quantities()
counts = self.crossed_quantities[0].count(margin=False)
total = counts.cbase[0, 0]
row_m = counts.rbase[1:, :]
col_m = counts.cbase[:, 1:]
if not return_observed:
return (row_m * col_m) / total
else:
return counts.result.values, (row_m * col_m) / total
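    # The expected counts follow the usual independence model for a
    # contingency table: E[i, j] = (row_total_i * column_total_j) / N.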
def chi_sq(self, x, y, w=None, sig=False, as_inertia=False):
"""
Compute global Chi^2 statistic, optionally transformed into Inertia.
"""
obs, exp = self.expected_counts(x=x, y=y, return_observed=True)
diff_matrix = ((obs - exp)**2) / exp
total_chi_sq = np.nansum(diff_matrix)
if sig:
dof = (obs.shape[0] - 1) * (obs.shape[1] - 1)
sig_result = np.round(1 - chi2dist.cdf(total_chi_sq, dof), 3)
if as_inertia: total_chi_sq /= np.nansum(obs)
if sig:
return total_chi_sq, sig_result
else:
return total_chi_sq
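    # Recap of the statistics computed above:
    #
    #   chi^2   = sum_ij (O_ij - E_ij)^2 / E_ij
    #   inertia = chi^2 / N            (with as_inertia=True)
    #   dof     = (rows - 1) * (columns - 1)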
def _svd(self, matrix, return_eigen_matrices=True, return_eigen=True):
"""
Singular value decomposition wrapping np.linalg.svd().
"""
u, s, v = np.linalg.svd(matrix, full_matrices=False)
s = s[:, None]
if not return_eigen:
if return_eigen_matrices:
return s, u, v
else:
return s
else:
if return_eigen_matrices:
return s, u, v, (s ** 2)
else:
return s, (s ** 2)
class LinearModels(Multivariate):
"""
    Ordinary least squares (OLS) regression models.
"""
def __init__(self, dataset):
self.ds = dataset.copy()
self.single_quantities = None
self.crossed_quantities = None
self.analysis = 'LinearModels'
def set_model(self, y, x, w=None, intercept=True):
"""
"""
self._select_variables(x=x, y=y, w=w, drop_listwise=True)
self._get_quantities()
self._matrix = self.ds[self.y + self.x + [self.w]].dropna().values
ymean = self.single_quantities[-1].summarize('mean', as_df=False)
self._ymean = ymean.result[0, 0]
self._use_intercept = intercept
self.dofs = self._dofs()
predictors = ' + '.join(self.x)
if self._use_intercept: predictors = 'c + ' + predictors
self.formula = '{} ~ {}'.format(y, predictors)
return self
def _dofs(self):
"""
"""
correction = 1 if self._use_intercept else 0
obs = self._matrix[:, -1].sum()
tdof = obs - correction
mdof = len(self.x)
rdof = obs - mdof - correction
return [tdof, mdof, rdof]
def _vectors(self):
"""
"""
w = self._matrix[:, [-1]]
y = self._matrix[:, [0]]
x = self._matrix[:, 1:-1]
x = np.concatenate([np.ones((x.shape[0], 1)), x], axis=1)
return w, y, x
def get_coefs(self, standardize=False):
coefs = self._coefs() if not standardize else self._betas()
coef_df = pd.DataFrame(coefs,
index = ['-c-'] + self.x
if self._use_intercept else self.x,
columns = ['b']
if not standardize else ['beta'])
coef_df.replace(np.NaN, '', inplace=True)
return coef_df
def _betas(self):
"""
"""
corr_mat = Relations(self.ds).corr(self.x, self.y, self.w, n=False, sig=None,
drop_listwise=True, matrixed=True)
corr_mat = corr_mat.values
predictors = corr_mat[:-1, :-1]
y = corr_mat[:-1, [-1]]
inv_predictors = np.linalg.inv(predictors)
betas = inv_predictors.dot(y)
if self._use_intercept:
betas = np.vstack([[np.NaN], betas])
return betas
def _coefs(self):
"""
"""
w, y, x = self._vectors()
coefs = np.dot(np.linalg.inv(np.dot(x.T, x*w)), np.dot(x.T, y*w))
return coefs
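    # `_coefs` solves the weighted least-squares normal equations,
    # b = (X'WX)^-1 X'Wy, with the case weights w forming the diagonal of W.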
def get_modelfit(self, r_sq=True):
anova, fit_stats = self._sum_of_squares()
dofs = np.round(np.array(self.dofs)[:, None], 0)
anova_stats = np.hstack([anova, dofs, fit_stats])
anova_df = pd.DataFrame(anova_stats,
index=['total', 'model', 'residual'],
columns=['sum of squares', 'dof', 'R', 'R^2'])
anova_df.replace(np.NaN, '', inplace=True)
return anova_df
def _sum_of_squares(self):
"""
"""
w, y, x = self._vectors()
x_w = x*w
hat = x_w.dot(np.dot(np.linalg.inv(np.dot(x.T, x_w)), x.T))
tss = (w*(y - self._ymean)**2).sum()[None]
rss = y.T.dot(np.dot(np.eye(hat.shape[0])-hat, y*w))[0]
ess = tss-rss
all_ss = np.vstack([tss, ess, rss])
rsq = np.vstack([[np.NaN], ess/tss, [np.NaN]])
r = np.sqrt(rsq)
all_rs = np.hstack([r, rsq])
return all_ss, all_rs
def estimate(self, estimator='ols', diags=True):
"""
"""
# Wrap up the modularized computation methods
coefs, betas = self.get_coefs(), self.get_coefs(True)
modelfit = self.get_modelfit()
# Compute diagnostics, i.e. standard errors and sig. of estimates/fit
# prerequisites
w, _, x = self._vectors()
rss = modelfit.loc['residual', 'sum of squares']
ess = modelfit.loc['model', 'sum of squares']
# coefficients: std. errors, t-stats, sigs
c_se = np.diagonal(np.sqrt(np.linalg.inv(np.dot(x.T,x*w)) *
(rss/self.dofs[-1])))[None].T
c_sigs = np.hstack(get_pval(self.dofs[-1], coefs/c_se))
c_diags = np.round(np.hstack([c_se, c_sigs]), 6)
c_diags_df = pd.DataFrame(c_diags, index=coefs.index,
columns=['se', 't-stat', 'p'])
# modelfit: se, F-stat, ...
m_se = np.vstack([[np.NaN], np.sqrt(rss/self.dofs[-1]), [np.NaN]])
m_fstat = np.vstack([[np.NaN],
(ess/self.dofs[1]) / (rss/self.dofs[-1]),
[np.NaN]])
m_sigs = 1-fdist.cdf(m_fstat, self.dofs[1], self.dofs[-1])
m_diags = np.round(np.hstack([m_se, m_fstat, m_sigs]), 6)
m_diags_df = pd.DataFrame(m_diags, index=modelfit.index,
columns=['se', 'F-stat', 'p'])
# Put everything together
parameter_results = pd.concat([coefs, betas, c_diags_df], axis=1)
fit_summary = | pd.concat([modelfit, m_diags_df], axis=1) | pandas.concat |
import numpy as np
from glob import glob
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
import corner
import hadcrut5
import re
import os
from scipy.stats import norm
from settings import datafolder
from steric_tools import parse_run
print('combine_v2.py')
# This file combines ice with steric.
#
# 1) Combine the dSdt and T estimates from all contributors
# 2) Combine the TSLS estimates of all contributors (NOT DONE YET)
np.random.seed(1337)
#FIRST COMBINE THE dSdt vs T data.
steric = pd.read_csv(f'{datafolder}/processed_data/ExtractedFromSSH/StericTvsRate_averaged.csv', index_col=0)
steric['model_key'] = steric.model + ':p' + steric['p'].astype(str) + ':' + steric.startyr.astype(str) + ':' + steric.endyr.astype(str) + ':' + steric.scenario
steric['probability_weight'] = 0.0
#---------------------------------
# steric = pd.read_csv(f'{datafolder}/processed_data/ExtractedFromSSH/StericTvsRate.csv', index_col=0)
# steric = steric.join(parse_run(steric.run))
# steric['model_key'] = steric.model + ':p' + steric['p'] + ':' + steric.startyr.astype(str) + ':' + steric.endyr.astype(str) + ':' + steric.scenario
# mymean = lambda x: x.mean() if x.dtype == np.float64 else x.iloc[0]
# steric = steric.groupby(by=['model_key']).agg(mymean).reset_index()
# steric['probability_weight'] = 0.0
# #Add a column with how many times each model_key appears in the dataset
# counts = steric.pivot_table(index=['model_key'], aggfunc='size')
# counts = pd.DataFrame(counts) # Convert Series to DataFrame
# counts.index.name = 'model_key'
# counts.reset_index(inplace=True) # Change row names to be a column
# counts.columns = ['model_key', 'counts']
# steric = steric.merge(counts) # Merge dataframes on common column
tfolder = f'{datafolder}/processed_data/ExtractedFromTamsin/'
ice = {'WAIS': None,
'EAIS': None,
'PEN': None,
'GIC': None,
'GrIS': None}
tsls_steric = | pd.read_csv(f'{datafolder}/processed_data/TSLS_estimates/tsls_steric.csv') | pandas.read_csv |
###
# Generate breakdown of missing data by column name
###
# Output:
# | state | eviction_records | demographic_records | joined_records |
# | ------------- | ------------------------ | -------------------------- | --------------------------- |
# | state fips | # of eviction record | # of demographic records | # of records in public data |
###
import os
import sys
import csv
import pandas as pd
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
EVICTIONS_DATA_DIR = os.path.join(BASE_DIR, 'data', 'full-evictions')
DEMOGRAPHICS_DATA_DIR = os.path.join(BASE_DIR, 'data', 'demographics')
if __name__ == '__main__':
geography = sys.argv[1]
filename = geography + '.csv'
# load data frame with eviction records
evictions_df = pd.read_csv(
os.path.join(EVICTIONS_DATA_DIR, filename),
dtype = { 'GEOID': 'object' })
# get list of geoids
evictions_geoids = evictions_df.drop_duplicates('GEOID')['GEOID'].tolist()
# load data frame with demographic records
demographics_df = pd.read_csv(
os.path.join(DEMOGRAPHICS_DATA_DIR, filename),
dtype = { 'GEOID': 'object' })
# get unique geoids
demographics_geoids = demographics_df.drop_duplicates('GEOID')['GEOID'].tolist()
# get geoids that appear in eviction records but not demographics
geoids_w_no_dem = list(set(evictions_geoids) - set(demographics_geoids))
# output as csv
data = []
for item in geoids_w_no_dem:
data.append({ 'type': geography, 'GEOID': item })
output_df = | pd.DataFrame(data) | pandas.DataFrame |
#!/usr/bin/python3
from collections import OrderedDict
from datetime import datetime
from docx import Document
from docx.shared import Inches
from docx.enum.text import WD_ALIGN_PARAGRAPH
from glob import glob
from joblib import dump, load
from matplotlib import cm
from matplotlib import pyplot
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from matplotlib.transforms import Bbox
from PIL import Image
from PIL import ImageDraw, ImageFont
from scipy.stats import spearmanr, pearsonr
from typing import Dict, Any
import copy
import cv2
import hashlib
import itertools
import json
import math
import matplotlib
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import multiprocessing
import numpy as np
import os
import pandas as pd
import pathlib
import pickle
import pylab
import random
import scipy.cluster.hierarchy as sch
import seaborn as sns
import shutil
import sys
import tempfile
import time
# delong
import rpy2
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
from rpy2.robjects import pandas2ri
pandas2ri.activate()
import mlflow
from mlflow.tracking import MlflowClient
from loadData import *
from utils import *
from parameters import *
from evaluate_utils import *
from contextlib import contextmanager
### parameters
cFigNumber = 1
document = None
def getResults (dList):
mlflow.set_tracking_uri(TrackingPath)
if os.path.exists("./results/results.feather") == False:
results = []
for d in dList:
current_experiment = dict(mlflow.get_experiment_by_name(d))
experiment_id = current_experiment['experiment_id']
runs = MlflowClient().search_runs(experiment_ids=experiment_id, max_results=50000)
for r in runs:
row = r.data.metrics
row["UUID"] = r.info.run_uuid
row["Model"] = r.data.tags["Version"]
row["Parameter"] = r.data.tags["pID"]
# stupid naming error
row["Parameter"] = row["Parameter"]
row["Model"] = row["Model"]
row["FSel"], row["Clf"] = row["Model"].split("_")
row["Dataset"] = d
row["nFeatures"] = eval(row["Parameter"])[row["FSel"]]["nFeatures"]
row["Path"] = os.path.join(TrackingPath, str(experiment_id), str(r.info.run_uuid), "artifacts")
results.append(row)
# read timings
apath = os.path.join(row["Path"], "timings.json")
with open(apath) as f:
expData = json.load(f)
row.update(expData)
# read AUCs
apath = os.path.join(row["Path"], "aucStats.json")
with open(apath) as f:
aucData = json.load(f)
row.update(aucData)
results = pd.DataFrame(results)
print ("Pickling results")
pickle.dump (results, open("./results/results.feather","wb"))
else:
print ("Restoring results")
results = pickle.load(open("./results/results.feather", "rb"))
return results
def delongTest (predsX, predsY):
pROC = importr('pROC')
Y = predsX["y_true"].values
scoresA = predsX["y_pred"].values
scoresB = predsY["y_pred"].values
rocA = pROC.roc (Y, scoresA)
rocB = pROC.roc (Y, scoresB)
aucA = pROC.auc(Y, scoresA)
aucB = pROC.auc(Y, scoresB)
#print ("AUC A:" + str(aucA))
#print ("AUC B:" + str(aucB))
robjects.globalenv['rocA'] = rocA
robjects.globalenv['rocB'] = rocB
z = rpy2.robjects.packages.reval ("library(pROC);z = roc.test(rocA, rocB, method= 'delong', progress='none'); p = z$p.value")
z = robjects.r.z
p = robjects.r.p[0]
return p
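# Hedged usage sketch: given two prediction tables with 'y_true'/'y_pred'
# columns (as written by the experiments to preds.csv), a DeLong comparison
# of their ROC curves could look like
#
#   predsA = pd.read_csv('.../preds.csv')   # paths are placeholders
#   predsB = pd.read_csv('.../preds.csv')
#   p = delongTest(predsA, predsB)
#   # p < 0.05 -> the two AUCs differ significantly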
def getDeLongTest (dList, results):
# check cache..
if os.path.exists("./results/delongTests.joblib") == False:
rMatList = {}
for d in dList:
print(d)
fSels = sorted(list(set(results["FSel"].values)))
nList = sorted(list(set(results["nFeatures"].values)))
clfs = sorted(list(set(results["Clf"].values)))
rMat = np.zeros( (len(clfs), len(fSels) ) )
rMat = pd.DataFrame(rMat, index = clfs, columns = fSels)
# get best overall
aTable = results.query("Dataset == @d and FSel in @fSels")
aTable = aTable.sort_values("AUC", ascending = False).reset_index(drop = True).copy()
best = aTable.iloc[0].copy()
# before we start, test if best model is different from random?
predsX = pd.read_csv(os.path.join(best["Path"], "preds.csv"))
predsY = predsX.copy()
predsY["y_pred"] = 0.5
print ("Testing if best model is better than random:")
p = delongTest (predsX, predsY)
if p < 0.05:
print ("Yes, p = ", p)
else:
print ("No, p = ", p)
for c in clfs:
for f in fSels:
aTable = results.query("Dataset == @d and FSel == @f and Clf == @c")
aTable = aTable.sort_values("AUC", ascending = False).reset_index(drop = True).copy()
cur = aTable.iloc[0].copy()
# load both preds
predsX = pd.read_csv(os.path.join(best["Path"], "preds.csv"))
predsY = pd.read_csv(os.path.join(cur["Path"], "preds.csv"))
p = delongTest (predsX, predsY)
if p < 0.05:
pass
else:
rMat.at[c,f] = p
rMatList[d] = rMat
dump(rMatList, "./results/delongTests.joblib")
else:
print ("Restoring delong results")
rMatList = load( "./results/delongTests.joblib")
return rMatList
def plot_DataHisto (dList):
DPI = 300
fig, ax = plt.subplots(4,4, figsize = (25, 20), dpi = DPI)
N = len(dList)
palette = sns.color_palette("hls", N+N//3)[:N]
for fidx, d in enumerate(dList):
X, y = datasets[d]
M = X.corr().values
mask = np.triu(M*0+1, k =1)
v = np.extract(mask, M)
fidx_y= fidx % 4
fidx_x = fidx//4
doHistoPlot (v, d, "./results/Data_Hist_" + d + ".png", ax = ax[fidx_x][fidx_y], color = palette[fidx], fig = fig)
# use last one, does not matter which one actually
data = eval (d+"().getData('./data/')")
y = data["Target"]
X = data.drop(["Target"], axis = 1)
X, y = preprocessData (X, y)
arrays = [np.random.normal(loc=0, scale=1, size=(X.shape[0])) for s in range(X.shape[1])]
X = np.vstack(arrays).T
X = pd.DataFrame(X)
M = X.corr().values
mask = np.triu(M*0+1, k =1)
f = np.extract(mask, M)
doHistoPlot (f, "Normal", "./results/Data_Hist_Normal.png", ax = ax[3][3], color = "black", range = True, fig = fig)
# remove unused plot
for x, y in [ (3,2)]:
ax[x][y] .spines['right'].set_visible(False)
ax[x][y] .spines['top'].set_visible(False)
ax[x][y] .spines['bottom'].set_visible(False)
ax[x][y] .spines['left'].set_visible(False)
ax[x][y] .spines['left'].set_visible(False)
ax[x][y].xaxis.set_visible(False)
ax[x][y].yaxis.set_visible(False)
plt.tight_layout(pad=3.0)
fig.savefig("./paper/Figure_2.png", facecolor = 'w', bbox_inches='tight')
plt.close('all')
plt.rc('text', usetex=False)
pass
def countModels (dList, results):
rMatList = getDeLongTest (dList, results)
cnts = {}
for d in rMatList:
z = rMatList[d] >= 0.05
cnts[d] = {"Count": int(np.sum(z.values)) - 1, # because best model does not count
"AUC": np.round (np.max(results.query(' Dataset == @d')["AUC"]), 2) }
df = pd.DataFrame(cnts).T[["AUC", "Count"]]
df["Count"] = df["Count"].astype(np.uint32)
df = df.sort_values(["AUC"], ascending = False)
df.to_csv("./paper/Table_4.csv")
# dont care which f,
f = "MIM"
z = results.query(' Dataset == @d and FSel == @f')
nDataSets= len(set(results["Dataset"]))
print ("#Datasets", nDataSets)
print ("#Models per Dataset", results.shape[0]/nDataSets)
nFSel= len(set(results["FSel"]))
print ("#Models per FSel and Dataset", results.shape[0]/nDataSets/nFSel)
nClf = len(set(results["Clf"]))
print ("#Classifier", nClf)
# number of features=1,2,..64= 7
nF = len(set(z["nFeatures"]))
print ("Have", nF, "number of features")
# number of hyperparameters
cf = z["Clf"].value_counts()/nF
print ("Have for each FSel", sum(cf.values), "models/hyperparameters")
total = rMatList[d].shape[0]* rMatList[d].shape[1] - 1 # best model doesnt really count
print ("Total stat.eq. models over all datasets:", np.mean(df["Count"]), "/", total)
print ("Percentage of stat eq. models per dataset:", np.mean(df["Count"])/total)
total = total*len(rMatList)
print ("Total stat.eq. models over all datasets:", np.sum(df["Count"]), "/", total)
print ("Percentage of stat.eq. models over all datasets:", np.sum(df["Count"])/total)
def plot_modelAUCs (dList, results):
print ("Plotting model AUCs")
rMatList = getDeLongTest (dList, results)
sTable = []
for d in rMatList.keys():
# no idea what pandas fct to use..
for f in rMatList[d].keys():
for c in rMatList[d].index:
sTable.append({"Dataset": d, "FSel": f, "Clf": c, "p": rMatList[d].at[c,f]})
sTable = pd.DataFrame(sTable)
for (i, (idx, row)) in enumerate(sTable.iterrows()):
d, f, c = row["Dataset"], row["FSel"], row["Clf"]
aTable = results.query ("Dataset == @d and FSel == @f and Clf == @c")
aTable = aTable.sort_values("AUC", ascending = False).reset_index(drop = True).copy()
cur = aTable.iloc[0].copy()
sTable.at[idx, "AUC"] = cur["AUC"]
sTable["Statistically Similar"] = sTable["p"] >= 0.05
if 1 == 1:
DPI = 300
fig, ax = plt.subplots(figsize = (15, 10), dpi = DPI)
sns.set(style='white')
#strange but well
# nFSel = len(set([k[0] for k in z.index]))
palette = sns.color_palette("hls", 8)[0::4]
sns.stripplot(x="AUC", y="Dataset", jitter = 0.25, data=sTable, palette = palette, hue = "Statistically Similar")
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.ylabel('Dataset', fontsize = 22, labelpad = 12)
plt.xlabel('AUC-ROC', fontsize= 22, labelpad = 12)
plt.setp(ax.get_legend().get_texts(), fontsize='16') # for legend text
plt.setp(ax.get_legend().get_title(), fontsize='20') # for legend title
#ax.set_xticks(nList[1:])#, rotation = 0, ha = "right", fontsize = 22)
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
plt.tight_layout()
fig.savefig("./paper/Figure_1.png", facecolor = 'w', bbox_inches='tight')
print ("Done: Plotting model AUCs")
pass
def stability (u, v):
assert (len(u) == len(v))
m = len(u)
SC = 0
for i in range(m):
for j in range(i+1,m):
coef, p = pearsonr(u[i,:], v[j,:])
SC = SC + coef
SC = 2/(m*(m-1))*SC
return SC
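# Tiny worked example of the stability coefficient (illustrative): with m = 2
# folds that selected exactly the same features,
#
#   u = np.array([[1, 0, 1, 0],
#                 [1, 0, 1, 0]])
#   stability(u, u)   # pearsonr of the two rows is 1.0 -> SC = 2/(2*1)*1 = 1.0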
def getFPattern (model):
z = []
for m in range(nCV):
apath = os.path.join(model["Path"], "FPattern_" + str(m) + ".json")
with open(apath) as f:
expData = json.load(f)
z.append(list(expData.values()))
z = np.asarray(z)
return z
def getStability (model):
z = getFPattern (model)
SC = stability (z, z)
return SC
def plot_Stability_Curves (dList, results):
global document;
global cFigNumber;
document = Document()
font = document.styles['Normal'].font
font.name = 'Arial'
document.add_heading('Supplemental 2')
document.add_paragraph(' ')
document.add_heading('Stability vs. number of features', level = 2)
document.add_paragraph(' ')
# check cache..
if os.path.exists("./results/stability.joblib") == False:
fTable = []
for d in dList:
fSels = sorted(list(set(results["FSel"].values)))
nList = sorted(list(set(results["nFeatures"].values)))
for f in fSels:
for n in nList:
aTable = results.query("Dataset == @d and FSel == @f and nFeatures == @n").copy()
for (i, (idx, row)) in enumerate(aTable.iterrows()):
stab = getStability(row)
aTable.at[idx, "Stability"] = stab
# should be the same, but if there was any kind of numerical error,
# taking median makes more sense
fTable.append({"Dataset": d, "Feature Selection": f, "N": n, "Stability": np.median(aTable["Stability"]) })
dump(fTable, "./results/stability.joblib")
else:
print ("Restoring stability results")
fTable = load( "./results/stability.joblib")
# group by dataset and take mean
df = pd.DataFrame(fTable)
z = df.groupby(["Feature Selection", "N"]).median(["Stability"])
nList = sorted(list(set(df["N"].values)))
def doPlot (z, fname):
DPI = 300
fig, ax = plt.subplots(figsize = (10, 10), dpi = DPI)
sns.set(style='white')
#strange but well
nFSel = len(set([k[0] for k in z.index]))
palette = sns.color_palette("hls", nFSel+1)[0:nFSel]
palette[2] = (0.9, 0.9, 0.2)
line=sns.lineplot(x="N", y="Stability",hue="Feature Selection", palette = palette, marker="o", data=z, linewidth = 4)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.ylabel('Mean Stability', fontsize = 22, labelpad = 12)
plt.xlabel('Number of Selected Features', fontsize= 22, labelpad = 12)
plt.setp(ax.get_legend().get_texts(), fontsize='16') # for legend text
plt.setp(ax.get_legend().get_title(), fontsize='20') # for legend title
ax.set_xticks(nList[1:])#, rotation = 0, ha = "right", fontsize = 22)
# set the linewidth of each legend object
leg = ax.get_legend()
for line in leg.get_lines():
line.set_linewidth(4.0)
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
plt.tight_layout()
fig.savefig(fname, facecolor = 'w', bbox_inches='tight')
return plt, ax
doPlot (z, "./paper/Figure_3.png")
paragraph = document.add_paragraph('')
document.add_picture("./paper/Figure_3.png", width=Inches(6.0))
paragraph = document.add_paragraph('Figure S' + str(cFigNumber) + ": Relation of feature stability with the number of selected features.")
cFigNumber = cFigNumber + 1
# each dataset
for d in dList:
z = df.query("Dataset == @d")
z = z.groupby(["Feature Selection", "N"]).median(["Stability"])
doPlot (z, "./results/Stability_" + d + ".png")
# add to doc
document.add_page_break()
paragraph = document.add_paragraph(d)
paragraph.alignment = WD_ALIGN_PARAGRAPH.CENTER
document.add_picture("./results/Stability_"+d+".png", width=Inches(6.0))
paragraph = document.add_paragraph('Figure S' + str(cFigNumber) + ": Relation of feature stability with the number of selected features on dataset " + str(d) + ".")
#paragraph = document.add_paragraph(" ")
cFigNumber = cFigNumber + 1
document.add_page_break()
document.save('./paper/Supplemental_2.docx')
plt.close('all')
pass
def getSimilarity (cur, best):
u = getFPattern (best)
v = getFPattern (cur)
SC = stability (u, v)
return SC
# cross similairty of a pattern
def correlation (X, fu, fv):
assert (len(fu) == len(fv))
cu = []
idxU = np.where(fu == 1.0)[0]
idxV = np.where(fv == 1.0)[0]
for u in idxU:
cv = []
for v in idxV:
corr, pval = pearsonr (X.values[:,u], X.values[:,v])
cv.append(np.abs(corr))
cu.append(np.max(cv))
CS = np.mean(cu)
return CS
# called ucorr in supplemental
def getRawCorrelation (cur, best, X):
assert (best["Dataset"] == cur["Dataset"])
patBest = getFPattern (best)
patCur = getFPattern (cur)
assert (len(patBest) == len(patCur))
m = len(patBest)
CS = 0
for i in range(m):
for j in range(m):
coef = correlation (X, patBest[i,:], patCur[j,:])
CS = CS + coef
CS = 1/(m*m)*CS
return CS
def getCorrelation (cur, best, X):
assert (best["Dataset"] == cur["Dataset"])
CSp = getRawCorrelation (cur, best, X)
CSm = getRawCorrelation (best, cur, X)
return (CSp + CSm)/2.0
def highlight_cell(x,y, ax=None, **kwargs):
rect = plt.Rectangle((x-.5, y-.5), 1,1, fill=False, **kwargs)
ax = ax or plt.gca()
ax.add_patch(rect)
return rect
def plot_Tables (dList, results, cType = None):
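    # Build one supplemental Word document for the requested metric: for every dataset,
    # render a classifier x feature-selection matrix annotated with AUC, DeLong p-value
    # and feature count, coloured by the metric (Stability / Similarity / Correlation).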
# set params
if cType == "Stability":
sFile = "Supplemental 3"
if cType == "Similarity":
sFile = "Supplemental 4"
if cType == "Correlation":
sFile = "Supplemental 5"
# create supplemental
global document;
global cFigNumber;
document = Document()
font = document.styles['Normal'].font
font.name = 'Arial'
document.add_heading(sFile)
document.add_paragraph(' ')
document.add_heading("Feature " + cType, level = 2)
document.add_paragraph(' ')
rMatList = getDeLongTest (dList, results)
sscMatList = getSSC (dList, results)
# prepare data
for d in dList:
# ssc contains only stat sim models
z = sscMatList[d]
rMat = rMatList[d]
rMat = rMat.round(3)
scMat = rMat.copy()
strMat = rMat.copy()
strMat = strMat.astype( dtype = "str")
# get best one
aTable = results.query("Dataset == @d")
aTable = aTable.sort_values("AUC", ascending = False).reset_index(drop = True).copy()
best = aTable.iloc[0].copy()
bestc = best["Clf"]
bestf = best["FSel"]
for (i, (idx, row)) in enumerate(z.iterrows()):
c = row["Classifier"]
f = row["Feature Selection"]
nF = row["nFeatures"]
auc = row["AUC"]
scMat.at[c,f] = row[cType]
k = str(np.round(scMat.at[c, f], 2))
if k == "-0.0":
k = "0.0"
strMat.at[c,f] = r'\huge{' + k + "}\n" + r'\Large{AUC:' + str(auc) + " (p=" + str(rMat.at[c,f]) + ")}" + "\n" + r'\large{\#Features: ' + str(nF) + "}"
if 1 == 1:
plt.rc('text', usetex=True)
plt.rcParams.update({
"font.family": "sans-serif",
"font.sans-serif": ["Arial"]})
plt.rcParams['text.usetex'] = True
plt.rcParams['text.latex.preamble'] = r'''
\usepackage{mathtools}
\usepackage{helvet}
\renewcommand{\familydefault}{\sfdefault} '''
DPI = 300
fig, ax = plt.subplots(figsize = (17,14), dpi = DPI)
sns.set(style='white')
#ax = sns.heatmap(scMat, annot = cMat, cmap = "Blues", fmt = '', annot_kws={"fontsize":21}, linewidth = 2.0, linecolor = "black")
dx = np.asarray(scMat, dtype = np.float64)
pal = sns.light_palette("#8888bb", reverse=False, as_cmap=True)
tnorm = colors.TwoSlopeNorm(vmin=0.0, vcenter=0.5, vmax=1.0)
ax.imshow(dx, cmap=pal, norm = tnorm, interpolation='nearest', aspect = 0.49)
# Major ticks
mh, mw = scMat.shape
ax.set_xticks(np.arange(0, mw, 1))
ax.set_yticks(np.arange(0, mh, 1))
# Minor ticks
ax.set_xticks(np.arange(-.5, mw, 1), minor=True)
ax.set_yticks(np.arange(-.5, mh, 1), minor=True)
# Gridlines based on minor ticks
ax.grid(which='minor', color='black', linestyle='-', linewidth=2)
for i, c in enumerate(scMat.index):
for j, f in enumerate(scMat.keys()):
if rMat.at[c,f] < 0.05:
ax.text(j, i, ' ', ha="center", va="center", color="k", fontsize = 12)
elif scMat.at[c,f] < -0.95:
ax.text(j, i, '\huge{N/A}', ha="center", va="center", color="k", fontsize = 12)
else:
ax.text(j, i, strMat.at[c, f], ha="center", va="center", color="k", fontsize = 12)
plt.tight_layout()
best_col = list(scMat.keys()).index(bestf)   # column index of the best feature selection
best_row = list(scMat.index).index(bestc)    # row index of the best classifier
highlight_cell(best_col, best_row, color="royalblue", linewidth=10)
ax.set_xticklabels(rMat.keys(), rotation = 45, ha = "right", fontsize = 22)
ax.set_yticklabels(strTrans(rMat.index), rotation = 0, ha = "right", fontsize = 22)
ax.yaxis.set_tick_params ( labelsize= 22)
fig.savefig("./results/Table_" + cType + "_"+d+".png", facecolor = 'w', bbox_inches='tight')
paragraph = document.add_paragraph(cType + " on " + d)
paragraph.alignment = WD_ALIGN_PARAGRAPH.CENTER
document.add_picture("./results/Table_" + cType + "_" + d+".png", width=Inches(6.0))
paragraph = document.add_paragraph('Figure S' + str(cFigNumber) + ": " + cType + " of the models on dataset " + str(d) + ". The best model is framed with a blue border, models that " + \
"were significantly different to the best model are not shown. Statistical significance was tested using a DeLong test.")
cFigNumber = cFigNumber + 1
document.add_page_break()
plt.close('all')
plt.rc('text', usetex=False)
document.save("./paper/" + sFile.replace(" ", "_") + ".docx")
plt.close('all')
pass
def getSSC (dList, results):
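    # For each dataset, compute Stability, Similarity and Correlation for every
    # (classifier, feature selection) pair whose AUC is not significantly different
    # from the best model (DeLong p > 0.05); results are pickled and reused on later runs.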
rMatList = getDeLongTest (dList, results)
if os.path.exists("./results/ssc.feather") == False:
sscMatList = {}
for fidx, d in enumerate(dList):
X, y = datasets[d]
rMat = rMatList[d]
rMat = rMat.round(3)
fSels = sorted(list(set(results["FSel"].values)))
nList = sorted(list(set(results["nFeatures"].values)))
clfs = sorted(list(set(results["Clf"].values)))
# get best overall
aTable = results.query("Dataset == @d")
aTable = aTable.sort_values("AUC", ascending = False).reset_index(drop = True).copy()
best = aTable.iloc[0].copy()
fTable = []
for c in clfs:
for f in fSels:
if rMat.at[c,f] > 0.05:
aTable = results.query("Dataset == @d and FSel == @f and Clf == @c")
aTable = aTable.sort_values("AUC", ascending = False).reset_index(drop = True).copy()
cur = aTable.iloc[0].copy()
sim = getSimilarity (cur, best)
stab = getStability (cur)
corr = getCorrelation (cur, best, X)
fTable.append({"Feature Selection": f, "Classifier": c, "AUC": round(cur["AUC"],2),
"nFeatures": cur["nFeatures"],
"Stability": round(stab,2),
"Similarity": round(sim, 2),
"Correlation": round(corr,2)})
sscMatList[d] = pd.DataFrame(fTable)
print ("Pickling SSC results")
pickle.dump (sscMatList, open("./results/ssc.feather","wb"))
else:
print ("Restoring SSC results")
sscMatList = pickle.load(open("./results/ssc.feather", "rb"))
return sscMatList
def table_ZZ (dList, results):
rMatList = getDeLongTest (dList, results)
sscMatList = getSSC (dList, results)
# prepare data
df = []
for d in dList:
# ssc contains only stat sim models
z = sscMatList[d]
# did not save the best model, find it
aTable = results.query("Dataset == @d")
aTable = aTable.sort_values("AUC", ascending = False).reset_index(drop = True).copy()
best = aTable.iloc[0].copy()
for (i, (idx, row)) in enumerate(z.iterrows()):
if row["Feature Selection"] == best["FSel"] and row["Classifier"] == best["Clf"] and row["nFeatures"] == best["nFeatures"]:
continue
df.append({"Dataset": d, "Stability": row["Stability"], "Similarity": row["Similarity"], "Correlation": row["Correlation"] })
df = pd.DataFrame(df)
rMat = df.groupby(["Dataset"]).mean()
minMat = df.groupby(["Dataset"]).min()
maxMat = df.groupby(["Dataset"]).max()
minMat = minMat.round(2).astype(str)
maxMat = maxMat.round(2).astype(str)
# labels need range
labels = rMat.copy()
labels = labels.round(2).astype(str)
for c in list(labels.index):
for f in list(labels.keys()):
labels.at[c,f] = labels.at[c,f] + "\n(" + minMat.at[c,f] + "-" + maxMat.at[c,f] + ")"
labels = np.array(labels)
DPI = 300
if 1 == 1:
fig, ax = plt.subplots(figsize = (10,15), dpi = DPI)
sns.set(style='white')
sns.heatmap(rMat, annot = labels, cmap = "Reds", fmt = '', annot_kws={"fontsize":21}, cbar = False)
ax.set_xticklabels(rMat.keys(), rotation = 45, ha = "right", fontsize = 21)
ax.set_yticklabels(rMat.index, rotation = 0, ha = "right", fontsize = 21)
ax.yaxis.set_tick_params ( labelsize= 21)
ax.set_xlabel ("", fontsize = 19)
ax.set_ylabel ("", fontsize = 19)
ax.set_title("", fontsize = 24)
plt.tight_layout()
fig.savefig("./paper/Figure_4.png", facecolor = 'w')
def plot_ZZ (dList, results):
rMatList = getDeLongTest (dList, results)
sscMatList = getSSC (dList, results)
def doPlot (z, v, fname, ax):
sns.set(style='white')
# prepare data
df = []
z = z.sort_values(["Stability"], ascending = False).reset_index(drop = True)
for (i, (idx, row)) in enumerate(z.iterrows()):
p = v.at[row["Classifier"], row["Feature Selection"]]
if p >= 0.05:
df.append({"Value": row["Stability"], "Type": "Stability", "Index": idx})
df.append({"Value": row["Similarity"], "Type": "Similarity", "Index": idx})
df.append({"Value": row["Correlation"], "Type": "Correlation", "Index": idx})
df = pd.DataFrame(df)
#z["Value"] = z["Value"].replace(-1, np.inf)
palette = sns.color_palette("hls", 17)[4::6]
try:
line=sns.lineplot(x="Index", y="Value",hue="Type", palette = palette, marker="o", data=df, linewidth = 4, ax = ax, legend =None)
except Exception as e:
print(z)
print(z.head())
raise(e)
#ax = ax[0][0]
ax.xaxis.set_tick_params(labelsize=20)
ax.yaxis.set_tick_params(labelsize=20)
ax.set_ylabel('Correlation', fontsize = 22, labelpad = 12)
ax.set_xlabel('Model', fontsize= 22, labelpad = 12)
ax.set_title(str(d), fontsize="24", fontweight="bold")
ax.set_ylim(0, 1)
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
extent = full_extent(ax).transformed(fig.dpi_scale_trans.inverted())
# extent = ax.get_tightbbox(fig.canvas.renderer).transformed(fig.dpi_scale_trans.inverted())
fig.savefig(fname, facecolor = 'w', bbox_inches=extent)
return plt, ax
DPI = 200
fig, ax = plt.subplots(4,4, figsize = (20, 20), dpi = DPI)
for fidx, d in enumerate(dList):
fidx_y= fidx % 4
fidx_x = fidx//4
z = sscMatList[d]
v = rMatList[d] # fixme: add p value to sscmatlist....
doPlot (z, v, "./results/Overview_" + d + ".png", ax = ax[fidx_x][fidx_y])
# add legend to bottom right
for x, y in [(3,3), (3,2)]:
ax[x][y] .spines['right'].set_visible(False)
ax[x][y] .spines['top'].set_visible(False)
ax[x][y] .spines['bottom'].set_visible(False)
ax[x][y] .spines['left'].set_visible(False)
ax[x][y] .spines['left'].set_visible(False)
ax[x][y].xaxis.set_visible(False)
ax[x][y].yaxis.set_visible(False)
palette = sns.color_palette("hls", 17)[4::6]
labels = ["Stability", "Similarity", "Correlation"]
handles = [matplotlib.patches.Patch(color=x, label=labels[v]) for v,x in enumerate(palette)]
# Create legend
ax[3][3].legend(handles=handles, loc = "lower right")
# Get current axes object and turn off axis
ax[3][3].set_axis_off()
#ax[3][3].legend(loc = "lower right")
plt.setp(ax[3][3].get_legend().get_texts(), fontsize='24') # for legend text
plt.setp(ax[3][3].get_legend().get_title(), fontsize='24') # for legend title
plt.tight_layout(pad=3.0)
fig.savefig("./results/Figure_ZZ.png", facecolor = 'w', bbox_inches='tight')
plt.close('all')
plt.rc('text', usetex=False)
pass
# mean AUC of all stat. Sig. models vs mean correlation (pre/post/f-corr)
def plot_TradeOff (dList, results):
print ("Plotting trade off")
rMatList = getDeLongTest (dList, results)
sscMatList = getSSC (dList, results)
# prepare data
print ("Preparing data")
df = []
for d in dList:
# ssc contains only stat sim models
z = sscMatList[d]
# did not save the best model, find it
aTable = results.query("Dataset == @d")
aTable = aTable.sort_values("AUC", ascending = False).reset_index(drop = True).copy()
best = aTable.iloc[0].copy()
for (i, (idx, row)) in enumerate(z.iterrows()):
if row["Feature Selection"] == best["FSel"] and row["Classifier"] == best["Clf"] and row["nFeatures"] == best["nFeatures"]:
continue
df.append({"Dataset": d, "AUC": row["AUC"], "Stability": row["Stability"], "Similarity": row["Similarity"], "Correlation": row["Correlation"] })
df = pd.DataFrame(df)
count = df.groupby(["Dataset"]).count()["AUC"]
df = df.groupby(["Dataset"]).mean()
df["Count"] = count
print ("Plotting")
def doPlot(df, d, v1, v2):
for ctype in [v2]:
spfList = df[[v1, ctype]]
R, pval = pearsonr(*zip (*spfList.values))
R2 = R*R
print (R, pval)
# fSels = [z[0] for z in spfList.index]
# dSets = [z[1] for z in spfList.index]
x, y = zip(*spfList.values)
p, cov = np.polyfit(x, y, 1, cov=True)  # parameters and covariance from the fit of a 1-D polynomial
y_model = equation(p, x) # model using the fit parameters; NOTE: parameters here are coefficients
# Statistics
n = len(x) # number of observations
ps = p.size # number of parameters
dof = n - ps # degrees of freedom
t = stats.t.ppf(0.975, n - ps) # used for CI and PI bands
# Estimates of Error in Data/Model
resid = y - y_model
chi2 = np.sum((resid / y_model)**2) # chi-squared; estimates error in data
chi2_red = chi2 / dof # reduced chi-squared; measures goodness of fit
s_err = np.sqrt(np.sum(resid**2) / dof) # standard deviation of the error
# plot
if 1 == 1:
DPI = 300
fig, ax = plt.subplots(figsize = (10, 10), dpi = DPI)
# sns.scatterplot (x = x,y = y, ax = ax)
sns.scatterplot (x = v1, y = ctype, data=df, ax = ax, s = 50, color = ".0")
ax.plot(x, y_model, "-", color="0.1", linewidth=1.5, alpha=1.0, label="Fit")
x2 = np.linspace(np.min(x), np.max(x), 100)
y2 = equation(p, x2)
# Confidence Interval (select one)
plot_ci_manual(t, s_err, n, x, x2, y2, ax=ax)
#plot_ci_bootstrap(x, y, resid, ax=ax)
# Prediction Interval
pi = t * s_err * np.sqrt(1 + 1/n + (x2 - np.mean(x))**2 / np.sum((x - np.mean(x))**2))
ax.fill_between(x2, y2 + pi, y2 - pi, color="None", linestyle="--")
# ax.plot(x2, y2 - pi, "--", color="0.5", label="95% Prediction Limits")
# ax.plot(x2, y2 + pi, "--", color="0.5")
# Figure Modifications --------------------------------------------------------
# Borders
ax.spines["top"].set_color("0.5")
ax.spines["bottom"].set_color("0.5")
ax.spines["left"].set_color("0.5")
ax.spines["right"].set_color("0.5")
ax.get_xaxis().set_tick_params(direction="out")
ax.get_yaxis().set_tick_params(direction="out")
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
#ax.invert_xaxis()
# Labels
#plt.title("Fit Plot for Weight", fontsize="14", fontweight="bold")
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
if v2 == "Correlation":
plt.ylabel('Mean Correlation', fontsize = 22, labelpad = 12)
if v2 == "Stability":
plt.ylabel('Mean Stability', fontsize = 22, labelpad = 12)
if v2 == "Similarity":
plt.ylabel('Mean Similarity', fontsize = 22, labelpad = 12)
if v2 == "Count":
plt.ylabel('Number of stat. similar Models', fontsize = 22, labelpad = 12)
if v1 == "AUC":
plt.xlabel('Mean AUC-ROC', fontsize= 22, labelpad = 12)
if v1 == "Correlation":
plt.xlabel('Mean Correlation', fontsize= 22, labelpad = 12)
if v1 == "Sample Size":
plt.xlabel('Sample Size', fontsize = 22, labelpad = 12)
ax.set_xticks([50,250,500,750])
right = 0.95
ypos = 0.07 #0.93s
legtext = ''
if len(legtext ) > 0:
ypos = 0.07
legtext=legtext+"\n"
plt.rcParams.update({
"text.usetex": True,
"font.family": "sans-serif",
"font.sans-serif": ["Helvetica"]})
legpost = ''
bbox_props = dict(fc="w", ec="0.5", alpha=0.9)
pTxt = (' = {0:0.2f} ($p$ = {1:0.3f})').format(R2, pval)
plt.text (right, ypos,
(legtext + "$R^2$" + pTxt),
horizontalalignment='right',
size = 24, bbox = bbox_props,
transform = ax.transAxes)
plt.rcParams.update({
"text.usetex": False,
"font.family": "sans-serif",
"font.sans-serif": ["Helvetica"]})
#ax.set_title("Stability vs AUC (" + d + ")", fontsize = 28)
print ("Bias for", d)
fig.tight_layout()
#fig.savefig("./results/AUC_vs_" + ctype + "_"+ d + ".png", facecolor = 'w')
if d == "all":
if v2 == "Correlation" and v1 =="AUC":
fig.savefig("./results/Figure_3A.png", facecolor = 'w')
if v2 == "Count" and v1 == "AUC":
fig.savefig("./results/Figure_3B.png", facecolor = 'w')
if v2 == "Stability" and v1 == "AUC":
fig.savefig("./results/Figure_3C.png", facecolor = 'w')
if v2 == "Similarity" and v1 == "AUC":
fig.savefig("./results/Figure_3D.png", facecolor = 'w')
doPlot (df, d = "all", v1 = "AUC", v2 = "Correlation")
doPlot (df, d = "all", v1 = "AUC", v2 = "Count")
doPlot (df, d = "all", v1 = "AUC", v2 = "Stability")
doPlot (df, d = "all", v1 = "AUC", v2 = "Similarity")
plt.close('all')
pass
def addText (finalImage, text = '', org = (0,0), fontFace = '', fontSize = 12, color = (255,255,255)):
# Convert the image to RGB (OpenCV uses BGR)
#tmpImg = cv2.cvtColor(finalImage, cv2.COLOR_BGR2RGB)
tmpImg = finalImage
pil_im = Image.fromarray(tmpImg)
draw = ImageDraw.Draw(pil_im)
font = ImageFont.truetype(fontFace + ".ttf", fontSize)
draw.text(org, text, font=font, fill = color)
#tmpImg = cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR)
tmpImg = np.array(pil_im)
return (tmpImg.copy())
def addBorder (img, pos, thickness):
if pos == "H":
img = np.hstack([255*np.ones(( img.shape[0],int(img.shape[1]*thickness), 3), dtype = np.uint8),img])
if pos == "V":
img = np.vstack([255*np.ones(( int(img.shape[0]*thickness), img.shape[1], 3), dtype = np.uint8),img])
return img
# just read both figures and merge them
def join_plots():
fontFace = "Arial"
imA = cv2.imread("./results/Figure_3B.png")
imA = addText (imA, "(a)", (50,50), fontFace, 128, color=(0,0,0))
imB = cv2.imread("./results/Figure_3C.png")
imB = addText (imB, "(b)", (50,50), fontFace, 112, color= (0,0,0))
imC = cv2.imread("./results/Figure_3D.png")
imC = addText (imC, "(c)", (50,50), fontFace, 112, color=(0,0,0))
imD = cv2.imread("./results/Figure_3A.png")
imD = addText (imD, "(d)", (50,50), fontFace, 112, color= (0,0,0))
#Image.fromarray(imD[::4,::4,:])
imB = addBorder (imB, "H", 0.075)
imgU = np.hstack([imA, imB])
imD = addBorder (imD, "H", 0.075)
imgL = np.hstack([imC, imD])
imgL = addBorder (imgL, "V", 0.075)
img = np.vstack([imgU, imgL])
#Image.fromarray(img[::6,::6,:])
cv2.imwrite("./paper/Figure_5.png", img)
def stabilityStats (dList, results):
print ("Feature stability stats:")
fTable = load( "./results/stability.joblib")
# group by dataset and take mean
df = pd.DataFrame(fTable)
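    # Hypothetical continuation (assumption, not from the original source): summarize
    # the per-dataset stability using the columns built earlier for the stability figure.
    stats = df.groupby(["Dataset"])["Stability"].agg(["mean", "min", "max"])
    print(stats.round(3))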
"""
This script uses the HKplus functions to generate a channel centerline, run it for a certain number of years, then save the resulting centerline for later use.
"""
import math
import matplotlib.pyplot as plt
import HKplus as hkp
import numpy as np
import pandas as pd
#Set Variables for centerline and curvature calculation
D=3.4 #constant width-average channel depth (m)
W = 100 #constant channel width (m)
deltas = W//2; #spacing of nodes along centerline
nit = 600 #number of iterations/how many timesteps to migrate the centerline
Cf = 0.005 #dimensionless Chezy friction factor
kl = 20/(365*24*60*60.0) #migration rate constant (m/s)
dt = 2*365*24*60*60.0 #time step (s)
pad= 200 #number of nodes for periodic boundary
saved_ts = 20 #timesteps between saving centerlines
crdist = 4*W #how close banks get before cutoff in m
#Set Variables for Cutoff nonlocal effects
decay_rate = dt/(10*(365*24*60*60.0)); #this is the half-life on nonlocal effects, in units of seconds
bump_scale = 0 #this is the magnitude of nonlocal effects, expressed as a relative difference
#Set Result Directory
result_dir = "data/InitialChannel/" ##change this to wherever you want to save your results
#Choose name for centerline
name= "experiment007"
#Initiate Channel Object
ch = hkp.generate_initial_channel(W,D,deltas,pad)
#Initiate Channel Belt for migrating channel object
chb = hkp.ChannelBelt(channels=[ch],cutoffs=[],cl_times=[0.0],cutoff_times=[], cutoff_dists = [], decay_rate = decay_rate, bump_scale = bump_scale, cut_thresh = 1000, sinuosity=[1])
#Plot initial centerline
chb.plot_channels()
plt.title(str(int(nit*dt/(365*24*60*60.0)))+ " years at "+ str(kl*(365*24*60*60.0))+ "m/yr")
plt.show()
#Migrate Centerline for nit
chb.migrate_years(nit,saved_ts,deltas,pad,crdist,Cf,kl,dt)
#Plot Resulting Centerline
chb.plot_channels()
plt.title(str(int(nit*dt/(365*24*60*60.0)))+ " years at "+ str(kl*(365*24*60*60.0))+ "m/yr")
plt.show()
#Save Sinuosity time series
times = chb.cl_times
sins = chb.sinuosity
#uncomment to save sinuosity series
#sinseries = pd.DataFrame({'time':times, 'sinuosity': sins});
#sinseries.to_csv(result_dir+"InitialCL_"+name+"sinseries"+".csv", header = True, index = False)
hkp.plot_sinuosity(times, sins)
plt.show()
#Save Resulting Centerline
xes = chb.channels[-1].x
yes = chb.channels[-1].y
cl = pd.DataFrame({'x': xes, 'y': yes})
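# Save resulting centerline for later use (hypothetical filename pattern, following
# the commented-out sinuosity-series example above; not taken from the original source).
cl.to_csv(result_dir + "InitialCL_" + name + ".csv", header=True, index=False)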
import os
import joblib
import numpy as np
import pandas as pd
from joblib import Parallel
from joblib import delayed
from pytz import timezone
from sklearn.decomposition import KernelPCA
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import MinMaxScaler
from Fuzzy_clustering.version2.common_utils.logging import create_logger
from Fuzzy_clustering.version2.dataset_manager.common_utils import my_scorer
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_3d
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_daily_nwps
class DatasetCreatorPCA:
def __init__(self, project, data=None, n_jobs=1, test=False, dates=None):
if test is None:
raise NotImplementedError('test is None for short-term; not implemented for PCA')
self.data = data
self.is_for_test = test
self.project_name = project['_id']
self.static_data = project['static_data']
self.path_nwp_project = self.static_data['pathnwp']
self.path_data = self.static_data['path_data']
self.areas = self.static_data['areas']
self.area_group = self.static_data['area_group']
self.nwp_model = self.static_data['NWP_model']
self.nwp_resolution = self.static_data['NWP_resolution']
self.location = self.static_data['location']
self.compress = True if self.nwp_resolution == 0.05 else False
self.n_jobs = n_jobs
self.variables = self.static_data['data_variables']
self.logger = create_logger(logger_name=f"log_{self.static_data['project_group']}",
abs_path=self.path_nwp_project,
logger_path=f"log_{self.static_data['project_group']}.log", write_type='a')
if self.data is not None:
self.dates = self.check_dates()
elif dates is not None:
self.dates = dates
def check_dates(self):
# Extract dates of power measurements.
start_date = pd.to_datetime(self.data.index[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(self.data.index[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
data_dates = pd.to_datetime(np.unique(self.data.index.strftime('%d%m%y')), format='%d%m%y')
dates = [d for d in dates if d in data_dates]
self.logger.info('Dates is checked. Number of time samples %s', str(len(dates)))
return pd.DatetimeIndex(dates)
def make_dataset_res(self):
nwp_3d_pickle = 'nwps_3d_test.pickle' if self.is_for_test else 'nwps_3d.pickle'
dataset_cnn_pickle = 'dataset_cnn_test.pickle' if self.is_for_test else 'dataset_cnn.pickle'
nwp_3d_pickle = os.path.join(self.path_data, nwp_3d_pickle)
dataset_cnn_pickle = os.path.join(self.path_data, dataset_cnn_pickle)
if not (os.path.exists(nwp_3d_pickle) and os.path.exists(dataset_cnn_pickle)):
data, x_3d = self.get_3d_dataset()
else:
data = joblib.load(nwp_3d_pickle)
x_3d = joblib.load(dataset_cnn_pickle) # FIXME: unused variable
data_path = self.path_data
if not isinstance(self.areas, dict):
self.dataset_for_single_farm(data, data_path)
else:
dates_stack = []
for t in self.dates:
p_dates = pd.date_range(t + pd.DateOffset(hours=25), t + pd.DateOffset(hours=48), freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in p_dates if dt in self.data.index]
dates_stack.append(dates)
flag = False
for i, p_dates in enumerate(dates_stack):
t = self.dates[i]
file_name = os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for date in p_dates:
try:
nwp = nwps[date]
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
lats = (np.where((nwp['lat'][:, 0] >= self.area_group[0][0]) & (
nwp['lat'][:, 0] <= self.area_group[1][0])))[0]
longs = (np.where((nwp['long'][0, :] >= self.area_group[0][1]) & (
nwp['long'][0, :] <= self.area_group[1][1])))[0]
lats_group = nwp['lat'][lats]
longs_group = nwp['long'][:, longs]
flag = True
break
except Exception:
continue
if flag:
break
self.dataset_for_multiple_farms(data, self.areas, lats_group, longs_group)
def correct_nwps(self, nwp, variables):
if nwp['lat'].shape[0] == 0:
area_group = self.area_group  # use the attributes set in __init__ (this class has no self.projects)
resolution = self.nwp_resolution
nwp['lat'] = np.arange(area_group[0][0], area_group[1][0] + resolution / 2,
resolution).reshape(-1, 1)
nwp['long'] = np.arange(area_group[0][1], area_group[1][1] + resolution / 2,
resolution).reshape(-1, 1).T
for var in nwp.keys():
if var not in {'lat', 'long'}:
if nwp['lat'].shape[0] != nwp[var].shape[0]:
nwp[var] = nwp[var].T
if 'WS' in variables and not 'WS' in nwp.keys():
if 'Uwind' in nwp.keys() and 'Vwind' in nwp.keys():
if nwp['Uwind'].shape[0] > 0 and nwp['Vwind'].shape[0] > 0:
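                    # Derive wind speed and meteorological direction from the U/V components;
                    # r2d = 45/atan(1) = 180/pi converts radians to degrees.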
r2d = 45.0 / np.arctan(1.0)
wspeed = np.sqrt(np.square(nwp['Uwind']) + np.square(nwp['Vwind']))
wdir = np.arctan2(nwp['Uwind'], nwp['Vwind']) * r2d + 180
nwp['WS'] = wspeed
nwp['WD'] = wdir
if 'Temp' in nwp.keys():
nwp['Temperature'] = nwp['Temp']
del nwp['Temp']
return nwp
def get_3d_dataset(self):
dates_stack = []
for t in self.dates:
p_dates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47),
freq='H') # 24 hourly steps covering the next day (00:00 -> 23:00)
dates = [dt.strftime('%d%m%y%H%M') for dt in p_dates if dt in self.data.index]
dates_stack.append(dates) # for each forecast date, keep the next day's hourly timestamps that exist in the data
area = self.area_group if isinstance(self.areas, dict) else self.areas
nwp = stack_daily_nwps(self.dates[0], dates_stack[0], self.path_nwp_project,
self.nwp_model, area, self.variables,
self.compress, self.static_data['type'])
nwp_daily = Parallel(n_jobs=self.n_jobs)(
delayed(stack_daily_nwps)(self.dates[i], p_dates, self.path_nwp_project,
self.nwp_model, area, self.variables,
self.compress, self.static_data['type'])
for i, p_dates in enumerate(dates_stack))
x = np.array([])
data_var = dict()
for var in self.variables:
if (var == 'WS' and self.static_data['type'] == 'wind') or \
(var == 'Flux' and self.static_data['type'] == 'pv'):
data_var[var + '_prev'] = x
data_var[var] = x
data_var[var + '_next'] = x
else:
data_var[var] = x
data_var['dates'] = x
x_3d = np.array([])
for arrays in nwp_daily:
nwp = arrays[0]
x_2d = arrays[1]
if x_2d.shape[0] != 0:
for var in nwp.keys():
if var != 'dates':
data_var[var] = stack_3d(data_var[var], nwp[var])
else:
data_var[var] = np.hstack((data_var[var], nwp[var]))
x_3d = stack_3d(x_3d, x_2d)
self.logger.info('NWP data stacked for date %s', arrays[2])
if self.is_for_test:
joblib.dump(data_var, os.path.join(self.path_data, 'nwps_3d_test.pickle'))
joblib.dump(x_3d, os.path.join(self.path_data, 'dataset_cnn_test.pickle'))
else:
joblib.dump(data_var, os.path.join(self.path_data, 'nwps_3d.pickle'))
joblib.dump(x_3d, os.path.join(self.path_data, 'dataset_cnn.pickle'))
self.logger.info('NWP stacked data saved')
return data_var, x_3d
def train_pca(self, data, components, level):
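        # Fit MinMaxScaler + KernelPCA on this block of variables, tuning gamma by grid
        # search with the project's custom scorer, and cache both transformers to disk.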
scaler = MinMaxScaler()
data_scaled = scaler.fit_transform(data)
param_grid = [{
"gamma": np.logspace(-3, 0, 20),
}]
kpca = KernelPCA(n_components=components, fit_inverse_transform=True, n_jobs=self.n_jobs)
grid_search = GridSearchCV(kpca, param_grid, cv=3, scoring=my_scorer, n_jobs=self.n_jobs)
grid_search.fit(data_scaled)
kpca = grid_search.best_estimator_
fname = os.path.join(self.path_data, 'kpca_' + level + '.pickle')
joblib.dump({'scaler': scaler, 'kpca': kpca}, fname)
def pca_transform(self, data, components, level):
fname = os.path.join(self.path_data, 'kpca_' + level + '.pickle')
if not os.path.exists(fname):
self.train_pca(data, components, level)
models = joblib.load(fname)
data_scaled = models['scaler'].transform(data)
data_compress = models['kpca'].transform(data_scaled)
return data_compress
def dataset_for_single_farm(self, data, data_path):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
X0 = np.transpose(data[var + '_prev'], [0, 2, 1])
X0_level0 = X0[:, 2, 2]
X1 = np.transpose(data[var], [0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.pca_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.pca_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.pca_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.pca_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = np.transpose(data[var + '_next'], [0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'flux' if var == 'Flux' else 'wind'
var_sort = 'fl' if var == 'Flux' else 'ws'
col = ['p_' + var_name] + ['n_' + var_name] + [var_name]
col = col + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X0_level0.reshape(-1, 1), X2_level0.reshape(-1, 1), X1_level1.reshape(-1, 1), X1_level3d,
X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = np.transpose(data[var], [0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.pca_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.pca_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.pca_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.pca_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud' if var == 'Cloud' else 'direction'
var_sort = 'cl' if var == 'Cloud' else 'wd'
col = [var_name] + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
X2 = np.transpose(data[var], [0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'Temp' if var == 'Temperature' else 'wind'
var_sort = 'tp' if var == 'Temperature' else 'ws'
col = [var_name]
X = X2_level0
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
dataset_y = self.data.loc[dataset_X.index].to_frame()
dataset_y.columns = ['target']
if self.is_for_test:
ind = joblib.load(os.path.join(data_path, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X_test.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y_test.csv'))
self.logger.info('Successfully dataset created for Evaluation for %s', self.project_name)
else:
corr = []
for f in range(dataset_X.shape[1]):
corr.append(np.abs(np.corrcoef(dataset_X.values[:, f], dataset_y.values.ravel())[1, 0]))
ind = np.argsort(np.array(corr))[::-1]
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
joblib.dump(ind, os.path.join(data_path, 'dataset_columns_order.pickle'))
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y.csv'))
self.logger.info('Successfully dataset created for training for %s', self.project_name)
def dataset_for_multiple_farms(self, data, areas, lats_group, longs_group):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
for area_name, area in areas.items():
if len(area) > 1:
lats = (np.where((lats_group[:, 0] >= area[0, 0]) & (lats_group[:, 0] <= area[1, 0])))[0]
longs = (np.where((longs_group[0, :] >= area[0, 1]) & (longs_group[0, :] <= area[1, 1])))[0]
else:
lats = (np.where((lats_group[:, 0] >= area[0]) & (lats_group[:, 0] <= area[2])))[0]
longs = (np.where((longs_group[0, :] >= area[1]) & (longs_group[0, :] <= area[3])))[0]
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
X0 = data[var + '_prev'][:, lats, :][:, :, longs]
X0 = X0.reshape(-1, X0.shape[1] * X0.shape[2])
level = var + '_prev_' + area_name
self.logger.info('Begin PCA training for %s', level)
X0_compressed = self.pca_transform(X0, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = data[var + '_next'][:, lats, :][:, :, longs]
X2 = X2.reshape(-1, X2.shape[1] * X2.shape[2])
level = var + '_next_' + area_name
self.logger.info('Begin PCA training for %s', level)
X2_compressed = self.pca_transform(X2, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
var_sort = 'fl_' + area_name if var == 'Flux' else 'ws_' + area_name
col = ['p_' + var_name + '.' + str(i) for i in range(3)]
col += ['n_' + var_name + '.' + str(i) for i in range(3)]
col += [var_name + '.' + str(i) for i in range(9)]
X = np.hstack((X0_compressed, X2_compressed, X1_compressed))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
var_sort = 'cl_' + area_name if var == 'Cloud' else 'wd_' + area_name
col = [var_name + '.' + str(i) for i in range(9)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
var_sort = 'tp_' + area_name if var == 'Temperature' else 'ws_' + area_name
col = [var_name + '.' + str(i) for i in range(3)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
col = []
col_p = []
col_n = []
for area_name, area in areas.items():
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
col_p += ['p_' + var_name + '.' + str(i) for i in range(3)]
col_n += ['n_' + var_name + '.' + str(i) for i in range(3)]
var_name = 'flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
var_name = 'p_flux' if var == 'Flux' else 'p_wind'
dataset_X[var_name] = dataset_X[col_p].mean(axis=1)
var_name = 'n_flux' if var == 'Flux' else 'n_wind'
dataset_X[var_name] = dataset_X[col_n].mean(axis=1)
elif var in {'WD', 'Cloud'}:
col = []
for area_name, area in areas.items():
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
var_name = 'cloud' if var == 'Cloud' else 'direction'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
col = []
for area_name, area in areas.items():
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(3)]
var_name = 'Temp' if var == 'Temperature' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
dataset_y = self.data.loc[dataset_X.index].to_frame()
dataset_y.columns = ['target']
if self.is_for_test:
ind = joblib.load(os.path.join(self.path_data, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X_test.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y_test.csv'))
self.logger.info('Successfully dataset created for Evaluation for %s', self.project_name)
else:
corr = []
for f in range(dataset_X.shape[1]):
corr.append(np.abs(np.corrcoef(dataset_X.values[:, f], dataset_y.values.ravel())[1, 0]))
ind = np.argsort(np.array(corr))[::-1]
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
joblib.dump(ind, os.path.join(self.path_data, 'dataset_columns_order.pickle'))
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y.csv'))
self.logger.info('Successfully dataset created for training for %s', self.project_name)
def make_dataset_res_offline(self, utc=False):
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
except Exception:
return False
data, X_3d = self.get_3d_dataset_offline(utc)
if not isinstance(self.areas, dict):
X = self.dataset_for_single_farm_offline(data)
else:
dates_stack = []
for t in self.dates:
pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
dates_stack.append(dates)
flag = False
for i, pdates in enumerate(dates_stack):
t = self.dates[i]
fname = os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(fname):
nwps = joblib.load(fname)
for date in pdates:
try:
nwp = nwps[date]
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
lats = (np.where((nwp['lat'][:, 0] >= self.area_group[0][0]) & (
nwp['lat'][:, 0] <= self.area_group[1][0])))[0]
longs = (np.where((nwp['long'][0, :] >= self.area_group[0][1]) & (
nwp['long'][0, :] <= self.area_group[1][1])))[0]
lats_group = nwp['lat'][lats]
longs_group = nwp['long'][:, longs]
flag = True
break
except:
continue
if flag:
break
X = self.dataset_for_multiple_farms_offline(data, self.areas, lats_group, longs_group)
return X, X_3d
def get_3d_dataset_offline(self, utc):
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
except Exception:
return False
dates_stack = []
for dt in self.dates:
if utc:
pdates = pd.date_range(dt + pd.DateOffset(hours=25), dt + pd.DateOffset(hours=48), freq='H')
dates = [t.strftime('%d%m%y%H%M') for t in pdates]
dates_stack.append(dates)
else:
pdates = pd.date_range(dt + pd.DateOffset(hours=25), dt + pd.DateOffset(hours=48), freq='H')
indices = [i for i, t in enumerate(pdates) if datetime_exists_in_tz(t, tz=timezone('Europe/Athens'))]
pdates = pdates[indices]
pdates = pdates.tz_localize(timezone('Europe/Athens'))
pdates = pdates.tz_convert(timezone('UTC'))
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
dates_stack.append(dates)
if not isinstance(self.areas, dict):
nwp_daily = Parallel(n_jobs=self.n_jobs)(
delayed(stack_daily_nwps)(self.dates[i], pdates, self.path_nwp_project, self.nwp_model,
self.areas, self.variables, self.compress, self.static_data['type'])
for i, pdates in enumerate(dates_stack))
else:
nwp = stack_daily_nwps(self.dates[0], dates_stack[0], self.path_nwp_project, self.nwp_model,
self.area_group,
self.variables, self.compress, self.static_data['type'])
nwp_daily = Parallel(n_jobs=self.n_jobs)(
delayed(stack_daily_nwps)(self.dates[i], pdates, self.path_nwp_project, self.nwp_model,
self.area_group, self.variables, self.compress, self.static_data['type'])
for i, pdates in enumerate(dates_stack))
X = np.array([])
data_var = dict()
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
data_var[var + '_prev'] = X
data_var[var] = X
data_var[var + '_next'] = X
else:
data_var[var] = X
data_var['dates'] = X
X_3d = np.array([])
for arrays in nwp_daily:
nwp = arrays[0]
x_2d = arrays[1]
if x_2d.shape[0] != 0:
for var in nwp.keys():
if var != 'dates':
data_var[var] = stack_3d(data_var[var], nwp[var])
else:
data_var[var] = np.hstack((data_var[var], nwp[var]))
X_3d = stack_3d(X_3d, x_2d)
self.logger.info('NWP data stacked for date %s', arrays[2])
return data_var, X_3d
def dataset_for_single_farm_offline(self, data):
dataset_X = pd.DataFrame()
import pandas as pd
import numpy as np
from tqdm import tqdm
import multiprocessing as mp
import os
def save_local_csv():
# collect rotated local structure into one pandas DataFrame file
for flag in ['train', 'validation', 'test']:
df_list = []
# count = 0
num = 0
flist = pd.read_csv(f'cath_Ingraham_{flag}.txt')
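        # Hypothetical continuation (assumption -- the per-entry file layout is not shown
        # here): read each listed structure file, collect, then concatenate and save.
        for entry in flist.iloc[:, 0]:
            df_list.append(pd.read_csv(f'local_structure/{entry}.csv'))  # hypothetical path
            num += 1
        df_all = pd.concat(df_list, ignore_index=True)
        df_all.to_csv(f'local_structure_{flag}.csv', index=False)  # hypothetical output name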
"""
Parse FGDC metadata
"""
import re
from pathlib import Path
import geopandas as gpd
import pandas as pd
from bs4 import BeautifulSoup
from shapely.geometry import box
def parse_xml(xml, fields):
soup = BeautifulSoup(xml)
# Field names must be unique within the FGDC metadata
data = {}
for field in fields:
xml_field = soup.find(field)
data[field] = xml_field and xml_field.text
return data
def parse_meta(meta_dir):
meta_dir = Path(meta_dir)
xy_regex = re.compile(r'x(\d{2})y(\d{3})')
data = []
i = 0
for meta_file in meta_dir.glob('**/*.xml'):
name = meta_file.stem
x, y = xy_regex.search(name).groups()
project_dir = meta_file.parents[1].name
with meta_file.open() as f:
d = parse_xml(
f.read(),
fields=['utmzone', 'westbc', 'eastbc', 'northbc', 'southbc'])
d['name'] = name
d['x'] = x
d['y'] = y
d['project_dir'] = project_dir
data.append(d)
i += 1
if i % 1000 == 0:
print(i)
return pd.DataFrame(data)
def meta_to_wgs84(df):
"""Convert meta file with multiple UTM zones to WGS84
TODO check that projected position is near WGS bounds from metadata file. At
least a couple occurences of elevation in the ocean. Probably should be utm
11 instead of 10.
"""
df['x'] = pd.to_numeric(df['x'])
df['y'] = pd.to_numeric(df['y'])
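    # Hypothetical continuation (assumption, not from the original source): the FGDC
    # bounding coordinates are already geographic, so build bounding-box geometries
    # and return a GeoDataFrame in WGS84 (EPSG:4326). The per-UTM-zone check hinted
    # at in the docstring TODO is left out of this sketch.
    for col in ['westbc', 'eastbc', 'northbc', 'southbc']:
        df[col] = pd.to_numeric(df[col])
    geometry = [box(w, s, e, n) for w, s, e, n in
                zip(df['westbc'], df['southbc'], df['eastbc'], df['northbc'])]
    return gpd.GeoDataFrame(df, geometry=geometry, crs='EPSG:4326')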
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
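        # (continuation sketch, assumption: mirrors the axis=0 check above)
        expected = concat([frames[k] for k in sorted_keys], axis=1, keys=sorted_keys)
        tm.assert_frame_equal(result, expected)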
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer types coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = | pd.period_range('1/1/2000', periods=5) | pandas.period_range |
# %%
import json
from collections import Counter
import matplotlib as mpl
import matplotlib.font_manager as fm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotnine
import scipy
import seaborn as sns
from matplotlib import rc
from pandas.plotting import register_matplotlib_converters
from plotnine import (
aes,
element_text,
facet_wrap,
geom_bar,
ggplot,
labs,
scale_color_hue,
theme,
theme_light,
)
from tqdm import notebook
font_path = "/usr/share/fonts/truetype/nanum/NanumBarunGothic.ttf"
font_name = fm.FontProperties(fname=font_path, size=10).get_name()
plt.rc("font", family=font_name, size=12)
plt.rcParams["figure.figsize"] = (20, 10)
register_matplotlib_converters()
mpl.font_manager._rebuild()
mpl.pyplot.rc("font", family="NanumGothic")
# %%
# load the input data
train = pd.read_json("../input/melon-playlist/train.json", typ="frame")
test = pd.read_json("../input/melon-playlist/test.json", typ="frame")
val = pd.read_json("../input/melon-playlist/val.json", typ="frame")
genre = | pd.read_json("../input/melon-playlist/genre_gn_all.json", typ="series") | pandas.read_json |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, | Timestamp("2011-01-03") | pandas.Timestamp |
"""
@author: <NAME>
@email: <EMAIL>
script to generate node2vec embeddings
"""
from node2vec import Node2Vec
import json
import networkx as nx
import pandas as pd
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from stellargraph import StellarGraph
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import f1_score, classification_report
import seaborn as sns
from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
import numpy as np
if __name__ == "__main__":
data = json.loads(open("graph.json").read())
graph = nx.node_link_graph(data)
G = StellarGraph.from_networkx(graph)
print(G.info())
n2v = Node2Vec(graph, dimensions=128, workers=4, num_walks=10, walk_length=10)
model = n2v.fit(window=10, min_count=1, batch_words=4)
print(model)
ordered_vocab = [(term, voc.index, voc.count) for term, voc in model.wv.vocab.items()]
ordered_vocab = sorted(ordered_vocab, key=lambda k: k[2])
ordered_terms, term_indices, term_counts = zip(*ordered_vocab)
word_vectors = pd.DataFrame(model.wv.syn0[term_indices, :], index=ordered_terms)
word_vectors.to_csv("data_embds.csv", index=False)
#labels = word_vectors
pca = PCA(n_components=2)
components = pca.fit_transform(word_vectors)
_class= [graph.nodes[int(node_id)]["_class"] for node_id in ordered_terms]
components_2d = | pd.DataFrame({1:components[:,0], 2:components[:,1], "class":_class}, index=ordered_terms) | pandas.DataFrame |
import pandas as pd
import numpy as np
import inspect
import re
# via stackoverflow and blog post... from <NAME> / <NAME> (?)
def sorted_alphanumeric(l, reverse=False):
"""
Sorts the given iterable alphanumerically.
If values are numeric, sort numerically; if strings, alphanumerically.
If string operations don't work we just sort normally; this works for
numeric types.
"""
try:
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted(l, key=alphanum_key, reverse=reverse)
except TypeError:
return sorted(l, reverse=reverse)
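# Tiny illustration (not part of the original code) of why the alphanumeric key
# matters: plain lexicographic sorting would put 'item10' before 'item2'.
# sorted_alphanumeric(['item10', 'item2', 'item1']) -> ['item1', 'item2', 'item10']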
def guess_positive(categories):
# try to guess which category should be 1 (vs 0)
# go through positive-looking values in order of priority (somewhat arbitrary)
for positive_value in ['true','yes','y','positive','pos','2','1']:
# go through categories in order, since with case-insensitive string
# representations, we could conceivably have more than one match
for category in reversed(categories):
if str(category).lower()==positive_value:
return category
# we didn't see a likely guess
return None
def guess_negative(categories):
# try to guess which category should be encoded as 0
# go through positive-looking values in order of priority (somewhat arbitrary)
for negative_value in ['false', 'no', 'negative', 'neg', 'n', '0', '1']:
# go through categories in order, since with case-insensitive string
# representations, we could conceivably have more than one match
for category in categories:
if str(category).lower()==negative_value:
return category
# we didn't see a likely guess
return None
def one_hot_single(data, categories=None, expand_binary=False,
name=None, base_category=None, treat_missing_as_zero=False):
"""
Given a series of M categorical values,
with N categories,
return (encoded, categories, base_category), where
- encoded is a binary-encoded MxN DataFrame of 0's and 1's,
where each column corresponds to a category
(or Mx(N-1) if we have a base category),
- categories is the list of categories used for encoding,
- base_category is the category that was encoded as a row of
zeroes, or None if no base was used.
The category name is encoded in the columns of the returned DataFrame,
i.e. each column name is of form {OriginalFieldName}_{CategoryName}.
If base_category is provided, encode this category as all zeroes, so that
one less column is created and there is no redundancy.
If expand_binary is False, and input series has only two levels,
keep it as one binary column even if base_category is not provided. In this case,
try to guess which of the two categories is which. The category treated
as base will be returned in base_category and will not appear in the output.
"""
if len(data.shape)>1:
data = data.iloc[:,0] # make Series
vec = data.astype('category')
if categories is None:
categories = vec.cat.categories
else:
# when categories are supplied, check they account for all data
# if user-supplied categories don't cover data,
# pandas' default behaviour will be to create missing data
# we instead want to raise an error
if not set(data).issubset(set(categories)):
print('data', data)
print('supplied categories', categories)
raise ValueError(
"Cannot encode data as categories in categorical field {}".format(name)
+ " do not match categories in previously-fit data. You must set your"
+ " categorical Series to have all expected category levels."
+ " New categories were: {}".format(set(data)-set(categories)))
vec.cat.set_categories(categories, inplace=True)
# Do encoding, with one column per category
# This will incorrectly encode missing values using index of code, i.e. index -1
encoded = pd.DataFrame(np.eye(len(categories), dtype=int)[vec.cat.codes])
# Fix missing values: set to zero or to missing
missing_rows = data.isnull()
if np.any(missing_rows):
if treat_missing_as_zero:
encoded.loc[list(missing_rows),:] = 0
else:
encoded.loc[list(missing_rows),:] = None
if name is None:
name = data.name
encoded.columns = ['{}_{}'.format(name, c) for c in vec.cat.categories]
encoded.index = data.index
# If base_category is None for >2 categories, use no base (create all columns)
# When there are 2 categories and expand_binary=False, we must pick a base
# If we are encoding for transform, in this situation, it will be set already
# If we are encoding for fit we may have to choose based on the data
if base_category is None and not expand_binary and len(categories)==2:
if vec.cat.ordered:
base_category = vec.cat.categories[0]
else:
# try to guess binary base_category (actually, guess positive)
positive_category = guess_positive(categories)
assert positive_category is None or positive_category in categories
# this will be the first category if positive_category was None,
# otherwise the non-positive category
base_category = [c for c in categories if c != positive_category][0]
# If we know or have picked base_category, encode this as zeroes by deleting that column
if base_category is not None:
encoded.drop("{}_{}".format(name, base_category), axis=1, inplace=True)
return (encoded,categories,base_category)
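# Illustrative usage sketch (not part of the original module); the Series name
# and category levels below are hypothetical.
def _demo_one_hot_single():
    s = pd.Series(['red', 'green', 'red', None], name='colour')
    # expand_binary=True keeps one indicator column per category
    encoded, categories, base = one_hot_single(s, expand_binary=True)
    # encoded.columns == ['colour_green', 'colour_red']; the row with the
    # missing input comes back as missing because treat_missing_as_zero=False
    return encoded, categories, base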
def one_hot_group(data, categories=None, expand_binary=False, base_category=None,
name=None, treat_missing_as_zero=False):
"""
Given a dataframe of only categorical values, with M rows,
and a total of N distinct categories,
return (encoded, categories, base_category), where
- encoded is a binary-encoded MxN DataFrame of 0's and 1's,
where each column corresponds to a category
(or Mx(N-1) if we have a base category),
- categories is the list of categories used for encoding,
- base_category is the category that was encoded as a row of
zeroes, or None if no base was used. For one_hot_group this will be the
same base_category that was passed in.
All columns are treated as entries in the *same* category,
i.e. more than one column in a row of the output encoding may be 1.
If any input column is null, it is ignored and fewer 1's are set for that row.
However, if treat_missing_as_zero==False (the default), and ALL inputs for
a row are null, return a row of None's - i.e. assume data is missing.
If treat_missing_as_zero==True, return a row of zeroes - i.e. assume the values
for that row are not missing, but that there are truly no categories set.
The category name is encoded in the columns of the returned DataFrame,
i.e. each column name is of form {name}_{CategoryName}, where name is provided.
If category levels between input columns are not identical, the union of
categories will be used, sorted alphanumerically in output.
If base_category is provided, encode this category as all zeroes, so that
one less column is created and there is no redundancy.
"""
# First make sure categorical; re-encode consistently later
for c in data:
data[c] = data[c].astype('category')
# Get all categories in all columns
category_list = [data[c].cat.categories for c in data]
observed_categories = set().union(*[set(cats) for cats in category_list])
# If categories were provided, check they cover data
if categories is not None:
if not set(observed_categories).issubset(set(categories)):
raise ValueError(
"Cannot encode data as categories in categorical field {}".format(name)
+ " do not match expected categories."
+ " If you used fit_df, you must set your training data Categorical dtype"
+ " to have all expected category levels."
+ " New categories were: {}".format(set(observed_categories)-set(categories)))
else:
# if any one category-set is a superset of all the others, use that
for cats in category_list:
if set(cats) == observed_categories:
categories = cats
break
# if still no valid category set, use union of all
if categories is None:
categories = sorted_alphanumeric(set.union(*[set(cats) for cats in category_list]))
# Re-encode category codes consistently
# (and unordered - this should not matter?)
for c in data:
data[c].cat.set_categories(categories, inplace=True)
# Encode each column and take union
encoded = np.zeros((len(data), len(categories)), dtype=int)
for c in data:
# NB this sets missing values to code -1, which will be mis-encoded by np indexing
vec = data[c].cat.codes
encoded_var = np.eye(len(categories))[vec]
# Fix missing values: set to zero for sake of OR operation
encoded_var[data[c].isnull(),:] = 0
encoded = np.logical_or(encoded, encoded_var)
encoded = pd.DataFrame(encoded.astype(int))
# If desired, set entirely missing values to missing
if not treat_missing_as_zero:
missing_rows = np.all(data.isnull(),axis=1)
if np.any(missing_rows):
encoded.loc[list(missing_rows),:] = None
if name is None:
name = '_'.join(data.columns)
encoded.columns = ['{}_{}'.format(name, c) for c in categories]
encoded.index = data.index
# If we know base_category, encode this as zeroes by deleting that column
if base_category is not None:
encoded.drop("{}_{}".format(name, base_category), axis=1, inplace=True)
return (encoded,categories,base_category)
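# Illustrative usage sketch (not part of the original module): two hypothetical
# columns holding tags from the same category set, collapsed into one
# multi-hot encoded variable.
def _demo_one_hot_group():
    tags = pd.DataFrame({'tag1': ['a', 'b', None],
                         'tag2': ['b', None, None]})
    encoded, categories, base = one_hot_group(tags, name='tag')
    # row 0 sets both tag_a and tag_b, row 1 sets only tag_b, and row 2 is
    # entirely missing, so it is returned as a missing row by default.
    return encoded, categories, base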
def one_hot(data, categories=None, expand_binary=False, base_category=None,
name=None, treat_missing_as_zero=False):
"""
Binary-encode categorical data.
We can handle a single Series or a categorical DataFrame where each column
is to be encoded into the same output variable.
Calls one_hot_single() and one_hot_group().
"""
if base_category is not None and treat_missing_as_zero:
raise ValueError("Cannot use base_category when treat_missing_as_zero "
+ "is True; meaning of all-zero row is overloaded.")
if len(data.shape)==1 or data.shape[1]==1:
# We have a single column
return one_hot_single(data, categories=categories,
expand_binary=expand_binary, base_category=base_category,
name=name, treat_missing_as_zero=treat_missing_as_zero)
else:
# We have a group of multiple columns
return one_hot_group(data, categories=categories,
expand_binary=expand_binary, base_category=base_category,
name=name, treat_missing_as_zero=treat_missing_as_zero)
def one_hot_many(df,
expand_binary=False,
base_categories=None,
grouped_columns=None,
treat_missing_as_zero=False,
map_features=None):
"""
Given a dataframe containing all categorical columns,
one-hot encode them all.
dtypes which can be converted to categories (e.g. strings and booleans) will
also be handled, although any categories missing in the data will not be known.
Convenient usage is to pass in your df with .select_dtypes([np.object, 'category', 'bool']).
If base_categories is provided, it should be a dict mapping
df column names to the category representing the null value for that column.
These will be passed as base_category to one_hot()
grouped_columns should be a dict of the form {outputfieldname:[listofinputfields]}.
Any fields in these lists will be encoded as if they were one binary-encoded
variable, with the union of their values treated as levels in the variable.
"""
if base_categories is None:
base_categories = {}
if grouped_columns is None:
grouped_columns = {}
if map_features is None:
map_features = {}
grouped_inputs = set(sum(grouped_columns.values(),[]))
for field in df.columns:
if field not in grouped_inputs:
grouped_columns[field] = [field]
for field in grouped_inputs:
if field not in df.columns:
raise ValueError('Grouped field {} not in input dataframe'.format(field))
encoded_list = []
feature_mapping = dict()
base_categories_used = dict()
for (fieldname,inputlist) in grouped_columns.items():
encoded_field, categories, base_category = one_hot(
df[inputlist],
treat_missing_as_zero=treat_missing_as_zero,
categories=map_features.get(fieldname,None),
base_category=base_categories.get(fieldname,None),
name=fieldname)
encoded_list.append(encoded_field)
feature_mapping[fieldname] = list(categories)
base_categories_used[fieldname] = base_category # may be none
return (pd.concat(encoded_list, axis=1),
feature_mapping,
base_categories_used)
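# Illustrative usage sketch (not part of the original module): grouped_columns
# pools several input columns into one encoded variable; the field and category
# names here are made up.
def _demo_one_hot_many():
    df = pd.DataFrame({'city': ['NYC', 'LA', 'NYC'],
                       'tag1': ['a', 'b', None],
                       'tag2': ['b', None, 'a']})
    encoded, mapping, bases = one_hot_many(
        df, grouped_columns={'tag': ['tag1', 'tag2']})
    # 'city' is encoded on its own (one of its two levels becomes the base and
    # is dropped), while 'tag1'/'tag2' are pooled into multi-hot 'tag_a'/'tag_b'
    # columns; mapping records the category order used and bases any level
    # encoded as all zeroes.
    return encoded, mapping, bases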
# datetimes are not yet handled
def encode(df,
expand_binary=False,
base_categories=None,
grouped_columns=None,
treat_missing_as_zero=False,
drop_unhandled=False,
map_features=None):
"""
Encode columns of a dataframe numerically, according to their Series dtype.
Return
- the encoded dataframe
- a dict giving any mappings from original columns to encoded columns
- any base categories used for categoricals to remove redundant columns
Encodings are:
- categoricals, objects/strings and booleans will be binary (one-hot) encoded
- numeric columns will be unchanged
- datetimes are currently not handled correctly and will be unchanged, and
a warning will be issued
"""
# Categories
category_columns = df.select_dtypes([np.object, 'category', 'bool']).columns
if len(category_columns) > 0:
encoded_categories, feature_mapping, base_categories_used = one_hot_many(
df.select_dtypes([np.object, 'category', 'bool']),
expand_binary=expand_binary,
base_categories=base_categories,
grouped_columns=grouped_columns,
treat_missing_as_zero=treat_missing_as_zero,
map_features = map_features)
else:
encoded_categories = pd.DataFrame(index=df.index)
feature_mapping = dict()
base_categories_used = dict()
# Numbers
numeric_columns = df.select_dtypes([np.number]).columns
# Dates
date_columns = df.select_dtypes(['datetime']).columns
if len(date_columns) > 0:
print("Warning: dates not yet handled by encoding: {}".format(date_columns))
encoded_columns = set.union(
set(category_columns),set(numeric_columns),set(date_columns))
unhandled_columns = list(set(df.columns) - encoded_columns)
if len(unhandled_columns) > 0:
print("Warning: some column types were not recognised during encoding: {}".format(unhandled_columns))
if drop_unhandled:
result = pd.concat([encoded_categories,
df[numeric_columns],
df[date_columns]], axis=1)
else:
# we are keeping these columns as the user apparently thinks numpy will handle them
result = pd.concat([encoded_categories,
df[numeric_columns],
df[date_columns],
df[unhandled_columns]], axis=1)
return (result, feature_mapping, base_categories_used)
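# Illustrative sketch (not part of the original module): a mixed-dtype frame
# run through encode(); the column names and values are made up.
def _demo_encode():
    df = pd.DataFrame({'colour': ['red', 'green', 'red'],
                       'flag': [True, False, True],
                       'amount': [1.5, 2.0, 3.25]})
    encoded, mapping, bases = encode(df)
    # 'colour' and 'flag' are binary-encoded (each keeps one indicator column
    # since they are two-level), 'amount' passes through unchanged; mapping and
    # bases describe how the categoricals were expanded.
    return encoded, mapping, bases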
fit_docstring = '''
fit_df wraps the fit method, allowing X to be a pandas.DataFrame.
Refer to the fit method documentation for functionality details.
fit parameters are: {}
'''
transform_docstring = '''
transform_df wraps the transform method, allowing X to be a pandas.DataFrame.
Refer to the transform method documentation for functionality details.
transform parameters are: {}
'''
predict_docstring = '''
predict_df wraps the predict method, allowing X to be a pandas.DataFrame.
This method returns a Series of class predictions.
Refer to the predict method documentation for functionality details.
predict parameters are: {}
'''
predict_log_proba_docstring = '''
predict_log_proba_df wraps the predict_log_proba method, allowing X to be a pandas.DataFrame.
This method returns a DataFrame of log-probabilities.
Refer to the predict_log_proba method documentation for functionality details.
predict_log_proba parameters are: {}
'''
predict_proba_docstring = '''
predict_proba_df wraps the predict_proba method, allowing X to be a pandas.DataFrame.
This method returns a DataFrame of probabilities.
Refer to the predict_proba method documentation for functionality details.
predict_proba parameters are: {}
'''
feature_importances_docstring = '''
feature_importances_df_ is a property wrapping feature_importances_,
allowing X to be a pandas.DataFrame. It returns feature importances
as a pandas Series representing the original dataframe fields.
For categorical variables, importances are calculated using the
sqrt of the sum of squares of mapped feature importances.
Refer to the feature_importances_ property documentation for functionality details.
'''
def encode_for_fit(model,
X,
expand_binary=False,
base_categories=None,
grouped_columns=None,
treat_missing_as_zero=False,
drop_unhandled=False):
'''
Encode a dataframe as numerical values, and store all needed metadata
about the encoding in the model.
feature_mapping stored specifies the categories used in the fit, which will
be taken from the Series.cat categories if available, and their order in
the encoding, with the first category considered the base category.
If expand_binary=False and there are two categories for some feature,
the first of the categories in the list will not occur in the encoded data
and will be stored in feature_mapping but not feature_lookup.
feature_lookup covers encoded fieldnames of all types, not just categories,
and allows us to look up the feature name from the original fit dataframe.
'''
# we don't pass in map_features; this will be based on category dtypes
encoded, feature_mapping, base_categories_used = encode(X,
expand_binary=expand_binary, treat_missing_as_zero=treat_missing_as_zero,
base_categories=base_categories, grouped_columns=grouped_columns,
drop_unhandled=drop_unhandled)
model.expand_binary = expand_binary
if grouped_columns is None:
grouped_columns = {}
model.grouped_columns = grouped_columns
model.treat_missing_as_zero = treat_missing_as_zero
model.drop_unhandled = drop_unhandled
model.base_categories_used = base_categories_used
model.features_original = list(X.columns)
model.features_encoded = list(encoded.columns)
model.feature_mapping = feature_mapping
# store lookup of original feature names for all encoded columns
model.feature_lookup = {}
for (feature, mappedlist) in feature_mapping.items():
    # skip the base category (if any), which has no column in the encoding
    base = base_categories_used.get(feature)
    for mapped in mappedlist:
        if mapped == base:
            continue
        model.feature_lookup['{}_{}'.format(feature, mapped)] = feature
# store lookup of original feature names for unmapped features (i.e. non-categorical)
for feature in model.features_encoded:
if feature not in model.feature_lookup:
model.feature_lookup[feature] = feature
return encoded
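# Sketch of the bookkeeping left on the model by encode_for_fit (illustrative,
# with hypothetical field names): for a categorical field 'colour' with levels
# ['green', 'red'] encoded against base 'green' plus a numeric 'amount' column,
# one would expect roughly
#   model.feature_mapping       == {'colour': ['green', 'red']}
#   model.base_categories_used  == {'colour': 'green'}
#   model.feature_lookup        == {'colour_red': 'colour', 'amount': 'amount'}
# so every encoded column can be traced back to its original dataframe field.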
def encode_for_transform(model, X):
'''
Encode a dataframe as numerical values, using the same encoding as used
in the fit.
'''
encoded, _feature_mapping, _base_categories = encode(
X,
expand_binary=model.expand_binary,
base_categories=model.base_categories_used,
map_features=model.feature_mapping,
grouped_columns=model.grouped_columns,
treat_missing_as_zero=model.treat_missing_as_zero,
drop_unhandled=model.drop_unhandled)
return encoded
def add_fit(model):
parameters = inspect.signature(model.fit).parameters
if 'y' in parameters and parameters['y'].default == inspect._empty:
# if we define this elsewhere, model.fit_df.__name__ will be wrong
def fit_df(self,
X,
y,
expand_binary=False,
base_categories=None,
grouped_columns=None,
treat_missing_as_zero=False,
drop_unhandled=False,
**kwargs):
'''
fit_df method to be added to the model.
This docstring should be overwritten - if it's visible,
please report a bug.
'''
encoded = encode_for_fit(self, X, expand_binary=expand_binary,
base_categories=base_categories, grouped_columns=grouped_columns,
treat_missing_as_zero=treat_missing_as_zero,
drop_unhandled=drop_unhandled)
return self.fit(encoded, y, **kwargs)
elif 'y' in parameters:
def fit_df(self,
X,
y=parameters['y'].default,
expand_binary=False,
base_categories=None,
grouped_columns=None,
treat_missing_as_zero=False,
drop_unhandled=False,
**kwargs):
'''
fit_df method to be added to the model.
This docstring should be overwritten - if it's visible,
please report a bug.
'''
encoded = encode_for_fit(self, X, expand_binary=expand_binary,
base_categories=base_categories, grouped_columns=grouped_columns,
treat_missing_as_zero=treat_missing_as_zero,
drop_unhandled=drop_unhandled)
return self.fit(encoded, y, **kwargs)
else:
def fit_df(self,
X,
expand_binary=False,
base_categories=None,
grouped_columns=None,
treat_missing_as_zero=False,
drop_unhandled=False,
**kwargs):
'''
fit_df method to be added to the model.
This docstring should be overwritten - if it's visible,
please report a bug.
'''
encoded = encode_for_fit(self, X, expand_binary=expand_binary,
base_categories=base_categories, grouped_columns=grouped_columns,
treat_missing_as_zero=treat_missing_as_zero,
drop_unhandled=drop_unhandled)
return self.fit(encoded, **kwargs)
model.fit_df = fit_df.__get__(model)
model.fit_df.__func__.__qualname__ = '.'.join(
model.fit.__qualname__.split('.')[:-1] + ['fit_df'])
model.fit_df.__func__.__doc__ = fit_docstring.format(
str(inspect.signature(model.fit)))
# to preserve the original docstring we could also append model.fit.__doc__ here
def add_transform(model):
def transform_df(self, X, **kwargs):
'''
transform_df method to be added to the model.
This docstring should be overwritten - if it's visible,
please report a bug.
'''
encoded = encode_for_transform(self, X)
return self.transform(encoded, **kwargs)
model.transform_df = transform_df.__get__(model)
model.transform_df.__func__.__qualname__ = '.'.join(
model.transform.__qualname__.split('.')[:-1] + ['transform_df'])
model.transform_df.__func__.__doc__ = transform_docstring.format(
str(inspect.signature(model.transform)))
def add_predict(model):
def predict_df(self, X, **kwargs):
'''
predict_df method to be added to the model.
This docstring should be overwritten - if it's visible, please report a bug.
'''
encoded = encode_for_transform(self, X)
# TODO: encode with categorical matching input y?
# will already do classes based on classes_
result = self.predict(encoded, **kwargs)
return pd.Series(result, index=X.index)
model.predict_df = predict_df.__get__(model)
model.predict_df.__func__.__qualname__ = '.'.join(
model.predict.__qualname__.split('.')[:-1] + ['predict_df'])
model.predict_df.__func__.__doc__ = predict_docstring.format(
str(inspect.signature(model.predict)))
def add_predict_proba(model):
def predict_proba_df(self, X, **kwargs):
'''
predict_proba_df method to be added to the model.
This docstring should be overwritten - if it's visible,
please report a bug.
'''
encoded = encode_for_transform(self, X)
result = self.predict_proba(encoded, **kwargs)
return pd.DataFrame(result, index=X.index, columns=self.classes_)
model.predict_proba_df = predict_proba_df.__get__(model)
model.predict_proba_df.__func__.__qualname__ = '.'.join(
model.predict_proba.__qualname__.split('.')[:-1]
+ ['predict_proba_df'])
model.predict_proba_df.__func__.__doc__ = predict_proba_docstring.format(
str(inspect.signature(model.predict_proba)))
def add_predict_log_proba(model):
def predict_log_proba_df(self, X, **kwargs):
'''
predict_log_proba_df method to be added to the model.
This docstring should be overwritten - if it's visible, please report a bug.
'''
encoded = encode_for_transform(self, X)
result = self.predict_log_proba(encoded, **kwargs)
return pd.DataFrame(result, index=X.index, columns=self.classes_)
model.predict_log_proba_df = predict_log_proba_df.__get__(model)
model.predict_log_proba_df.__func__.__qualname__ = '.'.join(
model.predict_log_proba.__qualname__.split('.')[:-1]
+ ['predict_log_proba_df'])
model.predict_log_proba_df.__func__.__doc__ = predict_log_proba_docstring.format(
str(inspect.signature(model.predict_log_proba)))
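# Illustrative wiring sketch (not part of the original module): how these
# helpers might be attached to a scikit-learn estimator so DataFrame inputs
# can be used directly. RandomForestClassifier is just an example choice.
def _demo_wrap_classifier(X_df, y):
    from sklearn.ensemble import RandomForestClassifier
    clf = RandomForestClassifier(n_estimators=10)
    add_fit(clf)
    add_predict(clf)
    add_predict_proba(clf)
    clf.fit_df(X_df, y)                 # categoricals in X_df are encoded here
    return clf.predict_proba_df(X_df)   # DataFrame of class probabilities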
def add_feature_importances(model):
def feature_importances_df(self):
'''
feature_importances_df_ property to be added to the model.
This docstring should be overwritten - if it's visible,
please report a bug.
'''
raw_importances = | pd.Series(self.feature_importances_, index=self.features_encoded) | pandas.Series |
from flask import Flask, render_template, request, redirect, url_for, session
#from flask import g
#from werkzeug.utils import secure_filename
#from sqlalchemy import create_engine
import os
import io
import math
from math import sin, cos, sqrt, atan2, radians
from collections import defaultdict
import numpy as np
import pandas as pd
from pandas.io import sql
import pymysql
import pymysql.cursors
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import scipy.optimize as optimize
import scipy.stats as st
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.tsa.arima_model import ARIMA
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from pulp import *
app = Flask(__name__)
app.secret_key = os.urandom(24)
localaddress="D:\\home\\site\\wwwroot"
localpath=localaddress
os.chdir(localaddress)
@app.route('/')
def index():
return redirect(url_for('home'))
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/demandplanning')
def demandplanning():
return render_template("Demand_Planning.html")
@app.route("/elasticopt",methods = ['GET','POST'])
def elasticopt():
if request.method== 'POST':
start_date =request.form['from']
end_date=request.form['to']
prdct_name=request.form['typedf']
# connection = pymysql.connect(host='localhost',
# user='user',
# password='',
# db='test',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
#
# x=connection.cursor()
# x.execute("select * from `transcdata`")
# connection.commit()
# datass=pd.DataFrame(x.fetchall())
datass = pd.read_csv("C:\\Users\\1026819\\Downloads\\optimizdata.csv")
# datas = datass[(datass['Week']>=start_date) & (datass['Week']<=end_date )]
datas=datass
df = datas[datas['Product'] == prdct_name]
df=datass
changeData=pd.concat([df['Product_Price'],df['Product_Qty']],axis=1)
changep=[]
changed=[]
for i in range(0,len(changeData)-1):
changep.append(changeData['Product_Price'].iloc[i]-changeData['Product_Price'].iloc[i+1])
changed.append(changeData['Product_Qty'].iloc[i]-changeData['Product_Qty'].iloc[i+1])
cpd=pd.concat([pd.DataFrame(changep),pd.DataFrame(changed)],axis=1)
cpd.columns=['Product_Price','Product_Qty']
sortedpricedata=df.sort_values(['Product_Price'], ascending=[True])
spq=pd.concat([sortedpricedata['Product_Price'],sortedpricedata['Product_Qty']],axis=1).reset_index(drop=True)
pint=[]
dint=[]
x = spq['Product_Price']
num_bins = 5
# n, pint, patches = plt.hist(x, num_bins, facecolor='blue', alpha=0.5)
y = spq['Product_Qty']
num_bins = 5
# n, dint, patches = plt.hist(y, num_bins, facecolor='blue', alpha=0.5)
arr= np.zeros(shape=(len(pint),len(dint)))
count=0
for i in range(0, len(pint)):
lbp=pint[i]
if i==len(pint)-1:
ubp=pint[i]+1
else:
ubp=pint[i+1]
for j in range(0, len(dint)):
lbd=dint[j]
if j==len(dint)-1:
ubd=dint[j]+1
else:
ubd=dint[j+1]
print(lbd,ubd)
for k in range(0, len(spq)):
if (spq['Product_Price'].iloc[k]>=lbp\
and spq['Product_Price'].iloc[k]<ubp):
if(spq['Product_Qty'].iloc[k]>=lbd\
and spq['Product_Qty'].iloc[k]<ubd):
count+=1
arr[i][j]+=1
price_range=np.zeros(shape=(len(pint),2))
for j in range(0,len(pint)):
lbp=pint[j]
price_range[j][0]=lbp
if j==len(pint)-1:
ubp=pint[j]+1
price_range[j][1]=ubp
else:
ubp=pint[j+1]
price_range[j][1]=ubp
demand_range=np.zeros(shape=(len(dint),2))
for j in range(0,len(dint)):
lbd=dint[j]
demand_range[j][0]=lbd
if j==len(dint)-1:
ubd=dint[j]+1
demand_range[j][1]=ubd
else:
ubd=dint[j+1]
demand_range[j][1]=ubd
pr=pd.DataFrame(price_range)
pr.columns=['Price','Demand']
dr=pd.DataFrame(demand_range)
dr.columns=['Price','Demand']
priceranges=pr.Price.astype(str).str.cat(pr.Demand.astype(str), sep='-')
demandranges=dr.Price.astype(str).str.cat(dr.Demand.astype(str), sep='-')
price=pd.DataFrame(arr)
price.columns=demandranges
price.index=priceranges
pp=price.reset_index()
global data
data=pd.concat([df['Week'],df['Product_Qty'],df['Product_Price'],df['Comp_Prod_Price'],df['Promo1'],df['Promo2'],df['overallsale']],axis=1)
return render_template('dataview.html',cpd=cpd.values,pp=pp.to_html(index=False),data=data.to_html(index=False),graphdata=data.values,ss=1)
return render_template('dataview.html')
@app.route('/priceelasticity',methods = ['GET','POST'])
def priceelasticity():
return render_template('Optimisation_heatmap_revenue.html')
@app.route("/elasticity",methods = ['GET','POST'])
def elasticity():
if request.method== 'POST':
Price=0
Average_Price=0
Promotions=0
Promotionss=0
if request.form.get('Price'):
Price=1
if request.form.get('Average_Price'):
Average_Price=1
if request.form.get('Promotion_1'):
Promotions=1
if request.form.get('Promotion_2'):
Promotionss=1
Modeldata=pd.DataFrame()
Modeldata['Product_Qty']=data.Product_Qty
lst=[]
for row in data.index:
lst.append(row+1)
Modeldata['Week']=np.log(lst)
if Price == 1:
Modeldata['Product_Price']=data['Product_Price']
if Price == 0:
Modeldata['Product_Price']=0
if Average_Price==1:
Modeldata['Comp_Prod_Price']=data['Comp_Prod_Price']
if Average_Price==0:
Modeldata['Comp_Prod_Price']=0
if Promotions==1:
Modeldata['Promo1']=data['Promo1']
if Promotions==0:
Modeldata['Promo1']=0
if Promotionss==1:
Modeldata['Promo2']=data['Promo2']
if Promotionss==0:
Modeldata['Promo2']=0
diffpriceprodvscomp= (Modeldata['Product_Price']-Modeldata['Comp_Prod_Price'])
promo1=Modeldata.Promo1
promo2=Modeldata.Promo2
week=Modeldata.Week
quantityproduct=Modeldata.Product_Qty
df=pd.concat([quantityproduct,diffpriceprodvscomp,promo1,promo2,week],axis=1)
df.columns=['quantityproduct','diffpriceprodvscomp','promo1','promo2','week']
Model = smf.ols(formula='df.quantityproduct ~ df.diffpriceprodvscomp + df.promo1 + df.promo2 + df.week', data=df)
res = Model.fit()
global intercept,diffpriceprodvscomp_param,promo1_param,promo2_param,week_param
intercept=res.params[0]
diffpriceprodvscomp_param=res.params[1]
promo1_param=res.params[2]
promo2_param=res.params[3]
week_param=res.params[4]
Product_Price_min=0
maxvalue_of_price=int(Modeldata['Product_Price'].max())
Product_Price_max=int(Modeldata['Product_Price'].max())
if maxvalue_of_price==0:
Product_Price_max=1
maxfunction=[]
pricev=[]
weeks=[]
dd=[]
ddl=[]
for vatr in range(0,len(Modeldata)):
weeks.append(lst[vatr])
for Product_Price in range(Product_Price_min,Product_Price_max+1):
function=0
function=(intercept+(Modeldata['Promo1'].iloc[vatr]*promo1_param)+(Modeldata['Promo2'].iloc[vatr]*promo2_param) +
(diffpriceprodvscomp_param*(Product_Price-Modeldata['Comp_Prod_Price'].iloc[vatr]))+(week_param*Modeldata['Week'].iloc[vatr]))
maxfunction.append(function)
dd.append(Product_Price)
ddl.append(vatr)
for Product_Price in range(Product_Price_min,Product_Price_max+1):
pricev.append(Product_Price)
df1=pd.DataFrame(maxfunction)
df2=pd.DataFrame(dd)
df3=pd.DataFrame(ddl)
dfo=pd.concat([df3,df2,df1],axis=1)
dfo.columns=['weeks','prices','Demandfunctions']
demand=[]
for rows in dfo.values:
w=int(rows[0])
p=int(rows[1])
d=int(rows[2])
demand.append([w,p,d])
Co_eff=pd.DataFrame(res.params.values)#intercept
standard_error=pd.DataFrame(res.bse.values)#standard error
p_values=pd.DataFrame(res.pvalues.values)
conf_lower =pd.DataFrame(res.conf_int()[0].values)
conf_higher =pd.DataFrame(res.conf_int()[1].values)
R_square=res.rsquared
atr=['Intercept','DeltaPrice','Promo1','Promo2','Week']
atribute=pd.DataFrame(atr)
SummaryTable=pd.concat([atribute,Co_eff,standard_error,p_values,conf_lower,conf_higher],axis=1)
SummaryTable.columns=['Atributes','Co_eff','Standard_error','P_values','conf_lower','conf_higher']
reshapedf=df1.values.reshape(len(Modeldata),(-Product_Price_min+(Product_Price_max+1)))
dataofmas=pd.DataFrame(reshapedf)
maxv=dataofmas.apply( max, axis=1 )
minv=dataofmas.apply(min,axis=1)
avgv=dataofmas.sum(axis=1)/(-Product_Price_min+(Product_Price_max+1))
wks=pd.DataFrame(weeks)
ddofs=pd.concat([wks,minv,avgv,maxv],axis=1)
dataofmas=pd.DataFrame(reshapedf)
kk=pd.DataFrame()
sums=0
for i in range(0,len(dataofmas.columns)):
sums=sums+i
vv=i*dataofmas[[i]]
kk=pd.concat([kk,vv],axis=1)
dfr=pd.DataFrame(kk)
mrevenue=dfr.apply( max, axis=1 )
prices=dfr.idxmax(axis=1)
wks=pd.DataFrame(weeks)
revenuedf=pd.concat([wks,mrevenue,prices],axis=1)
return render_template('Optimisation_heatmap_revenue.html',revenuedf=revenuedf.values,ddofs=ddofs.values,SummaryTable=SummaryTable.to_html(index=False),ss=1,weeks=weeks,demand=demand,pricev=pricev,R_square=R_square)
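# The regression fitted in /elasticity above corresponds, roughly, to the
# linear demand model
#   qty_t = b0 + b1*(own_price_t - comp_price_t) + b2*promo1_t + b3*promo2_t + b4*log(t)
# whose estimated coefficients (intercept, diffpriceprodvscomp_param,
# promo1_param, promo2_param, week_param) are stored as globals and reused by
# the /maxm optimisation below.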
@app.route('/inputtomaxm',methods=["GET","POST"])
def inputtomaxm():
return render_template("Optimize.html")
@app.route("/maxm",methods=["GET","POST"])
def maxm():
if request.method=="POST":
week=request.form['TimePeriod']
price_low=request.form['Price_Lower']
price_max=request.form['Price_Upper']
promofirst=request.form['Promotion_1']
promosecond=request.form['Promotion_2']
# week=24
# price_low=6
# price_max=20
# promofirst=1
# promosecond=0
#
# time_period=24
#
# global a
# a=243.226225
# global b
# b=-9.699634
# global d
# d=1.671505
# global pr1
# pr1=21.866260
# global pr2
# pr2=-0.511606
# global cm
# cm=-14.559594
# global s_0
# s_0= 2000
# promo1=1
# promo2=0
time_period=int(week)
global a
a=intercept
global b
b=diffpriceprodvscomp_param
global d
d=week_param
global pr1
pr1=promo1_param
global pr2
pr2=promo2_param
global s_0
s_0= 2000
promo1=int(promofirst)
promo2=int(promosecond)
global comp
comp=np.random.randint(7,15,time_period)
def demand(p, a=a, b=b, d=d, promo1=promo1, promo2=promo2, comp=comp, t=np.linspace(1,time_period,time_period)):
""" Return demand given an array of prices p for times t
(see equation 5 above)"""
return a+(b*(p-comp))+(d*t)+(promo1*pr1)+(promo2*pr2)
def objective(p_t, a, b, d,promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
return -1.0 * np.sum( p_t * demand(p_t, a, b, d,promo1,promo2, comp, t) )
def constraint_1(p_t, s_0, a, b, d, promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
""" Inventory constraint. s_0 - np.sum(x_t) >= 0.
This is an inequality constraint. See more below.
"""
return s_0 - np.sum(demand(p_t, a, b, d,promo1,promo2, comp, t))
def constraint_2(p_t):
#""" Positive demand. Another inequality constraint x_t >= 0 """
return p_t
t = np.linspace(1,time_period,time_period)
# Starting values :
b_min=int(price_low)
p_start = b_min * np.ones(len(t))
# bounds on the values :
bmax=int(price_max)
bounds = tuple((0,bmax) for x in p_start)
import scipy.optimize as optimize
# Constraints :
constraints = ({'type': 'ineq', 'fun': lambda x, s_0=s_0: constraint_1(x,s_0, a, b, d,promo1,promo2, comp, t=t)},
{'type': 'ineq', 'fun': lambda x: constraint_2(x)}
)
opt_results = optimize.minimize(objective, p_start, args=(a, b, d,promo1,promo2, comp, t),
method='SLSQP', bounds=bounds, constraints=constraints)
np.sum(opt_results['x'])
opt_price=opt_results['x']
opt_demand=demand(opt_results['x'], a, b, d, promo1, promo2, comp, t=t)
weeks=[]
for row in range(1,len(opt_price)+1):
weeks.append(row)
d=pd.DataFrame(weeks).astype(int)
dd=pd.DataFrame(opt_price)
optimumumprice_perweek=pd.concat([d,dd,pd.DataFrame(opt_demand).astype(int)],axis=1)
optimumumprice_perweek.columns=['Week','Price','Demand']
dataval=optimumumprice_perweek
diff=[]
diffs=[]
for i in range(0,len(opt_demand)-1):
valss=opt_demand[i]-opt_demand[i+1]
diff.append(valss)
diffs.append(i+1)
differenceofdemand_df=pd.concat([pd.DataFrame(diffs),pd.DataFrame(diff)],axis=1)
MP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmin()],1)
minimumprice=pd.DataFrame(MP).T
MaxP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmax()],1)
maximumprice=pd.DataFrame(MaxP).T
averageprice=round((optimumumprice_perweek['Price'].sum()/len(optimumumprice_perweek)),2)
MD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmin()],0)
minimumDemand=pd.DataFrame(MD).T
MaxD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmax()],0)
maximumDemand=pd.DataFrame(MaxD).T
averageDemand=round((optimumumprice_perweek['Demand'].sum()/len(optimumumprice_perweek)),0)
totaldemand=round(optimumumprice_perweek['Demand'].sum(),0)
return render_template("Optimize.html",totaldemand=totaldemand,averageDemand=averageDemand,maximumDemand=maximumDemand.values,minimumDemand=minimumDemand.values,averageprice=averageprice,maximumprice=maximumprice.values,minimumprice=minimumprice.values,dataval=dataval.values,differenceofdemand_df=differenceofdemand_df.values,optimumumprice_perweek=optimumumprice_perweek.to_html(index=False),ll=1)
@app.route("/Inventorymanagment",methods=["GET","POST"])
def Inventorymanagment():
return render_template("Inventory_Management.html")
@app.route("/DISTRIBUTION_NETWORK_OPT",methods=["GET","POST"])
def DISTRIBUTION_NETWORK_OPT():
return render_template("DISTRIBUTION_NETWORK_OPTIMIZATION.html")
@app.route("/Procurement_Plan",methods=["GET","POST"])
def Procurement_Plan():
return render_template("Procurement_Planning.html")
#<NAME>
@app.route("/fleetallocation")
def fleetallocation():
return render_template('fleetallocation.html')
@app.route("/reset")
def reset():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM `input`")
cur.execute("DELETE FROM `output`")
cur.execute("DELETE FROM `Scenario`")
conn.commit()
conn.close()
open(localaddress+'\\static\\demodata.txt', 'w').close()
return render_template('fleetallocation.html')
@app.route("/dalink",methods = ['GET','POST'])
def dalink():
sql = "INSERT INTO `input` (`Route`,`SLoc`,`Ship-to Abb`,`Primary Equipment`,`Batch`,`Prod Dt`,`SW`,`Met Held`,`Heat No`,`Delivery Qty`,`Width`,`Length`,`Test Cut`,`Customer Priority`) VALUES( %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
if request.method == 'POST':
typ = request.form.get('type')
frm = request.form.get('from')
to = request.form.get('to')
if typ and frm and to:
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("SELECT * FROM `inventory_data` WHERE `Primary Equipment` = '" + typ + "' AND `Prod Dt` BETWEEN '" + frm + "' AND '" + to + "'")
res = cur.fetchall()
if len(res)==0:
conn.close()
return render_template('fleetallocation.html',alert='No data available')
sfile = pd.DataFrame(res)
df1 = pd.DataFrame(sfile)
df1['Prod Dt'] =df1['Prod Dt'].astype(object)
for index, i in df1.iterrows():
data = (i['Route'],i['SLoc'],i['Ship-to Abb'],i['Primary Equipment'],i['Batch'],i['Prod Dt'],i['SW'],i['Met Held'],i['Heat No'],i['Delivery Qty'],i['Width'],i['Length'],i['Test Cut'],i['Customer Priority'])
curr.execute(sql,data)
conn.commit()
conn.close()
return render_template('fleetallocation.html',typ=" Equipment type: "+typ,frm="From: "+frm,to=" To:"+to,data = sfile.to_html(index=False))
else:
return render_template('fleetallocation.html',alert ='All input fields are required')
return render_template('fleetallocation.html')
@app.route('/optimise', methods=['GET', 'POST'])
def optimise():
open(localaddress+'\\static\\demodata.txt', 'w').close()
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("DELETE FROM `output`")
conn.commit()
os.system('python optimising.py')
sa=1
cur.execute("SELECT * FROM `output`")
result = cur.fetchall()
if len(result)==0:
say=0
else:
say=1
curr.execute("SELECT * FROM `input`")
sfile = curr.fetchall()
if len(sfile)==0:
conn.close()
return render_template('fleetallocation.html',say=say,sa=sa,alert='No data available')
sfile = pd.DataFrame(sfile)
conn.close()
with open(localaddress+"\\static\\demodata.txt", "r") as f:
content = f.read()
return render_template('fleetallocation.html',say=say,sa=sa,data = sfile.to_html(index=False),content=content)
@app.route("/scenario")
def scenario():
return render_template('scenario.html')
@app.route("/scenario_insert", methods=['GET','POST'])
def scenario_insert():
if request.method == 'POST':
scenario = request.form.getlist("scenario[]")
customer_priority = request.form.getlist("customer_priority[]")
oldest_sw = request.form.getlist("oldest_sw[]")
production_date = request.form.getlist("production_date[]")
met_held_group = request.form.getlist("met_held_group[]")
test_cut_group = request.form.getlist("test_cut_group[]")
sub_grouping_rules = request.form.getlist("sub_grouping_rules[]")
load_lower_bounds = request.form.getlist("load_lower_bounds[]")
load_upper_bounds = request.form.getlist("load_upper_bounds[]")
width_bounds = request.form.getlist("width_bounds[]")
length_bounds = request.form.getlist("length_bounds[]")
description = request.form.getlist("description[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
lngth = len(scenario)
curr.execute("DELETE FROM `scenario`")
if scenario and customer_priority and oldest_sw and production_date and met_held_group and test_cut_group and sub_grouping_rules and load_lower_bounds and load_upper_bounds and width_bounds and length_bounds and description:
say=0
for i in range(lngth):
scenario_clean = scenario[i]
customer_priority_clean = customer_priority[i]
oldest_sw_clean = oldest_sw[i]
production_date_clean = production_date[i]
met_held_group_clean = met_held_group[i]
test_cut_group_clean = test_cut_group[i]
sub_grouping_rules_clean = sub_grouping_rules[i]
load_lower_bounds_clean = load_lower_bounds[i]
load_upper_bounds_clean = load_upper_bounds[i]
width_bounds_clean = width_bounds[i]
length_bounds_clean = length_bounds[i]
description_clean = description[i]
if scenario_clean and customer_priority_clean and oldest_sw_clean and production_date_clean and met_held_group_clean and test_cut_group_clean and sub_grouping_rules_clean and load_lower_bounds_clean and load_upper_bounds_clean and width_bounds_clean and length_bounds_clean:
cur.execute("INSERT INTO `scenario`(scenario, customer_priority, oldest_sw, production_date, met_held_group, test_cut_group, sub_grouping_rules, load_lower_bounds, load_upper_bounds, width_bounds, length_bounds, description) VALUES('"+scenario_clean+"' ,'"+customer_priority_clean+"','"+oldest_sw_clean+"','"+production_date_clean+"','"+met_held_group_clean+"','"+test_cut_group_clean+"', '"+sub_grouping_rules_clean+"','"+load_lower_bounds_clean+"', '"+load_upper_bounds_clean+"','"+width_bounds_clean+"','"+length_bounds_clean+"','"+description_clean+"')")
else:
say = 1
conn.commit()
if(say==0):
alert='All Scenarios inserted'
else:
alert='Some scenarios were not inserted'
return (alert)
conn.close()
return ('All fields are required!')
return ('Failed!!!')
@app.route("/fetch", methods=['GET','POST'])
def fetch():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM scenario")
result = cur.fetchall()
if len(result)==0:
conn.close()
return render_template('scenario.html',alert1='No scenarios Available')
result1 = pd.DataFrame(result)
result1 = result1.drop('sub_grouping_rules', axis=1)
conn.close()
return render_template('scenario.html',sdata = result1.to_html(index=False))
return ("Error")
@app.route("/delete", methods=['GET','POST'])
def delete():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM scenario")
conn.commit()
conn.close()
return render_template('scenario.html',alert1="All the scenarios were dropped!")
return ("Error")
@app.route('/papadashboard', methods=['GET', 'POST'])
def papadashboard():
sql1 = "SELECT `Scenario`, MAX(`Wagon-No`) AS 'Wagon Used', COUNT(`Batch`) AS 'Products Allocated', SUM(`Delivery Qty`) AS 'Total Product Allocated', SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', SUM(`Width`)/(MAX(`Wagon-No`)) AS 'Average Width Used' FROM `output` WHERE `Wagon-No`>0 GROUP BY `Scenario`"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curs = conn.cursor()
curs.execute("SELECT `scenario` FROM `scenario`")
sdata = curs.fetchall()
if len(sdata)==0:
conn.close()
return render_template('warning.html',alert='No data available')
cur1 = conn.cursor()
cur1.execute(sql1)
data1 = cur1.fetchall()
if len(data1)==0:
conn.close()
return render_template('warning.html',alert='Infeasible due to Insufficient Load')
cu = conn.cursor()
cu.execute("SELECT `length_bounds`,`width_bounds`,`load_lower_bounds`,`load_upper_bounds` FROM `scenario`")
sdaa = cu.fetchall()
sdaa = pd.DataFrame(sdaa)
asa=list()
for index, i in sdaa.iterrows():
hover = "Length Bound:"+str(i['length_bounds'])+", Width Bound:"+str(i['width_bounds'])+", Load Upper Bound:"+str(i['load_upper_bounds'])+", Load Lower Bound:"+str(i['load_lower_bounds'])
asa.append(hover)
asa=pd.DataFrame(asa)
asa.columns=['Details']
data1 = pd.DataFrame(data1)
data1['Average Width Used'] = data1['Average Width Used'].astype(int)
data1['Total Product Allocated'] = data1['Total Product Allocated'].astype(int)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(float)
data1['Average Load Carried'] = round(data1['Average Load Carried'],2)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(str)
fdata = pd.DataFrame(columns=['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used','Details'])
fdata[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']] = data1[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']]
fdata['Details'] = asa['Details']
fdata = fdata.values
sql11 = "SELECT `Scenario`, SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', COUNT(`Batch`) AS 'Allocated', SUM(`Delivery Qty`) AS 'Load Allocated' FROM `output`WHERE `Wagon-No`>0 GROUP BY `Scenario`"
sql21 = "SELECT COUNT(`Batch`) AS 'Total Allocated' FROM `output` GROUP BY `Scenario`"
sql31 = "SELECT `load_upper_bounds` FROM `scenario`"
conn1 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur11 = conn1.cursor()
cur21 = conn1.cursor()
cur31 = conn1.cursor()
cur11.execute(sql11)
data11 = cur11.fetchall()
data11 = pd.DataFrame(data11)
cur21.execute(sql21)
data21 = cur21.fetchall()
data21 = pd.DataFrame(data21)
cur31.execute(sql31)
data31 = cur31.fetchall()
data31 = pd.DataFrame(data31)
data11['Average Load Carried']=data11['Average Load Carried'].astype(float)
fdata1 = pd.DataFrame(columns=['Scenario','Utilisation Percent','Allocation Percent','Total Load Allocated'])
fdata1['Utilisation Percent'] = round(100*(data11['Average Load Carried']/data31['load_upper_bounds']),2)
data11['Load Allocated']=data11['Load Allocated'].astype(int)
fdata1[['Scenario','Total Load Allocated']]=data11[['Scenario','Load Allocated']]
data11['Allocated']=data11['Allocated'].astype(float)
data21['Total Allocated']=data21['Total Allocated'].astype(float)
fdata1['Allocation Percent'] = round(100*(data11['Allocated']/data21['Total Allocated']),2)
fdata1['Allocation Percent'] = fdata1['Allocation Percent'].astype(str)
fdat1 = fdata1.values
conn1.close()
if request.method == 'POST':
conn2 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn2.cursor()
ata = request.form['name']
cur.execute("SELECT * FROM `output` WHERE `Scenario` = '"+ata+"' ")
ssdata = cur.fetchall()
datasss = pd.DataFrame(ssdata)
data=datasss.replace("Not Allocated", 0)
df=data[['Delivery Qty','Wagon-No','Width','Group-Number']]
df['Wagon-No']=df['Wagon-No'].astype(int)
a=df['Wagon-No'].max()
##bar1
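# Per-wagon utilisation: delivery quantity as a % of the hard-coded 205000 load capacity, and width as a % of the hard-coded 370 width capacity.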
result_array = np.array([])
for i in range (a):
data_i = df[df['Wagon-No'] == i+1]
del_sum_i = data_i['Delivery Qty'].sum()
per_i=[((del_sum_i)/(205000)*100)]
result_array = np.append(result_array, per_i)
result_array1 = np.array([])
for j in range (a):
data_j = df[df['Wagon-No'] == j+1]
del_sum_j = data_j['Width'].sum()
per_util_j=[((del_sum_j)/(370)*100)]
result_array1 = np.append(result_array1, per_util_j)
##pie1
df112 = df[df['Wagon-No'] == 0]
pie1 = df112 ['Width'].sum()
df221 = df[df['Wagon-No'] > 0]
pie11 = df221['Width'].sum()
df1=data[['SW','Group-Number']]
dff1 = df1[data['Wagon-No'] == 0]
da1 =dff1.groupby(['SW']).count()
re11 = np.array([])
res12 = np.append(re11,da1)
da1['SW'] = da1.index
r1 = np.array([])
r12 = np.append(r1, da1['SW'])
df0=data[['Group-Number','Route','SLoc','Ship-to Abb','Wagon-No','Primary Equipment']]
df1=df0.replace("Not Allocated", 0)
f2 = pd.DataFrame(df1)
f2['Wagon-No']=f2['Wagon-No'].astype(int)
####Not-Allocated
f2['Group']=data['Group-Number']
df=f2[['Group','Wagon-No']]
dee = df[df['Wagon-No'] == 0]
deer =dee.groupby(['Group']).count()##Not Allocated
deer['Group'] = deer.index
##Total-Data
f2['Group1']=data['Group-Number']
dfc=f2[['Group1','Wagon-No']]
dfa=pd.DataFrame(dfc)
der = dfa[dfa['Wagon-No'] >= 0]
dear =der.groupby(['Group1']).count()##Wagons >1
dear['Group1'] = dear.index
dear.rename(columns={'Wagon-No': 'Allocated'}, inplace=True)
result = pd.concat([deer, dear], axis=1).reindex(dear.index)
resu=result[['Group1','Wagon-No','Allocated']]
result1=resu.fillna(00)
r5 = np.array([])
r6 = np.append(r5, result1['Wagon-No'])
r66=r6[0:73]###Not Allocated
r7 = np.append(r5, result1['Allocated'])
r77=r7[0:73]####total
r8 = np.append(r5, result1['Group1'])
r88=r8[0:73]###group
conn2.close()
return render_template('papadashboard.html',say=1,data=fdata,data1=fdat1,ata=ata,bar1=result_array,bar11=result_array1,pie11=pie1,pie111=pie11,x=r12,y=res12,xname=r88, bar7=r77,bar8=r66)
conn.close()
return render_template('papadashboard.html',data=fdata,data1=fdat1)
@app.route('/facilityallocation')
def facilityallocation():
return render_template('facilityhome.html')
@app.route('/dataimport')
def dataimport():
return render_template('facilityimport.html')
@app.route('/dataimport1')
def dataimport1():
return redirect(url_for('dataimport'))
@app.route('/facility_location')
def facility_location():
return render_template('facility_location.html')
@app.route('/facility')
def facility():
return redirect(url_for('facilityallocation'))
@app.route("/imprt", methods=['GET','POST'])
def imprt():
global customerdata
global factorydata
global Facyy
global Custo
customerfile = request.files['CustomerData'].read()
factoryfile = request.files['FactoryData'].read()
if len(customerfile)==0 or len(factoryfile)==0:
return render_template('facilityhome.html',warning='Data Invalid')
cdat=pd.read_csv(io.StringIO(customerfile.decode('utf-8')))
customerdata=pd.DataFrame(cdat)
fdat=pd.read_csv(io.StringIO(factoryfile.decode('utf-8')))
factorydata=pd.DataFrame(fdat)
Custo=customerdata.drop(['Lat','Long'],axis=1)
Facyy=factorydata.drop(['Lat','Long'],axis=1)
return render_template('facilityimport1.html',loc1=factorydata.values,loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False))
@app.route("/gmap")
def gmap():
custdata=customerdata
Factorydata=factorydata
price=1
#to get distance beetween customer and factory
#first get the Dimension
#get no of factories
Numberoffact=len(Factorydata)
#get Number of Customer
Numberofcust=len(custdata)
#Get The dist/unit cost
cost=price
#def function for distance calculation
# approximate radius of earth in km
def dist(lati1,long1,lati2,long2,cost):
R = 6373.0
lat1 = radians(lati1)
lon1 = radians(long1)
lat2 = radians(lati2)
lon2 = radians(long2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance =round(R * c,2)
return distance*cost
#Create a list for customer and factory
def costtable(custdata,Factorydata):
distance=list()
for lat1,long1 in zip(custdata.Lat, custdata.Long):
for lat2,long2 in zip(Factorydata.Lat, Factorydata.Long):
distance.append(dist(lat1,long1,lat2,long2,cost))
distable=np.reshape(distance, (Numberofcust,Numberoffact)).T
tab=pd.DataFrame(distable,index=[Factorydata.Factory],columns=[custdata.Customer])
return tab
DelCost=costtable(custdata,Factorydata)#return cost table of the customer and factoery
#creating Demand Table
demand=np.array(custdata.Demand)
col1=np.array(custdata.Customer)
Demand=pd.DataFrame(demand,col1).T
cols=sorted(col1)
#Creating capacity table
fact=np.array(Factorydata.Capacity)
col2=np.array(Factorydata.Factory)
Capacity=pd.DataFrame(fact,index=col2).T
colo=sorted(col2)
#creating Fixed cost table
fixed_c=np.array(Factorydata.FixedCost)
col3=np.array(Factorydata.Factory)
FixedCost= pd.DataFrame(fixed_c,index=col3)
# Create the 'prob' variable to contain the problem data
model = LpProblem("Min Cost Facility Location problem",LpMinimize)
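# Decision variables: integer shipment quantity per (factory, customer) pair, a binary open/closed status per factory, and an integer unmet-demand slack per customer that is heavily penalised in the objective.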
production = pulp.LpVariable.dicts("Production",
((factory, cust) for factory in Capacity for cust in Demand),
lowBound=0,
cat='Integer')
factory_status =pulp.LpVariable.dicts("factory_status", (factory for factory in Capacity),
cat='Binary')
cap_slack =pulp.LpVariable.dicts("capslack",
(cust for cust in Demand),
lowBound=0,
cat='Integer')
model += (
pulp.lpSum(DelCost.loc[factory, cust] * production[factory, cust] for factory in Capacity for cust in Demand)
+ pulp.lpSum(FixedCost.loc[factory] * factory_status[factory] for factory in Capacity)
+ pulp.lpSum(5000000*cap_slack[cust] for cust in Demand))
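# Demand constraints: shipments into each customer plus its slack must equal that customer's demand.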
for cust in Demand:
model += pulp.lpSum(production[factory, cust] for factory in Capacity)+cap_slack[cust] == Demand[cust]
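# Capacity constraints: a factory can ship at most its capacity, and only if its status variable marks it open.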
for factory in Capacity:
model += pulp.lpSum(production[factory, cust] for cust in Demand) <= Capacity[factory]*factory_status[factory]
model.solve()
print("Status:", LpStatus[model.status])
for v in model.variables():
print(v.name, "=", v.varValue)
print("Total Cost of Ingredients per can = ", value(model.objective))
# Getting the table for the Factorywise Allocation
def factoryalloc(model,Numberoffact,Numberofcust,listoffac,listofcus):
listj=list()
listk=list()
listcaps=list()
for v in model.variables():
listj.append(v.varValue)
customer=listj[(len(listj)-Numberofcust-Numberoffact):(len(listj)-Numberoffact)]
del listj[(len(listj)-Numberoffact-Numberofcust):len(listj)]
for row in listj:
if row==0:
listk.append(0)
else:
listk.append(1)
x=np.reshape(listj,(Numberoffact,Numberofcust))
y=np.reshape(listk,(Numberoffact,Numberofcust))
FactoryAlloc_table=pd.DataFrame(x,index=listoffac,columns=listofcus)
Factorystatus=pd.DataFrame(y,index=listoffac,columns=listofcus)
return FactoryAlloc_table,Factorystatus,customer
Alltable,FactorystatusTable,ded=factoryalloc(model,Numberoffact,Numberofcust,colo,cols)
Allstatus=list()
dede=pd.DataFrame(ded,columns=['UnSatisfied'])
finaldede=dede[dede.UnSatisfied != 0]
colss=pd.DataFrame(cols,columns=['CustomerLocation'])
fina=pd.concat([colss,finaldede],axis=1, join='inner')
print(fina)
for i in range(len(Alltable)):
for j in range(len(Alltable.columns)):
if (Alltable.loc[Alltable.index[i], Alltable.columns[j]]>0):
all=[Alltable.index[i], Alltable.columns[j], Alltable.loc[Alltable.index[i], Alltable.columns[j]]]
Allstatus.append(all)
Status=pd.DataFrame(Allstatus,columns=['Factory','Customer','Allocation']).astype(str)
#To get the Factory Data
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
#Making Connection to the Database
cur = con.cursor()
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Status.to_sql(con=engine, name='facilityallocation',index=False, if_exists='replace')
cur = con.cursor()
cur1 = con.cursor()
cur.execute("SELECT * FROM `facilityallocation`")
file=cur.fetchall()
dat=pd.DataFrame(file)
lst=dat[['Factory','Customer']]
mlst=[]
names=lst['Factory'].unique().tolist()
for name in names:
lsty=lst.loc[lst.Factory==name]
mlst.append(lsty.values)
data=dat[['Factory','Customer','Allocation']]
sql="SELECT SUM(`Allocation`) AS 'UseCapacity', `Factory` FROM `facilityallocation` GROUP BY `Factory`"
cur1.execute(sql)
file2=cur1.fetchall()
udata=pd.DataFrame(file2)
bdata=factorydata.sort_values(by=['Factory'])
adata=bdata['Capacity']
con.close()
infdata=dat[['Customer','Factory','Allocation']]
infodata=infdata.sort_values(by=['Customer'])
namess=infodata.Customer.unique()
lstyy=[]
for nam in namess:
bb=infodata[infodata.Customer==nam]
comment=bb['Factory']+":"+bb['Allocation']
prin=[nam,str(comment.values).strip('[]')]
lstyy.append(prin)
return render_template('facilityoptimise.html',say=1,lstyy=lstyy,x1=adata.values,x2=udata.values,dat=mlst,loc1=factorydata.values,
loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False),summary=data.to_html(index=False))
#Demand Forecast
@app.route('/demandforecast')
def demandforecast():
return render_template('demandforecast.html')
@app.route("/demandforecastdataimport",methods = ['GET','POST'])
def demandforecastdataimport():
if request.method== 'POST':
global actualforecastdata
flat=request.files['flat'].read()
if len(flat)==0:
return('No Data Selected')
cdat=pd.read_csv(io.StringIO(flat.decode('utf-8')))
actualforecastdata=pd.DataFrame(cdat)
return render_template('demandforecast.html',data=actualforecastdata.to_html(index=False))
@app.route('/demandforecastinput', methods = ['GET', 'POST'])
def demandforecastinput():
if request.method=='POST':
global demandforecastfrm
global demandforecasttoo
global demandforecastinputdata
demandforecastfrm=request.form['from']
demandforecasttoo=request.form['to']
value=request.form['typedf']
demandforecastinputdata=actualforecastdata[(actualforecastdata['Date'] >= demandforecastfrm) & (actualforecastdata['Date'] <= demandforecasttoo)]
if value=='monthly': ##monthly
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
demandforecastinputdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('monthlyforecast'))
if value=='quarterly': ##quarterly
global Quaterdata
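# Collapse the monthly rows into calendar quarters: map every date to the first day of its quarter, then sum all columns per quarter.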
dated2 = demandforecastinputdata['Date']
nlst=[]
for var in dated2:
var1 = int(var[5:7])
if var1 >=1 and var1 <4:
varr=var[:4]+'-01-01'
elif var1 >=4 and var1 <7:
varr=var[:4]+'-04-01'
elif var1 >=7 and var1 <10:
varr=var[:4]+'-07-01'
else:
varr=var[:4]+'-10-01'
nlst.append(varr)
nwlst=pd.DataFrame(nlst,columns=['Newyear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=nwlst['Newyear']
Quaterdata=demandforecastinputdata.groupby(['Date']).sum()
Quaterdata=Quaterdata.reset_index()
Quaterdata=Quaterdata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Quaterdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('quarterlyforecast'))
if value=='yearly': ##yearly
global Yeardata
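# Collapse the rows into calendar years: map every date to January 1st of its year, then sum all columns per year.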
#copydata=demandforecastinputdata
dated1 = demandforecastinputdata['Date']
lst=[]
for var in dated1:
var1 = var[:4]+'-01-01'
lst.append(var1)
newlst=pd.DataFrame(lst,columns=['NewYear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=newlst['NewYear']
Yeardata=demandforecastinputdata.groupby(['Date']).sum()
Yeardata=Yeardata.reset_index()
Yeardata=Yeardata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Yeardata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('yearlyforecast'))
#if value=='weekly': ##weekly
# return redirect(url_for('output4'))
return render_template('demandforecast.html')
@app.route("/monthlyforecast",methods = ['GET','POST'])
def monthlyforecast():
data = pd.DataFrame(demandforecastinputdata)
# container1
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])
# container2
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])
# container3
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])
# container4
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])
# container1
df=a1[['GDP']]
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutput`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutput`")
con.commit()
sql = "INSERT INTO `forecastoutput` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
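# 'Moving Average' here is fitted as an ARIMA(0,0,1) (i.e. MA(1)) model per demand column; each series is forecast from the end of the training window to the user-selected horizon, and error metrics are taken on TotalDemand only.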
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
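# Trend regression: fit ordinary least squares of each column against its time index, score it on the hold-out split V, and extrapolate noofterms periods ahead.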
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Atributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now riun for each row
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy Of the mODEL
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
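# Simple exponential smoothing with alpha=0.5; future periods are rolled forward from the last smoothed and actual values.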
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],pred)
meanaverr=MAE(data[data.columns.tolist()[i]],pred)
mperr=MAPE(data[data.columns.tolist()[i]],pred)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
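# Collect the TotalDemand forecasts of every selected model into one summary table keyed by date, persist it (and the error table df2) to MySQL, and render it.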
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutput',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutput`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('monthly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('monthly.html',sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##quarterly
@app.route("/quarterlyforecast",methods = ['GET','POST'])
def quarterlyforecast():
data = pd.DataFrame(Quaterdata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
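# GDP and Market_Share were summed over the three months of each quarter during aggregation; they are divided by 3 below, presumably to chart their monthly-average level.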
df=a1[['GDP']]/3
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/3
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputq`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputq`")
con.commit()
sql = "INSERT INTO `forecastoutputq` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Quarterly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='3M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='3M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Atributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now riun for each row
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy Of the mODEL
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)