import warnings
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Index, MultiIndex
import pandas._testing as tm
def test_drop(idx):
dropped = idx.drop([("foo", "two"), ("qux", "one")])
index = MultiIndex.from_tuples([("foo", "two"), ("qux", "one")])
dropped2 = idx.drop(index)
expected = idx[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = idx.drop(["bar"])
expected = idx[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = idx.drop("foo")
expected = idx[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([("bar", "two")])
with pytest.raises(KeyError, match=r"^10$"):
idx.drop([("bar", "two")])
with pytest.raises(KeyError, match=r"^10$"):
idx.drop(index)
with pytest.raises(KeyError, match=r"^'two'$"):
idx.drop(["foo", "two"])
# partially correct argument
mixed_index = MultiIndex.from_tuples([("qux", "one"), ("bar", "two")])
with pytest.raises(KeyError, match=r"^10$"):
idx.drop(mixed_index)
# error='ignore'
dropped = idx.drop(index, errors="ignore")
expected = idx[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = idx.drop(mixed_index, errors="ignore")
expected = idx[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = idx.drop(["foo", "two"], errors="ignore")
expected = idx[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = idx.drop(["foo", ("qux", "one")])
expected = idx[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ["foo", ("qux", "one"), "two"]
with pytest.raises(KeyError, match=r"^'two'$"):
idx.drop(mixed_index)
dropped = idx.drop(mixed_index, errors="ignore")
expected = idx[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(idx):
index = idx[idx.get_loc("foo")]
dropped = index.droplevel(0)
assert dropped.name == "second"
index = MultiIndex(
levels=[Index(range(4)), Index(range(4)), Index(range(4))],
codes=[
np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0]),
],
names=["one", "two", "three"],
)
dropped = index.droplevel(0)
assert dropped.names == ("two", "three")
dropped = index.droplevel("two")
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list():
index = MultiIndex(
levels=[Index(range(4)), Index(range(4)), Index(range(4))],
codes=[
np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0]),
],
names=["one", "two", "three"],
)
dropped = index[:2].droplevel(["three", "one"])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
msg = (
"Cannot remove 3 levels from an index with 3 levels: "
"at least one level must be left"
)
with pytest.raises(ValueError, match=msg):
index[:2].droplevel(["one", "two", "three"])
with pytest.raises(KeyError, match="'Level four not found'"):
index[:2].droplevel(["one", "four"])
def test_drop_not_lexsorted():
# GH 12078
# define the lexsorted version of the multi-index
tuples = [("a", ""), ("b1", "c1"), ("b2", "c2")]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=["b", "c"])
assert lexsorted_mi._is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(
columns=["a", "b", "c", "d"], data=[[1, "b1", "c1", 3], [1, "b2", "c2", 4]]
)
df = df.pivot_table(index="a", columns=["b", "c"], values="d")
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi._is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
# -*- coding: utf-8 -*-
# Copyright FMR LLC <<EMAIL>>
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
import pandas as pd
from jurity.classification import BinaryClassificationMetrics
class TestClassificationMetrics(unittest.TestCase):
def test_accuracy(self):
# Data
actual = [1, 1, 0, 1, 0, 0]
predicted = [1, 1, 0, 1, 0, 0]
# Metric
metric = BinaryClassificationMetrics.Accuracy()
# Score
score = metric.get_score(actual, predicted)
self.assertEqual(score, 1)
def test_accuracy_numpy(self):
# Data
actual = np.array([1, 1, 0, 1, 0, 0])
predicted = np.array([0, 0, 0, 0, 0, 0])
# Metric
metric = BinaryClassificationMetrics.Accuracy()
# Score
score = metric.get_score(actual, predicted)
self.assertEqual(score, 0.5)
def test_accuracy_pandas(self):
# Data
actual = pd.Series([0, 0, 0, 0, 0, 0])
predicted = pd.Series([0, 0, 0, 0, 0, 0])
# Metric
metric = BinaryClassificationMetrics.Accuracy()
# Score
score = metric.get_score(actual, predicted)
self.assertEqual(score, 1)
def test_accuracy_non_binary_input(self):
# Data
actual = [0, 1, 2, 0, 0, 0]
predicted = [0, 0, 0, 0, 0, 0]
# Metric
metric = BinaryClassificationMetrics.Accuracy()
# Score
with self.assertRaises(ValueError):
metric.get_score(actual, predicted)
def test_accuracy_non_zero_one_input(self):
# Data
actual = ['a', 'b', 'a', 'a']
predicted = ['a', 'b', 'a', 'a']
# Metric
metric = BinaryClassificationMetrics.Accuracy()
# Score
with self.assertRaises(ValueError):
metric.get_score(actual, predicted)
def test_auc(self):
# Data
actual = [1, 1, 0, 1, 0, 0]
likelihoods = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
# Metric
metric = BinaryClassificationMetrics.AUC()
# Score
score = metric.get_score(actual, likelihoods)
self.assertEqual(score, 0.5)
def test_auc_numpy(self):
# Data
actual = np.array([1, 1, 0, 1, 0, 0])
likelihoods = np.array([1, 1, 1, 0.5, 0.5, 0.5])
# Metric
metric = BinaryClassificationMetrics.AUC()
# Score
score = metric.get_score(actual, likelihoods)
self.assertEqual(score, 0.6666666666666667)
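# Worked check of the expected AUC above (illustrative, not part of the original test):
# positives are scored {1, 1, 0.5} and negatives {1, 0.5, 0.5}; counting pairs where a
# positive outranks a negative (ties counted as 0.5) gives (4 + 0.5 * 4) / 9 = 6/9 ≈ 0.6667.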
def test_auc_pandas(self):
# Data
actual = pd.Series([0, 0, 0, 0, 0, 1])
likelihoods = pd.Series([0, 0, 0, 0.5, 0.5, 0.5])
# Metric
metric = BinaryClassificationMetrics.AUC()
# Score
score = metric.get_score(actual, likelihoods)
self.assertEqual(score, 0.8)
def test_auc_non_binary_input(self):
# Data
actual = [0, 1, 2, 0, 0, 0]
likelihoods = [0, 0, 0, 0.5, 0.5, 0.5]
# Metric
metric = BinaryClassificationMetrics.AUC()
# Score
with self.assertRaises(ValueError):
metric.get_score(actual, likelihoods)
def test_auc_non_zero_one_input(self):
# Data
actual = ['a', 'b', 'a', 'a']
likelihoods = [0, 0, 0, 0.5, 0.5, 0.5]
# Metric
metric = BinaryClassificationMetrics.AUC()
# Score
with self.assertRaises(ValueError):
metric.get_score(actual, likelihoods)
def test_auc_likelihood_input(self):
# Data
actual = [1, 1, 0, 1, 0, 0]
likelihoods = [100, 0, 0, 0.5, 0.5, 0.5]
# Metric
metric = BinaryClassificationMetrics.AUC()
# Score
with self.assertRaises(ValueError):
metric.get_score(actual, likelihoods)
def test_f1(self):
# Data
actual = [1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1]
predicted = [1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0]
# Metric
metric = BinaryClassificationMetrics.F1()
# Score
score = metric.get_score(actual, predicted)
self.assertEqual(score, 0.6666666666666666)
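# Worked check of the expected F1 above (illustrative, not part of the original test):
# the prediction yields TP=5, FP=2, FN=3, so precision = 5/7, recall = 5/8 and
# F1 = 2 * (5/7) * (5/8) / (5/7 + 5/8) = 50/75 ≈ 0.6667.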
def test_f1_numpy(self):
# Data
actual = np.array([1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1])
predicted = np.array([1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0])
# Metric
metric = BinaryClassificationMetrics.F1()
# Score
score = metric.get_score(actual, predicted)
self.assertEqual(score, 0.6666666666666666)
def test_f1_pandas(self):
# Data
actual = pd.Series([1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1])
predicted = pd.Series([1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0])
# Metric
metric = BinaryClassificationMetrics.F1()
# Score
score = metric.get_score(actual, predicted)
self.assertEqual(score, 0.6666666666666666)
"""
Functions for retrieving summary data from a dataset.
"""
from __future__ import annotations
import typing
from collections import defaultdict
import datetime
import warnings
import numpy as np
import pandas as pd
import pandas.io.formats.style
import idelib.dataset
from .measurement import MeasurementType, ANY, get_channels
from .files import get_doc
from .util import parse_time
__all__ = [
"get_channel_table",
"to_pandas",
"get_primary_sensor_data",
]
# ============================================================================
# Display formatting functions
# ============================================================================
def format_channel_id(ch: idelib.dataset.Channel) -> str:
""" Function for formatting an `idelib.dataset.Channel` or `SubChannel`
for display. Renders as only the channel and subchannel IDs (the other
information is shown in the rest of the table).
:param ch: The `idelib.dataset.Channel` or `idelib.dataset.SubChannel`
to format.
:return: A formatted "channel.subchannel" string.
"""
try:
if ch.parent:
return f"{ch.parent.id}.{ch.id}"
else:
return f"{ch.id}.*"
except (AttributeError, TypeError, ValueError) as err:
warnings.warn(f"format_channel_id({ch!r}) raised {type(err).__name__}: {err}")
return str(ch)
def format_timedelta(val: typing.Union[int, float, datetime.datetime, datetime.timedelta]) -> str:
""" Function for formatting microsecond timestamps (e.g., start, end,
or duration) as times. Somewhat more condensed than the standard
`DataFrame` formatting of `datetime.timedelta`.
:param val: The `pandas.Timedelta` or `datetime.timedelta` to format.
Will also work with microseconds as `float` or `int`.
:return: A formatted time 'duration' string.
"""
try:
if isinstance(val, datetime.timedelta):
td = pd.Timedelta(val)
else:
td = pd.Timedelta(microseconds=val)
# NOTE: `components` attr only exists in pandas `Timedelta`
c = td.components
s = f"{c.minutes:02d}:{c.seconds:02d}.{c.milliseconds:04d}"
if c.hours or c.days:
s = f"{c.hours:02d}:{s}"
if c.days:
s = f"{c.days}d {s}"
return s
except (AttributeError, TypeError, ValueError) as err:
warnings.warn(f"format_timedelta({val!r}) raised {type(err).__name__}: {err}")
return str(val)
def format_timestamp(ts: typing.Union[int, float]) -> str:
""" Function for formatting start/end timestamps. Somewhat more condensed
than the standard Pandas formatting.
:param ts: The timestamps in microseconds. Rendered as integers, since
`idelib` timestamps have whole microsecond resolution.
:return: A formatted timestamp string, with units.
"""
try:
return f"{int(ts)} µs"
except (TypeError, ValueError) as err:
warnings.warn(f"format_timestamp({ts!r}) raised {type(err).__name__}: {err}")
return str(ts)
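# Illustrative behaviour of the formatters above (not part of the original module):
# >>> format_timedelta(90_000_000)   # 90 s, given in microseconds
# '01:30.0000'
# >>> format_timestamp(1_500_000)
# '1500000 µs'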
# ============================================================================
#
# ============================================================================
""" The default table formatting. """
TABLE_FORMAT = {
'channel': format_channel_id,
'start': format_timedelta,
'end': format_timedelta,
'duration': format_timedelta,
'rate': "{:.2f} Hz",
}
def get_channel_table(dataset: typing.Union[idelib.dataset.Dataset, list],
measurement_type=ANY,
start: typing.Union[int, float, str, datetime.datetime, datetime.timedelta] = 0,
end: typing.Optional[typing.Union[int, float, str, datetime.datetime, datetime.timedelta]] = None,
formatting: typing.Optional[dict] = None,
index: bool = True,
precision: int = 4,
timestamps: bool = False,
**kwargs) -> typing.Union[pd.DataFrame, pd.io.formats.style.Styler]:
""" Get summary data for all `SubChannel` objects in a `Dataset` that
contain one or more type of sensor data. By using the optional
`start` and `end` parameters, information can be retrieved for a
specific interval of time.
The `start` and `end` times, if used, may be specified in several
ways:
* `int`/`float` (Microseconds from the recording start)
* `str` (formatted as a time from the recording start, e.g., `MM:SS`,
`HH:MM:SS`, `DDd HH:MM:SS`). More examples:
* ``":01"`` or ``":1"`` or ``"1s"`` (1 second)
* ``"22:11"`` (22 minutes, 11 seconds)
* ``"3:22:11"`` (3 hours, 22 minutes, 11 seconds)
* ``"1d 3:22:11"`` (1 day, 3 hours, 22 minutes, 11 seconds)
* `datetime.timedelta` or `pandas.Timedelta` (time from the
recording start)
* `datetime.datetime` (an explicit UTC time)
:param dataset: A `idelib.dataset.Dataset` or a list of
channels/subchannels from which to build the table.
:param measurement_type: A :py:class:`~endaq.ide.MeasurementType`, a
measurement type 'key' string, or a string of multiple keys
generated by adding and/or subtracting
:py:class:`~endaq.ide.MeasurementType` objects to filter the
results. Any 'subtracted' types will be excluded.
:param start: The starting time. Defaults to the start of the
recording.
:param end: The ending time. Defaults to the end of the recording.
:param formatting: A dictionary of additional style/formatting items
(see `pandas.DataFrame.style.format()`). If `False`, no additional
formatting is applied.
:param index: If `True`, show the index column on the left.
:param precision: The default decimal precision to display. Can be
changed later.
:param timestamps: If `True`, show the start and end as raw
microsecond timestamps.
:returns: A table (`pandas.io.formats.style.Styler`) of summary data.
:rtype: pandas.DataFrame
"""
# We don't support multiple sessions on current Slam Stick/enDAQ recorders,
# but in the event we ever do, this allows one to be specified like so:
# :param session: A `Session` or session ID to retrieve from a
# multi-session recording.
# Leave out of docstring until we ever support it.
session = kwargs.get('session', None)
if session:
session = getattr(session, 'sessionId', session)
if hasattr(dataset, 'getPlots'):
sources = get_channels(dataset, measurement_type)
else:
sources = dataset
result = defaultdict(list)
for source in sources:
range_start = range_end = duration = rate = session_start = None
samples = 0
data = source.getSession(session)
if data.session.utcStartTime:
session_start = datetime.datetime.utcfromtimestamp(data.session.utcStartTime)
start = parse_time(start, session_start)
end = parse_time(end, session_start)
if len(data):
if not start and not end:
start_idx, end_idx = 0, -1
samples = len(data)
else:
start_idx, end_idx = data.getRangeIndices(start, end)
end_idx = min(len(data) - 1, end_idx)
if end_idx < 0:
samples = len(data) - start_idx - 1
else:
samples = end_idx - start_idx
range_start = data[int(start_idx)][0]
range_end = data[int(end_idx)][0]
duration = range_end - range_start
rate = samples / (duration / 10 ** 6)
result['channel'].append(source)
result['name'].append(source.name)
result['type'].append(source.units[0])
result['units'].append(source.units[1])
result['start'].append(range_start)
result['end'].append(range_end)
result['duration'].append(duration)
result['samples'].append(samples)
result['rate'].append(rate)
# # TODO: RESTORE AFTER FIX IN idelib
# dmin, dmean, dmax = data.getRangeMinMeanMax(start, end)
# result['min'].append(dmin)
# result['mean'].append(dmean)
# result['max'].append(dmax)
if formatting is False:
return pd.DataFrame(result).style
style = TABLE_FORMAT.copy()
if timestamps:
style.update({
'start': format_timestamp,
'end': format_timestamp
})
if isinstance(formatting, dict):
style.update(formatting)
styled = pd.DataFrame(result).style.format(style, precision=precision)
if not index:
return styled.hide_index()
else:
return styled
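# Minimal usage sketch (hypothetical file name; assumes the API documented above):
# doc = get_doc("recording.ide")
# table = get_channel_table(doc, "accel", start=":05", end="1:05")
# table.data.to_csv("channel_summary.csv")   # the Styler wraps the underlying DataFrame in .data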
# ============================================================================
#
# ============================================================================
def to_pandas(
channel: typing.Union[idelib.dataset.Channel, idelib.dataset.SubChannel],
time_mode: typing.Literal["seconds", "timedelta", "datetime"] = "datetime",
) -> pd.DataFrame:
""" Read IDE data into a pandas DataFrame.
:param channel: a `Channel` object, as produced from `Dataset.channels`
or :py:func:`endaq.ide.get_channels`
:param time_mode: how to temporally index samples; each mode uses either
relative times (with respect to the start of the recording) or
absolute times (i.e., date-times):
* `"seconds"` - a `pandas.Float64Index` of relative timestamps, in seconds
* `"timedelta"` - a `pandas.TimeDeltaIndex` of relative timestamps
* `"datetime"` - a `pandas.DateTimeIndex` of absolute timestamps
:return: a `pandas.DataFrame` containing the channel's data
"""
data = channel.getSession().arraySlice()
t, data = data[0], data[1:].T
t = (1e3*t).astype("timedelta64[ns]")
if time_mode == "seconds":
t = t / np.timedelta64(1, "s")
elif time_mode == "datetime":
t = t + np.datetime64(channel.dataset.lastUtcTime, "s")
elif time_mode != "timedelta":
raise ValueError(f'invalid time mode "{time_mode}"')
if hasattr(channel, "subchannels"):
columns = [sch.name for sch in channel.subchannels]
else:
columns = [channel.name]
return pd.DataFrame(data, index=pd.Series(t, name="timestamp"), columns=columns)
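# Illustrative call of to_pandas (assumed channel object, not from the original source):
# ch = get_doc("recording.ide").channels[32]   # hypothetical accelerometer channel id
# df = to_pandas(ch, time_mode="seconds")      # index: elapsed seconds; columns: subchannel names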
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 16 17:37:51 2020
@author: sawleen
"""
import pandas as pd
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import os
os.chdir('/Users/sawleen/Documents/Leen/Python/stock_analysis')
import data.get_yf_data as get_yf_data #Configured to the same root folder where display_webpg.py resides
import data.get_sgi_data as get_sgi_data #Configured to the same root folder where display_webpg.py resides
import data.get_morningstar_data as get_ms_data
import time
import math
class Update():
#### Get sector summaries (generate main list)
def prep_sector_summaries(self, stocks_map, stock_sectors, new_sectors, new_stocks=None):
summary_all_df = pd.DataFrame([]) # To track for all sectors
start_time = time.time()
# New entries detected
if new_sectors != 'All':
summary_all_df = pd.read_csv('data/sector_summaries/All.csv', index_col=None)
# Get all health metrics first
# Health metrics require selenium, which is prone to disconnections
health_metrics_dict_all = self.get_all_health_metrics(new_stocks)
for sector_to_update in new_sectors:
print('Sector to update: {}'.format(sector_to_update))
summary_df = pd.read_csv('data/sector_summaries/{}.csv'.format(sector_to_update), index_col=None)
for symbol in new_stocks:
# Update CSV for indiv sector
current_sector = stocks_map.loc[stocks_map['SGX_Symbol'] == symbol, ['Sector']].values[0][0]
print('Current stock sector: {}'.format(current_sector))
if current_sector == sector_to_update:
stocks_map_filtered = stocks_map.loc[stocks_map['SGX_Symbol'] == symbol, stocks_map.columns]
[summary_df, summary_all_df] = self.get_summary_df(sector_to_update, stocks_map_filtered, health_metrics_dict_all, summary_df, summary_all_df)
# Sector summary
summary_df.sort_values(['Strategy','Tally','Tally(%)','Dividend (fwd)','PB Ratio'],ascending=[True,False,False,False,True],inplace=True)
summary_df.to_csv('data/sector_summaries/{}.csv'.format(sector_to_update), index=False)
summary_all_df.sort_values(['Strategy','Tally','Tally(%)','Dividend (fwd)','PB Ratio'],ascending=[True,False,False,False,True],inplace=True)
summary_all_df.to_csv('data/sector_summaries/All.csv',index=False)
# No new entries but update for ALL sectors
else:
#expected_runtime = int(len(stocks_map)/60*15) # expected time to print to screen
print('Updating summary for all sectors...')
#print('Please hold on for about {}min...'.format(expected_runtime))
summary_all_df = pd.DataFrame([])
# Get all health metrics first
# Health metrics require selenium, which is prone to disconnections
symbols=stocks_map['SGX_Symbol']
health_metrics_dict_all = self.get_all_health_metrics(symbols)
for sector in stock_sectors:
summary_df = pd.DataFrame([])
if sector!= 'All':
stocks_map_filtered = stocks_map.loc[stocks_map['Sector'] == sector, stocks_map.columns]
[summary_df, summary_all_df] = self.get_summary_df(sector, stocks_map_filtered, health_metrics_dict_all, summary_df, summary_all_df)
# Sector summary
summary_df.sort_values(['Strategy','Tally','Tally(%)','Dividend (fwd)','PB Ratio'],ascending=[True,False,False,False,True],inplace=True)
summary_df.to_csv('data/sector_summaries/{}.csv'.format(sector), index=False)
# All stocks summary
print('Sorting sector summary for ALL stocks...')
summary_all_df.sort_values(['Strategy','Tally','Tally(%)','Dividend (fwd)','PB Ratio'],ascending=[True,False,False,False,True],inplace=True)
summary_all_df.to_csv('data/sector_summaries/All.csv', index=False)
total_time = round((time.time() - start_time)/60,2)
print('Total time taken: {}'.format(total_time))
#### End of prep_sector_summaries
def get_summary_df(self, sector_to_update, stocks_map_filtered, health_metrics_dict_all, summary_df, summary_all_df):
print('Prepping sector summary for {}...'.format(sector_to_update))
for sgx_symbol in stocks_map_filtered['SGX_Symbol']:
print('{}...'.format(sgx_symbol))
yf_data = get_yf_data.Data(sgx_symbol)
industry = yf_data.get_industry()
stats = yf_data.get_basic_stats()
[inc_yoy_avg_growth_df, inc_yrly_growth_df] = yf_data.process_inc_statement()
dividends_df = yf_data.get_dividends()
try:
div_fwd = dividends_df.loc[dividends_df['Dividend Type']=='Forward',['Values']].values[0][0]
except:
print('! Warning: No forward dividend data fetched for {}'.format(sgx_symbol))
div_fwd = math.nan
short_name = yf_data.get_name_short()
disp_name = yf_data.get_name_disp()
if '.SI' in sgx_symbol and type(short_name)==str:
sgi_data = get_sgi_data.Data(sgx_symbol, short_name)
url_tprice = sgi_data.get_sginvestor_url(sgx_symbol, short_name, industry)
#print(url_tprice)
soup_tprice = sgi_data.get_soup_tprice(url_tprice)
tpcalls = sgi_data.get_tpcalls(soup_tprice)
tpcalls_df = sgi_data.get_tpcalls_df(tpcalls)
strategies_summary = sgi_data.get_strategies_summary(tpcalls_df)
else: # create empty dataframe
strategies_summary = pd.DataFrame(index=[0],columns=['Strategy','Tally(%)','Tally'])
health_metrics = health_metrics_dict_all[sgx_symbol]
info={'Name':disp_name,
'Symbol':sgx_symbol,
'Market Cap (bil)':stats['Market Cap (bil)'],
'PB Ratio': stats['PB Ratio'],
'PE Ratio': stats['PE Ratio'],
'Dividend Payout Ratio': stats['Dividend Payout Ratio'],
'Income Growth (Avg YoY)':inc_yoy_avg_growth_df['Income'].values[0],
'ROE': stats['% Return on Equity'],
'Dividend (fwd)': div_fwd,
'Strategy': strategies_summary.at[0,'Strategy'],
'Tally(%)': strategies_summary.at[0,'Tally(%)'],
'Tally': strategies_summary.at[0,'Tally'],
'Price/Cash Flow':health_metrics['Price/Cash Flow'],
'Debt/Equity':health_metrics['Debt/Equity'],
'Interest Coverage':health_metrics['Interest Coverage']}
# Stock summary
info_df = pd.DataFrame.from_dict(info, orient='columns')
import numpy as np
import pandas as pd
import json
from usuario import Usuario
from evento import Evento
import sanconnect_parsers as parsers
import cmd
from sklearn import tree
from sklearn.model_selection import cross_val_score
from operator import itemgetter
from sklearn.externals import joblib
import graphviz
def treina_classificador():
with open('base_treino_usuarios.js') as arquivo_usuarios:
json_usuarios = json.load(arquivo_usuarios)
with open('base_treino_eventos.js') as arquivo_eventos:
json_eventos = json.load(arquivo_eventos)
usuarios = parsers.parse_json_usuarios(json_usuarios)
print('usuarios')
print(usuarios)
print('json de evento antes de mandar pro parser')
print(json_eventos['eventos'])
eventos = parsers.parse_json_eventos(json_eventos['eventos'])
print('eventos')
print(eventos)
df_usuario_evento = pd.DataFrame()
for usuario in usuarios:
series_usuario = usuario.to_series()
for evento in eventos:
print('usuario:',series_usuario.to_string())
series_evento = evento.to_series()
print('evento:',series_evento.to_string())
series_usuario_evento = series_usuario.append(series_evento)
print('Digite o interesse do usuario no evento: ')
interesse = int(input('Interesse: '))
if(interesse == -1):
break
elif(interesse == -2):
df_usuario_evento = formata_dataframe_usuario_evento(df_usuario_evento)
print(df_usuario_evento)
df_usuario_evento.to_csv('classific_usuario_evento.csv')
return df_usuario_evento
series_usuario_evento['interesse'] = interesse
df_usuario_evento = df_usuario_evento.append(series_usuario_evento, ignore_index=True)
print('Dataframe:')
print(df_usuario_evento)
df_usuario_evento = formata_dataframe_usuario_evento(df_usuario_evento)
df_usuario_evento.to_csv('classific_usuario_evento.csv')
return df_usuario_evento
def monta_dataframe_usuario_evento(usuario, eventos):
df_usuario_evento = pd.DataFrame()
# coding: utf-8
# ---
#
# _You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._
#
# ---
# # The Series Data Structure
# In[ ]:
import pandas as pd
# In[ ]:
animals = ['Tiger', 'Bear', 'Moose']
pd.Series(animals)
# In[ ]:
numbers = [1, 2, 3]
pd.Series(numbers)
# In[ ]:
animals = ['Tiger', 'Bear', None]
pd.Series(animals)
# In[ ]:
numbers = [1, 2, None]
pd.Series(numbers)
# In[ ]:
import numpy as np
np.nan == None
# In[ ]:
np.nan == np.nan
# In[ ]:
np.isnan(np.nan)
# In[ ]:
sports = {'Archery': 'Bhutan',
'Golf': 'Scotland',
'Sumo': 'Japan',
'Taekwondo': 'South Korea'}
s = pd.Series(sports)
s
# In[ ]:
s.index
# In[ ]:
s = pd.Series(['Tiger', 'Bear', 'Moose'], index=['India', 'America', 'Canada'])
s
# In[ ]:
sports = {'Archery': 'Bhutan',
'Golf': 'Scotland',
'Sumo': 'Japan',
'Taekwondo': 'South Korea'}
s = pd.Series(sports, index=['Golf', 'Sumo', 'Hockey'])
s
# # Querying a Series
# In[ ]:
sports = {'Archery': 'Bhutan',
'Golf': 'Scotland',
'Sumo': 'Japan',
'Taekwondo': 'South Korea'}
s = pd.Series(sports)
s
# In[ ]:
s.iloc[3]
# In[ ]:
s.loc['Golf']
# In[ ]:
s[3]
# In[ ]:
s['Golf']
# In[ ]:
sports = {99: 'Bhutan',
100: 'Scotland',
101: 'Japan',
102: 'South Korea'}
s = pd.Series(sports)
from __future__ import division
import math
from collections import OrderedDict, defaultdict
import logbook
import numpy as np
import pandas as pd
import alephnull.protocol as zp
from .position import positiondict
try:
from alephtools.connection import get_multiplier
except:
#Replace this with source to multiplier
get_multiplier = lambda x: 25
log = logbook.Logger('Performance')
class FuturesPerformancePeriod(object):
def __init__(
self,
starting_cash,
period_open=None,
period_close=None,
keep_transactions=True,
keep_orders=False,
serialize_positions=True):
# * #
self.starting_mav = starting_cash
self.ending_mav = starting_cash
self.cash_adjustment = 0
self.ending_total_value = 0.0
self.pnl = 0.0
# ** #
self.period_open = period_open
self.period_close = period_close
# sid => position object
self.positions = positiondict()
# rollover initializes a number of self's attributes:
self.rollover()
self.keep_transactions = keep_transactions
self.keep_orders = keep_orders
# Arrays for quick calculations of positions value
self._position_amounts = pd.Series()
self._position_last_sale_prices = pd.Series()
self.calculate_performance()
# An object to recycle via assigning new values
# when returning portfolio information.
# So as not to avoid creating a new object for each event
self._portfolio_store = zp.Portfolio()
self._positions_store = zp.Positions()
self.serialize_positions = serialize_positions
def rollover(self):
# * #
self.starting_mav = self.ending_mav
self.cash_adjustment = 0
self.pnl = 0.0
# ** #
self.processed_transactions = defaultdict(list)
self.orders_by_modified = defaultdict(OrderedDict)
self.orders_by_id = OrderedDict()
self.cumulative_capital_used = 0.0
self.max_capital_used = 0.0
self.max_leverage = 0.0
def ensure_position_index(self, sid):
try:
_ = self._position_amounts[sid]
_ = self._position_last_sale_prices[sid]
except (KeyError, IndexError):
self._position_amounts = \
self._position_amounts.append(pd.Series({sid: 0.0}))
self._position_last_sale_prices = \
self._position_last_sale_prices.append(pd.Series({sid: 0.0}))
import re
from unittest.mock import Mock, call, patch
import numpy as np
import pandas as pd
import pytest
from rdt.transformers.categorical import (
CategoricalFuzzyTransformer, CategoricalTransformer, LabelEncodingTransformer,
OneHotEncodingTransformer)
RE_SSN = re.compile(r'\d\d\d-\d\d-\d\d\d\d')
class TestCategoricalTransformer:
def test___setstate__(self):
"""Test the ``__set_state__`` method.
Validate that the ``__dict__`` attribute is correctly udpdated when
Setup:
- create an instance of a ``CategoricalTransformer``.
Side effect:
- it updates the ``__dict__`` attribute of the object.
"""
# Setup
transformer = CategoricalTransformer()
# Run
transformer.__setstate__({
'intervals': {
None: 'abc'
}
})
# Assert
assert transformer.__dict__['intervals'][np.nan] == 'abc'
def test___init__(self):
"""Passed arguments must be stored as attributes."""
# Run
transformer = CategoricalTransformer(
fuzzy='fuzzy_value',
clip='clip_value',
)
# Asserts
assert transformer.fuzzy == 'fuzzy_value'
assert transformer.clip == 'clip_value'
def test_is_transform_deterministic(self):
"""Test the ``is_transform_deterministic`` method.
Validate that this method returns the opposite boolean value of the ``fuzzy`` parameter.
Setup:
- initialize a ``CategoricalTransformer`` with ``fuzzy = True``.
Output:
- the boolean value which is the opposite of ``fuzzy``.
"""
# Setup
transformer = CategoricalTransformer(fuzzy=True)
# Run
output = transformer.is_transform_deterministic()
# Assert
assert output is False
def test_is_composition_identity(self):
"""Test the ``is_composition_identity`` method.
Since ``COMPOSITION_IS_IDENTITY`` is True, just validates that the method
returns the opposite boolean value of the ``fuzzy`` parameter.
Setup:
- initialize a ``CategoricalTransformer`` with ``fuzzy = True``.
Output:
- the boolean value which is the opposite of ``fuzzy``.
"""
# Setup
transformer = CategoricalTransformer(fuzzy=True)
# Run
output = transformer.is_composition_identity()
# Assert
assert output is False
def test__get_intervals(self):
"""Test the ``_get_intervals`` method.
Validate that the intervals for each categorical value are correct.
Input:
- a pandas series containing categorical values.
Output:
- a tuple, where the first element describes the intervals for each
categorical value (start, end).
"""
# Run
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
result = CategoricalTransformer._get_intervals(data)
# Asserts
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
'bar': 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', 'bar', 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert result[0] == expected_intervals
pd.testing.assert_series_equal(result[1], expected_means)
pd.testing.assert_frame_equal(result[2], expected_starts)
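# Worked check of the interval math above (illustrative): 'foo' appears 3/6 times, so its
# interval is [0, 0.5) with mean (0 + 0.5) / 2 = 0.25 and std 0.5 / 6; 'bar' appears 2/6
# times, giving [0.5, 0.8333...) with mean 0.6667 and std (1/3) / 6 ≈ 0.0556.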
def test__get_intervals_nans(self):
"""Test the ``_get_intervals`` method when data contains nan's.
Validate that the intervals for each categorical value are correct, when passed
data containing nan values.
Input:
- a pandas series containing nan values and categorical values.
Output:
- a tuple, where the first element describes the intervals for each
categorical value (start, end).
"""
# Setup
data = pd.Series(['foo', np.nan, None, 'foo', 'foo', 'tar'])
# Run
result = CategoricalTransformer._get_intervals(data)
# Assert
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
np.nan: (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
np.nan: 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', np.nan, 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert result[0] == expected_intervals
pd.testing.assert_series_equal(result[1], expected_means)
pd.testing.assert_frame_equal(result[2], expected_starts)
def test__fit_intervals(self):
# Setup
transformer = CategoricalTransformer()
# Run
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
transformer._fit(data)
# Asserts
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
'bar': 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', 'bar', 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert transformer.intervals == expected_intervals
pd.testing.assert_series_equal(transformer.means, expected_means)
pd.testing.assert_frame_equal(transformer.starts, expected_starts)
def test__get_value_no_fuzzy(self):
# Setup
transformer = CategoricalTransformer(fuzzy=False)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
np.nan: (0.5, 1.0, 0.75, 0.5 / 6),
}
# Run
result_foo = transformer._get_value('foo')
result_nan = transformer._get_value(np.nan)
# Asserts
assert result_foo == 0.25
assert result_nan == 0.75
@patch('rdt.transformers.categorical.norm')
def test__get_value_fuzzy(self, norm_mock):
# setup
norm_mock.rvs.return_value = 0.2745
transformer = CategoricalTransformer(fuzzy=True)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
}
# Run
result = transformer._get_value('foo')
# Asserts
assert result == 0.2745
def test__normalize_no_clip(self):
"""Test normalize data"""
# Setup
transformer = CategoricalTransformer(clip=False)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.57, 0.1234, 0.5, 0.69], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test__normalize_clip(self):
"""Test normalize data with clip=True"""
# Setup
transformer = CategoricalTransformer(clip=True)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.0, 0.1234, 1.0, 0.0], dtype=float)
pd.testing.assert_series_equal(result, expect)
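# Worked check of the normalization above (illustrative): without clipping, values are
# wrapped modulo 1 (-0.43 -> 0.57, 1.5 -> 0.5, -1.31 -> 0.69); with clip=True they are
# clamped to the [0, 1] interval instead (-0.43 -> 0.0, 1.5 -> 1.0).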
def test__reverse_transform_array(self):
"""Test reverse_transform a numpy.array"""
# Setup
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
rt_data = np.array([-0.6, 0.5, 0.6, 0.2, 0.1, -0.2])
transformer = CategoricalTransformer()
# Run
transformer._fit(data)
result = transformer._reverse_transform(rt_data)
# Asserts
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
assert transformer.intervals == expected_intervals
expect = pd.Series(data)
pd.testing.assert_series_equal(result, expect)
def test__transform_by_category_called(self):
"""Test that the `_transform_by_category` method is called.
When the number of rows is greater than the number of categories, expect
that the `_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 5 rows.
Output:
- the output of `_transform_by_category`.
Side effects:
- `_transform_by_category` will be called once.
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer._transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_category.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_category.return_value
def test__transform_by_category(self):
"""Test the `_transform_by_category` method with numerical data.
Expect that the correct transformed data is returned.
Setup:
The categorical transformer is instantiated with 4 categories and intervals.
Input:
- data with 5 rows.
Output:
- the transformed data.
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Asserts
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
def test__transform_by_category_nans(self):
"""Test the ``_transform_by_category`` method with data containing nans.
Validate that the data is transformed correctly when it contains nan's.
Setup:
- the categorical transformer is instantiated, and the appropriate ``intervals``
attribute is set.
Input:
- a pandas series containing nan's.
Output:
- a numpy array containing the transformed data.
"""
# Setup
data = pd.Series([np.nan, 3, 3, 2, np.nan])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
np.nan: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Asserts
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
@patch('rdt.transformers.categorical.norm')
def test__transform_by_category_fuzzy_true(self, norm_mock):
"""Test the ``_transform_by_category`` method when ``fuzzy`` is True.
Validate that the data is transformed correctly when ``fuzzy`` is True.
Setup:
- the categorical transformer is instantiated with ``fuzzy`` as True,
and the appropriate ``intervals`` attribute is set.
- the ``intervals`` attribute is set to a a dictionary of intervals corresponding
to the elements of the passed data.
- set the ``side_effect`` of the ``rvs_mock`` to the appropriate function.
Input:
- a pandas series.
Output:
- a numpy array containing the transformed data.
Side effect:
- ``rvs_mock`` should be called four times, one for each element of the
intervals dictionary.
"""
# Setup
def rvs_mock_func(loc, scale, **kwargs):
return loc
norm_mock.rvs.side_effect = rvs_mock_func
data = pd.Series([1, 3, 3, 2, 1])
transformer = CategoricalTransformer(fuzzy=True)
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Assert
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
norm_mock.rvs.assert_has_calls([
call(0.125, 0.041666666666666664, size=0),
call(0.375, 0.041666666666666664, size=2),
call(0.625, 0.041666666666666664, size=1),
call(0.875, 0.041666666666666664, size=2),
])
def test__transform_by_row_called(self):
"""Test that the `_transform_by_row` method is called.
When the number of rows is less than or equal to the number of categories,
expect that the `_transform_by_row` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 4 rows
Output:
- the output of `_transform_by_row`
Side effects:
- `_transform_by_row` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer._transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_row.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_row.return_value
def test__transform_by_row(self):
"""Test the `_transform_by_row` method with numerical data.
Expect that the correct transformed data is returned.
Setup:
The categorical transformer is instantiated with 4 categories and intervals.
Input:
- data with 4 rows
Output:
- the transformed data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_row(data)
# Asserts
expected = np.array([0.875, 0.625, 0.375, 0.125])
assert (transformed == expected).all()
@patch('psutil.virtual_memory')
def test__reverse_transform_by_matrix_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_matrix` method is called.
When there is enough virtual memory, expect that the
`_reverse_transform_by_matrix` method is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return a large enough `available_memory`.
Input:
- numerical data with 4 rows
Output:
- the output of `_reverse_transform_by_matrix`
Side effects:
- `_reverse_transform_by_matrix` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock._normalize.return_value = data
virtual_memory = Mock()
virtual_memory.available = 4 * 4 * 8 * 3 + 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer._reverse_transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._reverse_transform_by_matrix.assert_called_once_with(data)
assert reverse == categorical_transformer_mock._reverse_transform_by_matrix.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_matrix(self, psutil_mock):
"""Test the _reverse_transform_by_matrix method with numerical data
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories and means. Also patch
the `psutil.virtual_memory` function to return a large enough `available_memory`.
Input:
- transformed data with 4 rows
Output:
- the original data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformed = pd.Series([0.875, 0.625, 0.375, 0.125])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 4 * 4 * 8 * 3 + 1
psutil_mock.return_value = virtual_memory
# Run
reverse = transformer._reverse_transform_by_matrix(transformed)
# Assert
pd.testing.assert_series_equal(data, reverse)
@patch('psutil.virtual_memory')
def test__reverse_transform_by_category_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_category` method is called.
When there is not enough virtual memory and the number of rows is greater than the
number of categories, expect that the `_reverse_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return an `available_memory` of 1.
Input:
- numerical data with 5 rows
Output:
- the output of `_reverse_transform_by_category`
Side effects:
- `_reverse_transform_by_category` will be called once
"""
# Setup
transform_data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock._normalize.return_value = transform_data
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer._reverse_transform(
categorical_transformer_mock, transform_data)
# Asserts
categorical_transformer_mock._reverse_transform_by_category.assert_called_once_with(
transform_data)
assert reverse == categorical_transformer_mock._reverse_transform_by_category.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_category(self, psutil_mock):
"""Test the _reverse_transform_by_category method with numerical data.
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories, and the means
and intervals are set for those categories. Also patch the `psutil.virtual_memory`
function to return an `available_memory` of 1.
Input:
- transformed data with 5 rows
Output:
- the original data
"""
data = pd.Series([1, 3, 3, 2, 1])
transformed = pd.Series([0.875, 0.375, 0.375, 0.625, 0.875])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
reverse = transformer._reverse_transform_by_category(transformed)
pd.testing.assert_series_equal(data, reverse)
def test__get_category_from_start(self):
"""Test the ``_get_category_from_start`` method.
Setup:
- instantiate a ``CategoricalTransformer``, and set the attribute ``starts``
to a pandas dataframe with ``set_index`` as ``'start'``.
Input:
- an integer, an index from data.
Output:
- a category from the data.
"""
# Setup
transformer = CategoricalTransformer()
transformer.starts = pd.DataFrame({
'start': [0.0, 0.5, 0.7],
'category': ['a', 'b', 'c']
}).set_index('start')
# Run
category = transformer._get_category_from_start(2)
# Assert
assert category == 'c'
@patch('psutil.virtual_memory')
def test__reverse_transform_by_row_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_row` method is called.
When there is not enough virtual memory and the number of rows is less than or equal
to the number of categories, expect that the `_reverse_transform_by_row` method
is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return an `available_memory` of 1.
Input:
- numerical data with 4 rows
Output:
- the output of `_reverse_transform_by_row`
Side effects:
- `_reverse_transform_by_row` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock.starts = pd.DataFrame(
[0., 0.25, 0.5, 0.75], index=[4, 3, 2, 1], columns=['category'])
categorical_transformer_mock._normalize.return_value = data
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer._reverse_transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._reverse_transform_by_row.assert_called_once_with(data)
assert reverse == categorical_transformer_mock._reverse_transform_by_row.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_row(self, psutil_mock):
"""Test the _reverse_transform_by_row method with numerical data.
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories, and the means, starts,
and intervals are set for those categories. Also patch the `psutil.virtual_memory`
function to return an `available_memory` of 1.
Input:
- transformed data with 4 rows
Output:
- the original data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformed = pd.Series([0.875, 0.625, 0.375, 0.125])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.starts = pd.DataFrame(
[4, 3, 2, 1], index=[0., 0.25, 0.5, 0.75], columns=['category'])
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = transformer._reverse_transform(transformed)
# Assert
pd.testing.assert_series_equal(data, reverse)
class TestOneHotEncodingTransformer:
def test___init__(self):
"""Test the ``__init__`` method.
Validate that the passed arguments are stored as attributes.
Input:
- a string passed to the ``error_on_unknown`` parameter.
Side effect:
- the ``error_on_unknown`` attribute is set to the passed string.
"""
# Run
transformer = OneHotEncodingTransformer(error_on_unknown='error_value')
# Asserts
assert transformer.error_on_unknown == 'error_value'
def test__prepare_data_empty_lists(self):
# Setup
ohet = OneHotEncodingTransformer()
data = [[], [], []]
# Assert
with pytest.raises(ValueError, match='Unexpected format.'):
ohet._prepare_data(data)
def test__prepare_data_nested_lists(self):
# Setup
ohet = OneHotEncodingTransformer()
data = [[[]]]
# Assert
with pytest.raises(ValueError, match='Unexpected format.'):
ohet._prepare_data(data)
def test__prepare_data_list_of_lists(self):
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = [['a'], ['b'], ['c']]
out = ohet._prepare_data(data)
# Assert
expected = np.array(['a', 'b', 'c'])
np.testing.assert_array_equal(out, expected)
def test__prepare_data_pandas_series(self):
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'b', 'c'])
out = ohet._prepare_data(data)
# Assert
expected = pd.Series(['a', 'b', 'c'])
np.testing.assert_array_equal(out, expected)
def test_get_output_types(self):
"""Test the ``get_output_types`` method.
Validate that the ``_add_prefix`` method is properly applied to the ``output_types``
dictionary. For this class, the ``output_types`` dictionary is described as:
{
'value1': 'float',
'value2': 'float',
...
}
The number of items in the dictionary is defined by the ``dummies`` attribute.
Setup:
- initialize a ``OneHotEncodingTransformer`` and set:
- the ``dummies`` attribute to a list.
- the ``column_prefix`` attribute to a string.
Output:
- the ``output_types`` dictionary, but with ``self.column_prefix``
added to the beginning of the keys of the ``output_types`` dictionary.
"""
# Setup
transformer = OneHotEncodingTransformer()
transformer.column_prefix = 'abc'
transformer.dummies = [1, 2]
# Run
output = transformer.get_output_types()
# Assert
expected = {
'abc.value0': 'float',
'abc.value1': 'float'
}
assert output == expected
def test__fit_dummies_no_nans(self):
"""Test the ``_fit`` method without nans.
Check that ``self.dummies`` does not
contain nans.
Input:
- Series with values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 2, 'c'])
ohet._fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a', 2, 'c'])
def test__fit_dummies_nans(self):
"""Test the ``_fit`` method without nans.
Check that ``self.dummies`` contain ``np.nan``.
Input:
- Series with values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 2, 'c', None])
ohet._fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a', 2, 'c', np.nan])
def test__fit_no_nans(self):
"""Test the ``_fit`` method without nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
should be activated
Input:
- Series with values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'b', 'c'])
ohet._fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a', 'b', 'c'])
np.testing.assert_array_equal(ohet._uniques, ['a', 'b', 'c'])
assert ohet._dummy_encoded
assert not ohet._dummy_na
def test__fit_no_nans_numeric(self):
"""Test the ``_fit`` method without nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
should be deactivated
Input:
- Series with values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series([1, 2, 3])
################################################################################
# NAM Groningen 2017 Model: DeepNL/Utrecht University/Seismology
#
# <NAME> - <EMAIL>
################################################################################
import pandas as pd
import numpy as np
class NAMModel:
# hard coded: see NAM 2017 report on Groningen Model
_slp_ns_u = 0.25 # vp slope : see NAM 2017 report
_slp_ns_b = 2.3 # vp slope : see NAM 2017 report
_slp_ck_b = 1.0 # vp slope : see NAM 2017 report
_slp_dc = 0.541 # vp slope : see NAM 2017 report
_isec_dc = 2572.3 # vp intersect : see NAM 2017 report
_vp_max_dc = 5000 # max vp : see NAM 2017 report
_vp_ze = 4400 # constant vp : see NAM 2017 report
_vp_fltr = 5900 # constant vp : see NAM 2017 report
_vp_zz = 4400 # constant vp : see NAM 2017 report
_vp_base = 5900 # constant vp : see NAM 2017 report
_vp_ro = 3900 # constant vp : see NAM 2017 report
_nx = -1
_ny = -1
_is_nxy = False #change once parameters are set
_pdf_xmin = np.nan
_pdf_xmax = np.nan
_pdf_ymin = np.nan
_pdf_ymax = np.nan
_dx = 50 # meters, hard coded: see NAM 2017 report
_dy = 50 # meters, hard coded: see NAM 2017 report
_vo_pathfname = '' # path and file name to Vo_maps.txt
_ho_pathfname = '' # path and file name to horizons.txt
_vo_pdframe = pd.DataFrame() # pandas dataframe for Vo_maps.txt
_ho_pdframe = pd.DataFrame() # pandas dataframe for horizons.txt
import praw
import pandas as pd
from praw.models import MoreComments
from sqlalchemy import create_engine
import pymysql
import os
from google.cloud import language_v1
credential_path = "/Users/tommyshi/Documents/Academics/Fall 2021/1.125 Architecting and Engineering Software Systems/Final_Proj/1.125_fproj/prod-seeker-332518-f2e20d287e46.json"
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credential_path
reddit = praw.Reddit(client_id='TxY72RVlJM3XkOtouvTAaw',
client_secret='_sRx1WW5Ub4_nqXH3NFoi8JgFVenCg',
user_agent='prods',
username='lolcatster',
password='<PASSWORD>')
# scrape relevant subreddits
subreddits = ['hometheater', '4kTV', 'HTBuyingGuides']
#, '4kTV', 'BuyItForLife']
comments = pd.DataFrame()
for sr in subreddits:
for post in reddit.subreddit(sr).top("all", limit = 100):
post.comments.replace_more(limit=0)
for top_level_comment in post.comments:
if top_level_comment.score > 1:
comments = comments.append({'post_title': post.title,
'post_ratio': post.upvote_ratio,
'comment_body':top_level_comment.body,
'comment_score': top_level_comment.score,
'post_comments': post.num_comments,
'post_date': post.created,
'comment_date':top_level_comment.created_utc,
'post_id': post.id,
'comment_id': top_level_comment.id},
ignore_index=True)
## Use Google NLP entity analysis
def sample_analyze_entity_sentiment(key, text_content):
"""
Analyzing Entity Sentiment in a String
Args:
text_content The text content to analyze
"""
client = language_v1.LanguageServiceClient()
# Available types: PLAIN_TEXT, HTML
type_ = language_v1.Document.Type.PLAIN_TEXT
language = "en"
document = {"content": text_content, "type_": type_, "language": language}
# Available values: NONE, UTF8, UTF16, UTF32
encoding_type = language_v1.EncodingType.UTF8
response = client.analyze_entity_sentiment(request = {'document': document, 'encoding_type': encoding_type})
# Loop through entitites returned from the API
df = pd.DataFrame()
for entity in response.entities:
if language_v1.Entity.Type(entity.type_).name != 'CONSUMER_GOOD':
continue
sentiment = entity.sentiment
for mention in entity.mentions:
if language_v1.EntityMention.Type(mention.type_).name != 'PROPER':
continue
# if language_v1.Entity.Type(entity.type_).name == 'CONSUMER_GOOD' and \
# sentiment.score > 0 and entity.salience > 0.75:
print(u"Representative name for the entity: {}".format(entity.name))
df = df.append({
'post_id': key.post_id,
'comment_id': key.comment_id,
'entity_name': entity.name,
'entity_type': language_v1.Entity.Type(entity.type_).name,
'entity_salience': entity.salience,
'entity_sentiment_score': sentiment.score,
'entity_sentiment_magnitude': sentiment.magnitude,
'entity_mention': language_v1.EntityMention.Type(mention.type_).name
},
ignore_index=True)
return df.drop_duplicates()
entities = pd.DataFrame()
entmaster = pd.DataFrame()
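# Plausible continuation sketch (an assumption, not from the original script): apply the
# entity-sentiment call to every scraped comment and accumulate the results.
# for key in comments.itertuples():
#     entities = sample_analyze_entity_sentiment(key, key.comment_body)
#     entmaster = entmaster.append(entities, ignore_index=True)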
# coding: utf-8
import json
import pandas as pd
import numpy as np
import glob
import ast
from modlamp.descriptors import *
import re
import cfg
import os
def not_in_range(seq):
if seq is None or len(seq) < 1 or len(seq) > 80:
return True
return False
def bad_terminus(peptide):
if peptide.nTerminus[0] is not None or peptide.cTerminus[0] is not None:
return True
return False
def is_valid(peptide):
try:
seq = peptide.seq[0]
if not seq.isupper():
return False
if bad_terminus(peptide):
return False
if not_in_range(seq):
return False
if seq.find("X") != -1:
return False
return True
except:
return False
def get_valid_sequences():
peptides = pd.DataFrame()
all_file_names = []
for j_file in glob.glob(os.path.join(cfg.DATA_ROOT, "dbaasp/*.json")):
filename = j_file[j_file.rfind("/") + 1:]
with open(j_file, encoding='utf-8') as train_file:
try:
dict_tmp = json.load(train_file)
dict_tmp["seq"] = dict_tmp.pop("sequence")
dict_train = {}
dict_train["peptideCard"] = dict_tmp
except:
print(f'jsonLoad error!:{filename}')
continue
if dict_train["peptideCard"].get("unusualAminoAcids") != []:
continue
peptide = pd.DataFrame.from_dict(dict_train, orient='index')
if is_valid(peptide):
peptides = pd.concat([peptides, peptide])
all_file_names.append(filename)
peptides["filename"] = all_file_names
peptides.to_csv("./data/valid_sequences.csv")
return peptides
def add_activity_list(peptides):
activity_list_all = []
for targets in peptides.targetActivities: # one seq has a list of targets
try:
activity_list = []
for target in targets:
if target['unit']['name'] == 'µM': # µg/ml
try:
con = target['concentration']
activity_list.append(target['concentration'])
except:
continue
activity_list_all.append(activity_list)
except:
activity_list_all.append([])
continue
peptides["activity_list"] = activity_list_all
return peptides
def add_toxic_list(peptides):
toxic_list_all = []
for targets in peptides.hemoliticCytotoxicActivities: # one seq has a list of targets
try:
toxic_list = []
for target in targets:
if target['unit']['name'] == 'µM': # µg/ml
try:
toxic_list.append(target['concentration'])
except:
continue
toxic_list_all.append(toxic_list)
except:
toxic_list_all.append([])
continue
peptides["toxic_list"] = toxic_list_all
return peptides
def add_molecular_weights(peptides):
seqs = [doc for doc in peptides["seq"]]
mws = []
for seq in seqs:
try:
desc = GlobalDescriptor(seq.strip())
desc.calculate_MW(amide=True)
mw = desc.descriptor[0][0]
mws.append(mw)
except:
mws.append(None)
peptides["molecular_weight"] = mws
return peptides
def convert_units(peptides):
converted_activity_all = []
converted_toxic_all = []
for activity_list, toxic_list, molecular_weight in zip(peptides.activity_list,
peptides.toxic_list,
peptides.molecular_weight):
converted_activity_list = []
converted_toxic_list = []
for item in activity_list:
item = item.replace(">", "") # '>10' => 10
item = item.replace("<", "") # '<1.25' => 1.25
item = item.replace("=", "") # '=2' => 2
if item == "NA":
continue
if item.find("±") != -1:
item = item[:item.find("±")] # 10.7±4.6 => 10.7
if item.find("-") != -1:
item = item[:item.find("-")] # 12.5-25.0 => 12.5
item = item.strip()
try:
converted_activity_list.append(float(item) * molecular_weight / 1000)
except:
pass
for item in toxic_list:
item = item.replace(">", "") # '>10' => 10
item = item.replace("<", "") # '<1.25' => 1.25
item = item.replace("=", "") # '=2' => 2
if item == "NA":
continue
if item.find("±") != -1:
item = item[:item.find("±")] # 10.7±4.6 => 10.7
if item.find("-") != -1:
item = item[:item.find("-")] # 12.5-25.0 => 12.5
item = item.strip()
try:
converted_toxic_list.append(float(item) * molecular_weight / 1000)
except:
pass
converted_activity_all.append(converted_activity_list)
converted_toxic_all.append(converted_toxic_list)
peptides["converted_activity"] = converted_activity_all
peptides["converted_toxic"] = converted_toxic_all
print('--> Writing valid sequences with molecular weights converted to valid_sequences_with_mw_converted.csv')
peptides.to_csv("./data/valid_sequences_with_mw_converted.csv")
return peptides
# Starting process
print('Dataset Creation process begins ... ')
# AMP data
print('**** Creating AMP datasets ****')
# Get Valid Sequences
peptide_all = get_valid_sequences()
print ('1. Getting all valid peptide sequences from DBAASP, number of seqs extracted = ', len(peptide_all))
print('--> Sequences stored in valid_sequences.csv')
# Add molecular weights
print('2. Converting Molecular weights')
peptide_all_with_mw = add_molecular_weights(peptide_all)
# Extract list of anti-microbial activities and list of toxicities
peptide_all_with_activity = add_activity_list(peptide_all)
peptide_all_with_activity_toxicity = add_toxic_list(peptide_all_with_activity)
# Add the converted units to activity list and toxicity list
peptide_all_converted = convert_units(peptide_all_with_activity_toxicity)
# Statistics
def get_stats():
peptides = pd.DataFrame()
all_file_names = []
total = 0
unusual_amino_acids = 0
for j_file in glob.glob(os.path.join(cfg.DATA_ROOT, "dbaasp/*.json")):
total += 1
filename = j_file[j_file.rfind("/") + 1:]
with open(j_file, encoding='utf-8') as train_file:
try:
dict_tmp = json.load(train_file)
dict_tmp["seq"] = dict_tmp.pop("sequence")
dict_train = {}
dict_train["peptideCard"] = dict_tmp
except:
print(f'jsonLoad error!:{filename}')
continue
if dict_train["peptideCard"].get("unusualAminoAcids") != []:
unusual_amino_acids += 1
continue
peptide = pd.DataFrame.from_dict(dict_train, orient='index')
peptides = pd.concat([peptides, peptide])
all_file_names.append(filename)
peptides["filename"] = all_file_names
print ("--> For DBAASP:")
print ("Total number of sequences:", total)
print ("Total number of unusual AminoAcids:", unusual_amino_acids)
return peptides
print('3. Some Statistics of collected valid sequences')
peptide_all = get_stats()
not_valid_count = len([seq for seq in peptide_all.seq if not_in_range(seq)])
print ("--> Number of not in range sequences:", not_valid_count)
print ("--> Number of valid sequences:", len(peptide_all_converted))
has_activity = [item for item in peptide_all_converted.activity_list if item != []]
print ("--> Number of valid sequences with antimicrobial activity:", len(has_activity))
has_toxicity = [item for item in peptide_all_converted.toxic_list if item != []]
print ("--> Number of valid sequences with toxicity:", len(has_toxicity))
################################################################
df = pd.read_csv("./data/valid_sequences_with_mw_converted.csv")
print (len(df))
# df.head() # default df: is dbaasp
def add_min_max_mean(df_in):
min_col = [min(ast.literal_eval(li_str)) if li_str != '[]' else '' for li_str in list(df_in.converted_activity)]
max_col = [max(ast.literal_eval(li_str)) if li_str != '[]' else '' for li_str in list(df_in.converted_activity)]
mean_col = [np.mean(ast.literal_eval(li_str)) if li_str != '[]' else '' for li_str in list(df_in.converted_activity)]
df_in["min_activity"] = min_col
df_in["max_activity"] = max_col
df_in["avg_activity"] = mean_col
return df_in
def all_activity_more_than_30(x_str):
x = ast.literal_eval(x_str)
for i in range(len(x)):
if x[i] < 30:
return False # all of them
# just for negative (pos: any item < 10, neg: all of them > 30)
return True
def all_activity_more_than_str(x_str, num):
x = ast.literal_eval(x_str)
if len(x) == 0:
return False
for i in range(len(x)):
if x[i] < num:
return False
return True
def all_activity_more_than(df, num):
return df[df['converted_activity'].apply(lambda x: all_activity_more_than_str(x, num))]
def all_toxic_more_than(df, num):
return df[df['converted_toxic'].apply(lambda x: all_activity_more_than_str(x, num))]
def all_activity_less_than_str(x_str, num):
x = ast.literal_eval(x_str)
if len(x) == 0:
return False
for i in range(len(x)):
if x[i] > num:
return False
return True
def all_toxic_less_than(df, num):
return df[df['converted_toxic'].apply(lambda x: all_activity_less_than_str(x, num))]
def has_activity_less_than_10(x_str):
x = ast.literal_eval(x_str)
for i in range(len(x)):
if x[i] < 10:
return True
return False
def has_activity_less_than_str(x_str, num):
x = ast.literal_eval(x_str)
for i in range(len(x)):
if x[i] < num:
return True
return False
def has_activity_less_than(df, num):
return df[df['converted_activity'].apply(lambda x: has_activity_less_than_str(x, num))]
def get_seq_len_less_than(df, seq_length):
df_short = df[df['seq'].apply(lambda x: len(x) <= seq_length)]
return df_short
def remove_df(df1, df2):
return pd.concat([df1, df2, df2]).drop_duplicates(keep=False)
# add min, max, mean to all dbaasp
df = add_min_max_mean(df)
df_dbaasp = df[["seq", "activity_list", "converted_activity",
"min_activity", "max_activity", "avg_activity"]]
df_dbaasp.to_csv("./data/all_valid_dbaasp.csv")
# 3) Overlapping sequences between DBAASP and Satpdb with AMP activity <10 ug/ml
print('4. Finding overlapping sequences between DBAASP and Satpdb with AMP activity <10 ug/ml ...')
def get_satpdb(train_file):
for line in train_file.readlines():
if "Peptide ID" in line:
record = {}
line = re.sub(u"\\<.*?\\>", "", line)
peptideId = line.split('Peptide ID')[1].split('Sequence')[0]
record['Peptide.ID'] = peptideId
record['Sequence'] = line.split('Sequence')[1].split('C-terminal modification')[0]
record['C.terminal.modification'] = line.split('C-terminal modification')[1].split('N-terminal modification')[0]
record['N.terminal.modification'] = line.split('N-terminal modification')[1].split('Peptide Type')[0]
record['Peptide.Type'] = line.split('Peptide Type')[1].split('Type of Modification')[0]
record['Type.of.Modification'] = line.split('Type of Modification')[1].split('Source (Databases)')[0]
record['Source..Databases.'] = line.split('Source (Databases)')[1].split('Link to Source')[0]
record['Link.to.Source'] = line.split('Link to Source')[1].split('Major Functions')[0]
record['Major.Functions'] = line.split('Major Functions')[1].split('Sub-functions')[0]
record['Sub.functions'] = line.split('Sub-functions')[1].split('Additional Info')[0]
record['Additional.Info'] = line.split('Additional Info')[1].split('Helix (%)')[0]
record['Helix'] = line.split('Helix (%)')[1].split('Strand (%)')[0]
record['Strand'] = line.split('Strand (%)')[1].split('Coil (%)')[0]
record['Coil'] = line.split('Coil (%)')[1].split('Turn (%)')[0]
record['Turn'] = line.split('Turn (%)')[1].split('DSSP states')[0]
record['DSSP.states'] = line.split('DSSP states')[1].split('Tertiary Structure')[0]
return peptideId, record
def get_satpdbs():
dict_train = {}
for j_file in glob.glob(os.path.join(cfg.DATA_ROOT, "satpdb/source/*.html")):
with open(j_file, encoding='utf-8') as train_file:
try:
name, record = get_satpdb(train_file)
dict_train[name] = record
except:
print(f'error loading html:{j_file}')
peptides = pd.DataFrame.from_dict(dict_train, orient='index')
peptides.to_csv(os.path.join(cfg.DATA_ROOT,"satpdb/satpdb.csv"))
return peptides
df_satpdb = get_satpdbs()
#df_satpdb = pd.read_csv("./data/satpdb/satpdb.csv")
df_satpdb = df_satpdb.rename(index=str, columns={"Sequence": "seq",
"C.terminal.modification": "cterminal",
"N.terminal.modification": "nterminal",
"Peptide.Type": "Peptide_Type",
"Type.of.Modification": "modi"})
valid_df_satpdb = df_satpdb[(df_satpdb.cterminal == "Free") &
(df_satpdb.nterminal == "Free") &
(df_satpdb.Peptide_Type == "Linear") &
(df_satpdb.modi == "None")]
print ("--> Number of valid satpdb = ", len(valid_df_satpdb))
df_overlap = pd.merge(df, valid_df_satpdb, on='seq', how='inner')
print ("--> Number of overlap sequences = ", len(df_overlap))
min_col = [min(ast.literal_eval(li_str)) if li_str != '[]' else '' for li_str in list(df_overlap.converted_activity)]
max_col = [max(ast.literal_eval(li_str)) if li_str != '[]' else '' for li_str in list(df_overlap.converted_activity)]
mean_col = [np.mean(ast.literal_eval(li_str)) if li_str != '[]' else '' for li_str in list(df_overlap.converted_activity)]
df_overlap["min_activity"] = min_col
df_overlap["max_activity"] = max_col
df_overlap["avg_activity"] = mean_col
df_overlap_all = df_overlap[["seq", "activity_list", "converted_activity",
"min_activity", "max_activity", "avg_activity"]]
print('5. Writing the overlap sequences to all_overlap.csv')
df_overlap_all.to_csv("./data/all_overlap.csv")
# length for all <=50
#
# overlap_neg: satpdb all activity greater than 100 : negative
# ** satpdb_pos: satpdb (the same as uniprot1) - overlap_neg
# dbaasp < 25 -> pos anything
# ** amp_pos = dbassp < 25 + satpdb_pos
# select sequences dbaasp, satpdb, and overlap(dbaasp, satpdb) of len <=50
print('6. Selecting sequences dbaasp, satpdb, and overlap(dbaasp, satpdb) of len <=50')
df = get_seq_len_less_than(df, 50)
df_overlap = get_seq_len_less_than(df_overlap, 50)
valid_df_satpdb = get_seq_len_less_than(valid_df_satpdb, 50)
print('7. Selecting negative and positive sequences for AMP activity')
overlap_neg = all_activity_more_than(df_overlap, 100)
print ("--> Number of negative seq in satpdb", len(overlap_neg))
print ("--> Number of unique seq in satpdb", len(valid_df_satpdb["seq"].drop_duplicates()))
satpdb_pos = remove_df(valid_df_satpdb["seq"].drop_duplicates(), overlap_neg["seq"])
satpdb_pos1 = pd.DataFrame({'seq': satpdb_pos.values}) # amp_pos[["seq"]]
satpdb_pos1["source"] = ["satpdb_pos"] * len(satpdb_pos1)
satpdb_pos1 = satpdb_pos1[["seq", "source"]]
print ("--> Number of positive seq in satpdb", len(satpdb_pos))
satpdb_pos1.seq = satpdb_pos1.seq.apply(lambda x: "".join(x.split())) # remove the space from the seq
satpdb_pos1 = satpdb_pos1.drop_duplicates('seq')
print('--> Writing to satpdb_pos.csv')
satpdb_pos1.to_csv("./data/satpdb_pos.csv", index=False, header=False)
def get_ampep(path):
ampeps = {}
ampeps['seq'] = []
for line in open(path).readlines():
if not line.startswith('>'):
ampeps['seq'].append(line.strip())
return pd.DataFrame.from_dict(ampeps)
# combine all positive sequences
print('8. Combining all positive sequences for AMP activity')
# col_Names = ["seq", "label"]
# print('--> Parsing ampep sequences')
# ampep_pos = pd.read_csv("./data_processing/data/ampep/pos_ampep_l1-80.csv", names=col_Names)
# ampep_pos = ampep_pos.drop(columns=['label'])
# ampep_pos.seq = ampep_pos.seq.apply(lambda x: "".join(x.split())) # remove the space from the seq
ampep_pos = get_ampep(os.path.join(cfg.DATA_ROOT, "ampep/train_AMP_3268.fasta"))
ampep_pos = get_seq_len_less_than(ampep_pos, 50)
ampep_pos["source"] = ["ampep_pos"]*len(ampep_pos)
ampep_pos = ampep_pos[["seq", "source"]]
print('--> Writing to ampep_pos.csv')
print ("--> Number of ampep_pos", len(ampep_pos))
ampep_pos.to_csv("./data/ampep_pos.csv", index=False, header=False)
print('--> Writing dbaasp sequences')
print ("--> Number of all seqs dbaasp", len(df))
dbaasp_pos = has_activity_less_than(df, 25)["seq"]
dbaasp_pos1 = pd.DataFrame({'seq': dbaasp_pos.values})
dbaasp_pos1["source"] = ["dbaasp_pos"] * len(dbaasp_pos1)
dbaasp_pos1 = dbaasp_pos1[["seq", "source"]]
print ("--> Number of dbaasp_less_than_25:", len(dbaasp_pos), "number of satpdb_pos:", len(satpdb_pos))
amp_pos = pd.concat([dbaasp_pos1, satpdb_pos1, ampep_pos]).drop_duplicates('seq')
print ("--> Number of amp_pos", len(amp_pos))
amp_pos.columns = ['seq', 'source']
amp_pos['source2'] = amp_pos['source']
amp_pos['source'] = amp_pos['source'].map({'dbaasp_pos': 'amp_pos', 'ampep_pos': 'amp_pos', 'satpdb_pos': 'amp_pos'})
amp_pos = amp_pos[amp_pos['seq'].str.contains('^[A-Z]+')]
amp_pos = amp_pos[~amp_pos.seq.str.contains("B")]
amp_pos = amp_pos[~amp_pos.seq.str.contains("J")]
amp_pos = amp_pos[~amp_pos.seq.str.contains("O")]
amp_pos = amp_pos[~amp_pos.seq.str.contains("U")]
amp_pos = amp_pos[~amp_pos.seq.str.contains("X")]
amp_pos = amp_pos[~amp_pos.seq.str.contains("Z")]
amp_pos = amp_pos[~amp_pos.seq.str.contains('[a-z]')]
amp_pos = amp_pos[~amp_pos.seq.str.contains("-")]
amp_pos = amp_pos[~amp_pos.seq.str.contains(r'[0-9]')]
#amp_pos.seq = amp_pos.seq.apply(lambda x: " ".join(x)) # remove the space from the seq
print('--> Writing amp_pos.csv combined from dbaasp, ampep, satpdb positive sequences')
amp_pos.to_csv("./data/amp_pos.csv", index=False, header=False)
dbaasp_more_than_100 = pd.DataFrame()
dbaasp_more_than_100["seq"] = all_activity_more_than(df, 100)["seq"]
#print ("dbaasp_more_than_100", len(dbaasp_more_than_100))
#print(all_activity_more_than(df, 100).head())
# ampep negative and uniprot sequences
print('9. Collecting uniprot sequences as unknown label')
col_Names = ["seq"]
uniprot_unk1 = pd.read_csv(os.path.join(cfg.DATA_ROOT,"uniprot/uniprot_reviewed_yes_l1-80.txt"), names=col_Names)
col_Names = ["seq"]
uniprot_unk2 = pd.read_csv(os.path.join(cfg.DATA_ROOT,"uniprot/uniprot_reviewed_no_l1-80.txt"), names=col_Names)
uniprot_unk = pd.concat([uniprot_unk1, uniprot_unk2]).drop_duplicates()
uniprot_unk = get_seq_len_less_than(uniprot_unk, 50)
print ("--> uniprot_unk", len(uniprot_unk))
uniprot_unk["source"] = ["uniprot"] * len(uniprot_unk)
uniprot_unk["source2"] = uniprot_unk["source"]
uniprot_unk['source'] = uniprot_unk['source'].map({'uniprot': 'unk'})
print('--> Writing uniprot_unk.csv ')
uniprot_unk.to_csv("./data/uniprot_unk.csv", index=False, header=False)
print('10. Collecting negative sequences for AMP activity ...')
# col_Names = ["seq", "label"]
# ampep_neg = pd.read_csv("./data/ampep/neg_ampep_l1-80.csv", names=col_Names)
# ampep_neg.seq = ampep_neg.seq.apply(lambda x: "".join(x.split())) # remove the space from the seq
# #ampep_neg.columns = ['']
# ampep_neg = ampep_neg.drop(columns=['label'])
ampep_neg = get_ampep(os.path.join(cfg.DATA_ROOT, "ampep/train_nonAMP_9777.fasta"))
ampep_neg = get_seq_len_less_than(ampep_neg, 50)
#print ("----------")
print ("--> Parsing ampep negative sequences, number of ampep_neg = ", len(ampep_neg))
# dbaasp_neg = dbaasp > 100 -> neg (how many you loose)
# Combined_NEG: 10*(dbaasp > 100) + UNIPROT_0
# Combined_POS = Satpdb_pos + ampep_pos + dbaasp_pos
dbaasp_more_than_100["source"] = ["dbaasp_neg"] * len(dbaasp_more_than_100)
# remove duplicates between ampep negative and dbaasp negative
ampep_neg["source"] = ["ampep_neg"] * len(ampep_neg)
ampep_neg = ampep_neg[["seq", "source"]]
print ("--> dbaasp_more_than_100:", len(dbaasp_more_than_100), "ampep_neg:", len(ampep_neg))
# combined_neg = remove_df(pd.concat([dbaasp_more_than_100, uniprot_neg]).drop_duplicates, amp_pos1)
combined_neg = pd.concat([dbaasp_more_than_100, ampep_neg]).drop_duplicates('seq')
# satpdb_pos = remove_df(valid_df_satpdb["seq"].drop_duplicates(), overlap_neg["seq"])
print ("--> combined_neg number = ", len(combined_neg))
combined_neg.to_csv("./data/dbaasp_more_than100_combined_ampep_neg.csv", index=False, header=False) # not multiplied the samples.
common = amp_pos.merge(combined_neg, on=['seq'])
# print(common.head())
combined_neg1 = pd.concat([combined_neg, common]).drop_duplicates('seq')
# print(combined_neg1.head())
combined_neg1['source2'] = combined_neg1['source']
combined_neg1['source'] = combined_neg1['source'].map({'dbaasp_neg': 'amp_negc', 'ampep_neg': 'amp_negnc'})
combined_neg1 = combined_neg1.drop(columns=['source_x', 'source_y'])
# print(combined_neg1.head())
combined_neg1 = combined_neg1[combined_neg1['seq'].str.contains('^[A-Z]+')]
combined_neg1 = combined_neg1[~combined_neg1.seq.str.contains("B")]
combined_neg1 = combined_neg1[~combined_neg1.seq.str.contains("J")]
combined_neg1 = combined_neg1[~combined_neg1.seq.str.contains("O")]
combined_neg1 = combined_neg1[~combined_neg1.seq.str.contains("U")]
combined_neg1 = combined_neg1[~combined_neg1.seq.str.contains("X")]
combined_neg1 = combined_neg1[~combined_neg1.seq.str.contains("Z")]
combine_neg1 = combined_neg1[~combined_neg1.seq.str.contains("-")]
combine_neg1 = combined_neg1[~combined_neg1.seq.str.contains('[a-z]')]
#combined_neg1=combined_neg1[~combined_neg1.seq.str.contains("*")]
combined_neg1 = combined_neg1[~combined_neg1.seq.str.contains(r'[0-9]')]
print('--> Writing combined negative sequences collected from DBAASP and AMPEP to amp_neg.csv')
combined_neg1.to_csv("./data/amp_neg.csv", index=False, header=False) # not multiplied the samples.
# Toxicity data
print('**** Creating Toxicity datasets ****')
# don't need toxinpred_pos as satpdb takes care of it
# toxinpred is already len <=35.
col_Names = ["seq"]
print('1. Collecting Toxicity negative samples')
toxinpred_neg1 = pd.read_csv(os.path.join(cfg.DATA_ROOT,"toxicity/nontoxic_trembl_toxinnpred.txt"), names=col_Names)
print ("--> toxinpred_neg1 number = ", len(toxinpred_neg1))
toxinpred_neg1["source2"] = ["toxinpred_neg_tr"] * len(toxinpred_neg1)
toxinpred_neg1 = toxinpred_neg1[["seq", "source2"]]
toxinpred_neg2 = pd.read_csv(os.path.join(cfg.DATA_ROOT,"toxicity/nontoxic_swissprot_toxinnpred.txt"), names=col_Names)
print ("--> toxinpred_neg2 number = ", len(toxinpred_neg2))
toxinpred_neg2["source2"] = ["toxinpred_neg_sp"] * len(toxinpred_neg2)
toxinpred_neg2 = toxinpred_neg2[["seq", "source2"]]
toxinpred_neg = pd.concat([toxinpred_neg1, toxinpred_neg2]).drop_duplicates('seq')
print('--> toxinpred_neg number = ', len(toxinpred_neg))
# valid_df_satpdb
toxic = valid_df_satpdb[valid_df_satpdb['Major.Functions'].str.contains("toxic")]
toxic = valid_df_satpdb[valid_df_satpdb['Major.Functions'].str.contains("toxic") | valid_df_satpdb['Sub.functions'].str.contains("toxic")]
print ('--> Valid toxicity sequences from Satpdb = ', len(toxic))
# for toxicity:
# dbassp
# all of them > 250 -> dbaap_neg
# all of them < 200-> dbaap_pos
#
# combined_toxic_pos = satpdb_pos + dbaap_pos
#
# combined_toxic_neg = 10*(dbaap_neg) + UNiprot0
# df from dbaasp, toxic from satpdb
print('2. Collecting Toxicity positive samples')
df_overlap_tox = pd.merge(df, toxic, on='seq', how='inner')[["seq", "toxic_list", "converted_toxic"]]
combined_toxic_pos = all_toxic_less_than(df_overlap_tox, 200)
dbaasp_toxic_pos = all_toxic_less_than(df, 200)
dbaasp_toxic_pos["source2"] = ["dbaasp"] * len(dbaasp_toxic_pos)
dbaasp_toxic_pos = dbaasp_toxic_pos[["seq", "source2"]]
toxic["source2"] = ["satpdb"]*len(toxic)
toxic = toxic[["seq", "source2"]]
combined_toxic_pos = pd.concat([dbaasp_toxic_pos, toxic]).drop_duplicates('seq')
combined_toxic_pos['source'] = 'tox_pos'
#combined_toxic_pos = combined_toxic_pos[["seq", "source", "tox"]]
combined_toxic_pos = combined_toxic_pos[["seq", "source", "source2"]]
combined_toxic_pos = combined_toxic_pos[combined_toxic_pos['seq'].str.contains('^[A-Z]+')]
combined_toxic_pos = combined_toxic_pos[~combined_toxic_pos.seq.str.contains("B")]
combined_toxic_pos = combined_toxic_pos[~combined_toxic_pos.seq.str.contains("J")]
combined_toxic_pos = combined_toxic_pos[~combined_toxic_pos.seq.str.contains("O")]
combined_toxic_pos = combined_toxic_pos[~combined_toxic_pos.seq.str.contains("U")]
combined_toxic_pos = combined_toxic_pos[~combined_toxic_pos.seq.str.contains("X")]
combined_toxic_pos = combined_toxic_pos[~combined_toxic_pos.seq.str.contains("Z")]
combined_toxic_pos = combined_toxic_pos[~combined_toxic_pos.seq.str.contains('[a-z]')]
combined_toxic_pos = combined_toxic_pos[~combined_toxic_pos.seq.str.contains("-")]
#combined_toxic_pos=combined_toxic_pos[~combined_toxic_pos.seq.str.contains("*")]
combined_toxic_pos = combined_toxic_pos[~combined_toxic_pos.seq.str.contains(r'[0-9]')]
combined_toxic_pos.to_csv("./data/toxic_pos.csv", index=False, header=False)
print ('--> combined_toxic_pos number = ', len(combined_toxic_pos))
dbaasp_neg = all_toxic_more_than(df, 250)
dbaasp_neg["source2"] = ["dbaasp"] * len(dbaasp_neg)
dbaasp_neg['source'] = 'tox_negc'
dbaasp_neg = dbaasp_neg[["seq", "source", "source2"]]
dbaasp_neg.head()
toxinpred_neg['source'] = 'tox_negnc'
toxinpred_neg = toxinpred_neg[["seq", "source", "source2"]]
combined_toxic_neg = | pd.concat([dbaasp_neg, toxinpred_neg]) | pandas.concat |
import pandas as pd
import numpy as np
import os
import json
import requests
from bs4 import BeautifulSoup
from io import StringIO
# def get_current_players():
# rootdir = '../resources/players/'
# player_names = []
# for subdir, dirs, files in os.walk(rootdir):
# for file in files:
# data_path = os.path.join(subdir)
# name = data_path.replace(rootdir, "")
# player_names.append(name)
#
# filename = "../resources/scraped_players.csv"
# # opening the file with w+ mode truncates the file
# f = open(filename, "w+")
# f.close()
# for path, subdirs, files in os.walk(rootdir):
# for name in files:
# if name == 'gw.csv':
# trainFile = os.path.join(path, name)
# pwd = os.getcwd()
# os.chdir(os.path.dirname(trainFile))
# df = pd.read_csv(os.path.basename(trainFile), sep=',', skiprows=[0], header=None, encoding='utf-8')
# os.chdir(pwd)
# with open(filename, 'a') as f:
# df.to_csv(f, header=False)
#
# def merge_ids():
# get_current_players()
# player_df = pd.read_csv('../resources/scraped_players.csv', sep=',', encoding='utf-8', header=None)
# id_file = '../resources/player_idlist.csv'
# ids = pd.read_csv(id_file, sep=',', encoding='utf-8')
#
# player_df['season'] = '2017/2018'
# player_df.columns = ['round', 'assists', 'attempted_passes', 'big_chances_created',
# 'big_chances_missed', 'bonus', 'bps', 'clean_sheets',
# 'clearances_blocks_interceptions', 'completed_passes', 'creativity',
# 'dribbles', 'ea_index', 'element', 'errors_leading_to_goal',
# 'errors_leading_to_goal_attempt', 'fixture', 'fouls', 'goals_conceded',
# 'goals_scored', 'ict_index', 'id', 'influence', 'key_passes',
# 'kickoff_time', 'kickoff_time_formatted', 'loaned_in', 'loaned_out',
# 'minutes', 'offside', 'open_play_crosses', 'opponent_team', 'own_goals',
# 'penalties_conceded', 'penalties_missed', 'penalties_saved',
# 'recoveries', 'red_cards', 'round', 'saves', 'selected', 'tackled',
# 'tackles', 'target_missed', 'team_a_score', 'team_h_score', 'threat',
# 'total_points', 'transfers_balance', 'transfers_in', 'transfers_out',
# 'value', 'was_home', 'winning_goals', 'yellow_cards', 'season']
# player_df.drop(['id'], axis=1, inplace=True)
# player_df.rename(columns={'element': 'id'}, inplace=True)
#
# players = player_df.merge(ids, how='left', on=['id'])
# players.to_csv('../resources/BaseData2017-18.csv', sep=',', encoding='utf-8')
#
# def team_data():
# merge_ids()
# raw_file = '../resources/players_raw.csv'
# players_raw = pd.read_csv(raw_file, sep=',', encoding='utf-8')
# teams = '../resources/team_codes.csv'
# team_codes = pd.read_csv(teams, sep=',', encoding='utf-8')
# team_codes.rename(columns={'team_code': 'team'}, inplace=True)
# all_teams = players_raw.merge(team_codes, how='left', on=['team'])
# new = all_teams[['first_name', 'second_name', 'team', 'team_name']].copy()
#
# cuurent_players_file = '../resources/BaseData2017-18.csv'
# current_players = pd.read_csv(cuurent_players_file, sep=',', encoding='utf-8')
#
# merged_players = current_players.merge(new, how='left', on=['first_name', 'second_name'])
#
# opponent_team_codes = team_codes.copy()
# opponent_team_codes.rename(columns={'team': 'opponent_team'}, inplace=True)
# data = merged_players.merge(opponent_team_codes, how='left', on=['opponent_team'])
# data.rename(columns={'team_name_x': 'team_name', 'team_name_y': 'opponent_team_name'}, inplace=True)
# data.drop(['Unnamed: 0', 'winning_goals'], axis=1, inplace=True)
# data.to_csv('../resources/BeforeCreatedFeatures2017-18.csv', sep=',', encoding='utf-8')
def merge_league_ranks():
# team_data()
CurrentPlayers = pd.read_csv('../resources/BeforeCreatedFeatures2017-18.csv', sep=',', encoding='utf-8')
CurrentPlayers.drop(['Unnamed: 0', 'team', 'attempted_passes', 'big_chances_missed', 'bps', 'big_chances_created',
'clearances_blocks_interceptions', 'completed_passes', 'dribbles', 'round',
'errors_leading_to_goal', 'errors_leading_to_goal_attempt', 'fouls',
'kickoff_time', 'kickoff_time_formatted', 'loaned_in', 'loaned_out', 'offside',
'open_play_crosses','own_goals', 'penalties_conceded', 'penalties_missed', 'penalties_saved',
'recoveries', 'red_cards', 'selected', 'tackled', 'tackles', 'target_missed',
'transfers_balance', 'transfers_in', 'transfers_out', 'yellow_cards', 'ea_index'],
axis=1, inplace=True)
CurrentPlayers.rename(columns={'team_name': 'team', 'opponent_team_name': 'opponents', 'second_name': 'name',
'round.1':'round'}, inplace=True)
CurrentPlayers.replace(['Bournmouth', 'Brighton', 'Huddersfield'], ['AFC Bournemouth', 'Brighton and Hove Albion',
'Huddersfield Town'], inplace=True)
a = 0
b = 1
c = 2
df_list = []
for i in range(1, 29):
url = "https://footballapi.pulselive.com/football/standings?compSeasons=79&altIds=true&detail=2&FOOTBALL_COMPETITION=1&gameweekNumbers=1-" + str(
i)
r = requests.get(url, headers={"Content-Type": "application/x-www-form-urlencoded", "Connection": "keep-alive",
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate, br", "Accept-Language": "en-US, en; q=0.9",
"Host": "footballapi.pulselive.com", "Origin": "https://www.premierleague.com",
"Referer": "https://www.premierleague.com/tables?co=1&se=79&ha=-1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"
})
data = json.loads(r.text)
for x in data['tables']:
y = x['entries']
for j in range(0, 20):
rank_data = y[j]
position = rank_data["position"]
team = rank_data['team']
team_name = team['name']
df = pd.DataFrame({'gameweek': i, 'position': position, 'name': team_name}, index=[a, b, c])
df_list.append(df)
a = a + 1
b = b + 1
c = c + 1
result = pd.concat(df_list)
result = result.drop_duplicates()
result.rename(columns={'gameweek': 'round'}, inplace=True)
result.rename(columns={'name': 'team'}, inplace=True)
df = pd.merge(CurrentPlayers, result, how='left', left_on=['round', 'team'], right_on=['round', 'team'])
opponent_ranks = result.rename(columns={'team': 'opponents', 'position': 'opponent_position'})
merged = | pd.merge(df, opponent_ranks, how='left', left_on=['round', 'opponents'], right_on=['round', 'opponents']) | pandas.merge |
#!/usr/bin/env python
# coding: utf-8
# # Deploy Model
# In this section we will submit data to the [Azure Machine Learning Model Endpoint](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where) we have created in the Azure Portal - using Python to call a REST API
#
# We will be following a simlilar process to the documentation here:[How to Consume a Web Service](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-consume-web-service)
# ## Connect to Azure ML Service
# We [connect to the Azure Machine Learning Service](https://docs.microsoft.com/en-us/azure/machine-learning/service/tutorial-1st-experiment-sdk-setup) in order to get details about calling the web service we have created
#
# Be sure to fill in 'config.json' file provided with the code. This must contain your Azure Machine Learning workspace information for the code below to work
# In[ ]:
from azureml.core import Workspace
ws = Workspace.from_config()
print(ws)
# ## Find Web Service by Name and get Connection Details
# We select the web service by the name and this will provide us information on the URLs such as scoring and [swagger](https://swagger.io/) calls
# In[ ]:
from azureml.core import Webservice
service = Webservice(workspace=ws, name='<insert web service name here>')
print("Score URI: " + str(service.scoring_uri))
print("Swagger URI: " + str(service.swagger_uri))
primary, secondary = service.get_keys()
print(primary)
# ## Data Input to REST API Schema
# Now we need to understand the schema of the data to be input into the REST call.
#
# You can get this sample input information from the auto-generated scoring script that was created in the Azure Portal
# In[ ]:
import pandas as pd
input_sample = | pd.DataFrame(data=[{'Escalated': 0, 'GeographyID': 2, 'OriginalScore': 9, 'Tenure': 25.0, 'Theme': 'speed', 'RoleID': 2, 'Continent': 'Europe', 'CountryRegion': 'France', 'RoleInOrg': 'consumer', 'CompletedTutorialBinary': 1, 'RatingNumeric': 1, 'DateCreatedDay': 23, 'DateCreatedMonth': 11, 'DateCreatedYear': 2018}]) | pandas.DataFrame |
import datetime as dt
import os
from datetime import timedelta
from pathlib import Path
import holidays
import pandas as pd
from sqlalchemy import create_engine
DB_HOST = 'localhost'
DB_PORT = 5432
DB = 'group_16'
def main():
user = input('Enter pgadmin username: ')
password = input('Enter pgadmin password: ')
engine = create_engine('postgresql+psycopg2://{}:{}@{}:{}/{}'.format(user, password, DB_HOST, DB_PORT, DB))
print('----------HOUR----------')
hour_df = create_hour_df()
insert_hour(engine, hour_df)
print('--------END HOUR--------')
print('---------WEATHER--------')
weather_df = create_weather_df()
insert_weather(engine, weather_df)
print('-------END WEATHER------')
print('--------COLLISION-------')
collision_df = create_collision_df()
print('--------LOCATION--------')
insert_location(engine, collision_df)
print('------END LOCATION------')
print('--------ACCIDENT--------')
insert_accident(engine, collision_df)
print('------END ACCIDENT------')
print('------END COLLISION-----')
print('----------FACT----------')
insert_fact(engine)
print('--------END FACT--------')
def create_weather_df():
weather_csv = Path('weather_final.csv')
if weather_csv.is_file():
weather_df = pd.read_csv('weather_final.csv', dtype={'weather': str})
else:
# Filter relevant stations from the file (ON, hourly data still active 2014+)
iter_stations = pd.read_csv('Station Inventory EN.csv', header=3, chunksize=1000)
station_df = pd.concat([chunk[(chunk['Province'] == 'ONTARIO') & (chunk['HLY Last Year'] >= 2014)] for chunk in iter_stations], ignore_index=True)
station_df = station_df.sort_values(by='Station ID').drop_duplicates('Name', keep='last')
station_df = station_df[['Name', 'Latitude (Decimal Degrees)', 'Longitude (Decimal Degrees)']] # Select columns
station_df.columns = ['station_name', 'latitude', 'longitude'] # Rename columns
station_names = station_df['station_name'].tolist()
# Create one dataframe from all files
weather_df = pd.DataFrame()
files = ['ontario_1_1.csv', 'ontario_1_2.csv', 'ontario_2_1.csv', 'Ontario_2_2.csv', 'Ontario_3.csv', 'Ontario_4.csv']
for f in files:
print('Processing file: ' + f)
# Get filtered dataframe for file
reader = pd.read_csv(f, chunksize=1000)
df = pd.concat([chunk[(chunk['Year'] >= 2014) & (chunk['X.U.FEFF..Station.Name.'].isin(station_names))] for chunk in reader])
# Drop rows that have no weather data
df.dropna(subset=df.columns[range(5, len(df.columns)-2)], how='all', inplace=True)
# Combine final df with file df
weather_df = pd.concat([weather_df, df], ignore_index=True)
# Clean and finalize dataframe
weather_df.drop(columns=list(weather_df.filter(regex='.Flag$')) + ['Year', 'Month', 'Day', 'Time', 'X.Province.'], inplace=True)
weather_df.columns = [
'date_time',
'temp_c',
'dew_point_temp_c',
'rel_hum',
'wind_dir_deg',
'wind_spd_kmh',
'visibility_km',
'stn_press_kpa',
'hmdx',
'wind_chill',
'weather',
'station_name'
]
weather_df = weather_df.merge(station_df, how='left')
weather_df.sort_values(['station_name', 'date_time'], inplace=True)
weather_df.to_csv('weather_final.csv', index=False)
print('Weather Processed (Rows, Cols) : ', weather_df.shape)
return weather_df
def insert_weather(engine, weather_df):
connection = engine.connect()
connection.execute('DROP TABLE IF EXISTS weather CASCADE')
connection.execute('''CREATE TABLE weather(
weather_key serial PRIMARY KEY,
station_name text,
longitude float8,
latitude float8,
temp_c float4,
dew_point_temp_c float4,
rel_hum int,
wind_dir_deg int,
wind_spd_kmh int,
visibility_km float4,
stn_press_kpa float4,
hmdx int,
wind_chill int,
weather text,
date_time timestamp,
hour_id int)''')
connection.close()
weather_df.to_sql('weather', con=engine, if_exists='append', index=False)
print('Weather table added to DB')
connection = engine.connect()
connection.execute('''UPDATE weather w
SET hour_id = h.hour_key
FROM hours h
WHERE w.date_time::date = h.currentdate
AND w.date_time::time = h.hour_start''')
connection.close()
print('Column hour_id updated in weather table')
def create_hour_df():
hour_df = pd.DataFrame()
hour_df['currentdate'] = pd.date_range(start='1/1/2014', end='01/01/2018', freq='H', closed='left')
hour_df['day_of_week'] = hour_df['currentdate'].dt.day_name()
hour_df['month_of_year'] = hour_df['currentdate'].dt.month_name()
hour_df['is_weekend'] = ((pd.DatetimeIndex(hour_df['currentdate']).dayofweek) // 5 == 1)
#HOLIDAY FLAG
# creating a blank series
Type_new_hol_flags = pd.Series([])
Type_new_hol_name = | pd.Series([]) | pandas.Series |
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from application import model_builder
def test_validate_types_numeric_success():
# Arrange
df = pd.DataFrame()
new_expect = pd.DataFrame()
new_expect["Some Feature"] = [3, 4, 5]
new_expect["Answer"] = [1, 2, 3]
df["Some Feature"] = new_expect["Some Feature"]
df["Answer"] = new_expect["Answer"]
fields = [["Some Feature", "Numeric"],
["Answer", "Response Variable"]]
# Act
x = model_builder.validate_types(df, fields)
# Assert
assert_frame_equal(x, new_expect, check_dtype=False)
def test_validate_types_numeric_string_converts_success():
# Arrange
df = pd.DataFrame()
new_expect = pd.DataFrame()
new_expect["Some Feature"] = [3, 4, 5]
new_expect["Answer"] = [1, 2, 3]
df["Some Feature"] = ["3", "4", "5"]
df["Answer"] = new_expect["Answer"]
fields = [["Some Feature", "Numeric"],
["Answer", "Response Variable"]]
# Act
x = model_builder.validate_types(df, fields)
# Assert
assert_frame_equal(x, new_expect, check_dtype=False)
def test_validate_types_numeric_string_converts_throws_error():
# Arrange
df = pd.DataFrame()
df["Some Feature"] = ["3d", "4d", "5d"]
df["Answer"] = [1, 2, 3]
fields = [["Some Feature", "Numeric"],
["Answer", "Response Variable"]]
# Act and Assert
with pytest.raises(ValueError):
model_builder.validate_types(df, fields)
def test_validate_types_percentage_converts_throws_value_error():
# Arrange
df = pd.DataFrame()
df["Some Feature"] = ["0.3s c", "0.4", "0.5"]
df["Answer"] = [1, 2, 3]
fields = [["Some Feature", "Percentage"],
["Answer", "Response Variable"]]
# Act and Assert
with pytest.raises(ValueError):
model_builder.validate_types(df, fields)
def test_validate_types_percentage_converts_success():
# Arrange
df = pd.DataFrame()
new_expect = pd.DataFrame()
new_expect["Some Feature"] = [30.0, 40.0, 50.0]
new_expect["Some Feature 2"] = [30.0, 40.0, 50.0]
new_expect["Some Feature 3"] = [30.0, 40.0, 50.0]
new_expect["Answer"] = [1, 2, 3]
df["Some Feature"] = [0.3, 0.4, 0.5]
df["Some Feature 2"] = ["0.3%", "0.4 %", " 0.5 %"]
df["Some Feature 3"] = ["30", "40", " 50"]
df["Answer"] = new_expect["Answer"]
fields = [["Some Feature", "Percentage"],
["Some Feature 2", "Percentage"],
["Some Feature 3", "Percentage"],
["Answer", "Response Variable"]]
# Act
x = model_builder.validate_types(df, fields)
# Assert
assert_frame_equal(x, new_expect, check_dtype=False)
def test_validate_types_money_converts_throws_value_error():
# Arrange
df = pd.DataFrame()
df["Some Feature"] = ["0.3s$", "$0.4", "0.5"]
df["Answer"] = [1, 2, 3]
fields = [["Some Feature", "Money"],
["Answer", "Response Variable"]]
# Act and Assert
with pytest.raises(ValueError):
model_builder.validate_types(df, fields)
def test_validate_types_percentage_converts_success():
# Arrange
df = pd.DataFrame()
new_expect = pd.DataFrame()
new_expect["Some Feature"] = [30.0, 40.0, 50.0]
new_expect["Some Feature 2"] = [30.0, 40.0, 50.0]
new_expect["Some Feature 3"] = [50000, 40000.0, 50000]
new_expect["Answer"] = [1, 2, 3]
df["Some Feature"] = [30, 40, 50]
df["Some Feature 2"] = ["$30", "$ 40 ", " $50 "]
df["Some Feature 3"] = ["$50,000", "40000", " 50,000"]
df["Answer"] = new_expect["Answer"]
fields = [["Some Feature", "Money"],
["Some Feature 2", "Money"],
["Some Feature 3", "Money"],
["Answer", "Response Variable"]]
# Act
x = model_builder.validate_types(df, fields)
# Assert
assert_frame_equal(x, new_expect, check_dtype=False)
def test_validate_types_value_set_success():
# Arrange
df = pd.DataFrame()
new_expect = pd.DataFrame()
new_expect["Some Feature"] = ["Married", "Single", "Married"]
new_expect["Answer"] = [1, 2, 3]
df["Some Feature"] = new_expect["Some Feature"]
df["Answer"] = new_expect["Answer"]
fields = [["Some Feature", "Value Set"],
["Answer", "Response Variable"]]
# Act
x = model_builder.validate_types(df, fields)
# Assert
assert_frame_equal(x, new_expect, check_dtype=False)
def test_validate_types_value_set_throws_value_exception_too_many_values():
# Arrange
df = pd.DataFrame()
df["Some Feature"] = range(1, 2000)
df["Answer"] = range(1, 2000)
fields = [["Some Feature", "Value Set"],
["Answer", "Response Variable"]]
# Act and Assert
with pytest.raises(ValueError):
model_builder.validate_types(df, fields)
def test_validate_types_yes_no_success():
# Arrange
df = pd.DataFrame()
new_expect = pd.DataFrame()
new_expect["Some Feature"] = ["Yes", "No", "No Data"]
new_expect["Answer"] = [1, 2, 3]
df["Some Feature"] = new_expect["Some Feature"]
df["Answer"] = new_expect["Answer"]
fields = [["Some Feature", "Yes/No"],
["Answer", "Response Variable"]]
# Act
x = model_builder.validate_types(df, fields)
# Assert
assert_frame_equal(x, new_expect, check_dtype=False)
def test_validate_types_yes_no_throws_value_exception_too_many_values():
# Arrange
df = pd.DataFrame()
df["Some Feature"] = range(1, 5)
df["Answer"] = range(1, 5)
fields = [["Some Feature", "Yes/No"],
["Answer", "Response Variable"]]
# Act and Assert
with pytest.raises(ValueError):
model_builder.validate_types(df, fields)
def test_validate_types_invalid_field_type():
# Arrange
df = pd.DataFrame()
df["Some Feature"] = range(1, 5)
df["Answer"] = range(1, 5)
fields = [["Some Feature", "Invalid Type"],
["Answer", "Response Variable"]]
# Act and Assert
with pytest.raises(ValueError):
model_builder.validate_types(df, fields)
def test_stripdown_splits_x_variables():
# Arrange
df = pd.DataFrame()
x_expect = pd.DataFrame()
x_expect["Some Feature"] = [3, 4, 5]
df["Some Feature"] = x_expect["Some Feature"]
df["Answer"] = [1, 2, 3]
fields = [["Some Feature", "Numeric"], ["Answer", "Response Variable"]]
# Act
x, y, fields = model_builder.stripdown_features(df, fields)
# Assert
assert_frame_equal(x, x_expect)
def test_stripdown_splits_response_variable_works():
# Arrange
df = pd.DataFrame()
y_expect = pd.Series([1, 0, 0], name="Answer")
df["Some Feature"] = [3, 4, 5]
df["Answer"] = y_expect
fields = [["Some Feature", "Numeric"], ["Answer", "Response Variable"]]
# Act
x, y, fields = model_builder.stripdown_features(df, fields)
# Assert
assert_series_equal(y, y_expect)
def test_stripdown_splits_response_variable_works_if_scale_of_0_to_100():
# Arrange
df = pd.DataFrame()
y_expect = pd.Series([0, 0, 1], dtype="int32")
df["Some Feature"] = [3, 4, 5]
df["Answer"] = [50, 70, 100]
fields = [["Some Feature", "Numeric"], ["Answer", "Response Variable"]]
# Act
x, y, fields = model_builder.stripdown_features(df, fields)
# Assert
assert_series_equal(y, y_expect)
def test_stripdown_removes_contact_details():
# Arrange
df = pd.DataFrame()
x_expect = pd.DataFrame()
x_expect["Some Feature"] = [3, 4, 5]
df["Some Feature"] = x_expect["Some Feature"]
df["Contacts1"] = ["tom", "john", "sarah"]
df["Contacts2"] = ["tom", "john", "sarah"]
df["Answer"] = [1, 2, 3]
fields = [["Some Feature", "Numeric"], ["Answer", "Response Variable"],
["Contacts1", "Contact Details"], ["Contacts2", "Contact Details"]]
# Act
x, y, fields = model_builder.stripdown_features(df, fields)
# Assert
assert_frame_equal(x, x_expect)
def test_stripdown_removes_string_fields():
# Arrange
df = pd.DataFrame()
x_expect = pd.DataFrame()
x_expect["Some Feature"] = [3, 4, 5]
df["Some Feature"] = x_expect["Some Feature"]
df["Postcodes"] = ["2104", "2000", "2756"]
df["Answer"] = [1, 2, 3]
fields = [["Some Feature", "Numeric"], ["Answer", "Response Variable"],
["Postcodes", "String"]]
# Act
x, y, fields = model_builder.stripdown_features(df, fields)
# Assert
assert_frame_equal(x, x_expect)
def test_stripdown_removes_columns_with_many_nulls_fields():
# Arrange
df = pd.DataFrame()
x_expect = pd.DataFrame()
x_expect["Some Feature"] = range(1, 12)
df["Some Feature"] = x_expect["Some Feature"]
df["A lot of Nulls"] = [None, 1, 2, 3, 4, 5, 6, 7, 8, None, 9]
df["Answer"] = range(1, 12)
fields = [["Some Feature", "Numeric"], ["Answer", "Response Variable"],
["A lot of Nulls", "Numeric"]]
# Act
x, y, fields = model_builder.stripdown_features(df, fields)
# Assert
assert_frame_equal(x, x_expect)
def test_stripdown_doesnt_remove_columns_with_some_nulls():
# Arrange
df = pd.DataFrame()
x_expect = pd.DataFrame()
x_expect["Some Feature"] = range(1, 12)
x_expect["A lot of Nulls"] = [None, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
df["Some Feature"] = x_expect["Some Feature"]
df["A lot of Nulls"] = x_expect["A lot of Nulls"]
df["Answer"] = range(1, 12)
fields = [["Some Feature", "Numeric"], ["Answer", "Response Variable"],
["A lot of Nulls", "Numeric"]]
# Act
x, y, fields = model_builder.stripdown_features(df, fields)
# Assert
assert_frame_equal(x, x_expect)
def test_knn_imputer_fills_nulls_on_numeric():
# Arrange
df = pd.DataFrame()
df["Some Feature"] = range(1, 12)
df["A lot of Nulls"] = [None, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
df["Answer"] = range(1, 12)
fields = [["Some Feature", "Numeric"], ["Answer", "Response Variable"],
["A lot of Nulls", "Numeric"]]
# Act
new_df = model_builder.impute_nulls(df, fields)
# Assert
assert new_df["A lot of Nulls"].isna().sum() == 0
def test_knn_imputer_does_nothing_if_not_numeric():
# Arrange
df = pd.DataFrame()
df["Some Feature"] = range(1, 12)
df["Some Feature 2"] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
df["Answer"] = range(1, 12)
fields = [["Some Feature", "Value Set"], ["Answer", "Response Variable"],
["Some Feature 2", "Value Set"]]
# Act
new_df = model_builder.impute_nulls(df, fields)
# Assert
| assert_frame_equal(df, new_df) | pandas.testing.assert_frame_equal |
from datetime import datetime
import pandas as pd
from coinmetrics.api_client import CoinMetricsClient
import json
from flask import Flask, jsonify
from flask_swagger import swagger
from compound import Compound
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
client = CoinMetricsClient()
@app.route("/")
def spec():
swag = swagger(app)
swag['info'] = []
swag['definitions'] = []
return jsonify(swag)
@app.route('/hello_world')
def hello_world(): # put application's code here
"""
Hello, World!
---
responses:
200:
description: Hello, World!
"""
return 'Hello World!'
# create a route to get the list of assets
@app.route('/assets')
def get_assets():
"""
Return s a list of all the assets you can get metrics for
---
responses:
200:
description: Great success!
"""
assets = client.catalog_metrics("PriceUSD")[0]["frequencies"][0]["assets"]
return json.dumps(assets)
# create a route to get the metrics for a specific asset
@app.route('/metrics/<asset>')
def get_metrics(asset):
"""
Returns a list of all the prices for <asset>
---
responses:
200:
description: Great success!
"""
assets = client.catalog_metrics()[0]["frequencies"][0]["assets"]
if asset in assets:
df_prices = client.get_asset_metrics(
assets=[asset],
metrics="PriceUSD",
frequency="1d",
).to_dataframe()
# Assign datatypes
df_prices["time"] = pd.to_datetime(df_prices.time)
# Convert time to string
df_prices["time"] = df_prices["time"].dt.strftime("%Y-%m-%d")
df_prices["Price"] = df_prices.PriceUSD.astype(float)
# Reshape dataset so assets are in columns, dates are the rows, and the values are prices
df_prices_pivot = df_prices.pivot(
index="time",
columns="asset",
values="PriceUSD"
)
return json.dumps(df_prices_pivot[asset].to_dict())
else:
return "Asset not found"
# create a route to get the metrics for a specific asset and from a specific date
@app.route('/metrics/<asset>/<start_date>')
def get_metrics_from_date(asset, start_date):
"""
Returns a list of the prices of <asset> from <start_date>
asset: the asset you want to get metrics for
start_date: the start date of the range you want to get metrics for
---
responses:
200:
description: Great success!
"""
assets = client.catalog_metrics()[0]["frequencies"][0]["assets"]
if asset in assets:
df_prices = client.get_asset_metrics(
assets=[asset],
metrics="PriceUSD",
frequency="1d",
start_time=start_date
).to_dataframe()
# Assign datatypes
df_prices["time"] = pd.to_datetime(df_prices.time)
# Convert time to string
df_prices["time"] = df_prices["time"].dt.strftime("%Y-%m-%d")
df_prices["Price"] = df_prices.PriceUSD.astype(float)
# Reshape dataset so assets are in columns, dates are the rows, and the values are prices
df_prices_pivot = df_prices.pivot(
index="time",
columns="asset",
values="PriceUSD"
)
return json.dumps(df_prices_pivot[asset].to_dict())
else:
return "Asset not found"
# create a route to get the metrics for a specific asset and for a specific range of date
@app.route('/metrics/<asset>/<start_date>/<end_date>')
def get_metrics_date(asset, start_date, end_date):
"""
Returns a list of the prices of <asset> from <start_date> to <end_date>
asset: the asset you want to get metrics for
start_date: the start date of the range you want to get metrics for
end_date: the end date of the range you want to get metrics for
---
responses:
200:
description: Great success!
"""
assets = client.catalog_metrics()[0]["frequencies"][0]["assets"]
if asset in assets:
df_prices = client.get_asset_metrics(
assets=[asset],
metrics="PriceUSD",
frequency="1d",
start_time=start_date,
end_time=end_date
).to_dataframe()
# Assign datatypes
df_prices["time"] = pd.to_datetime(df_prices.time)
# Convert time to string
df_prices["time"] = df_prices["time"].dt.strftime("%Y-%m-%d")
df_prices["Price"] = df_prices.PriceUSD.astype(float)
# Reshape dataset so assets are in columns, dates are the rows, and the values are prices
df_prices_pivot = df_prices.pivot(
index="time",
columns="asset",
values="PriceUSD"
)
return json.dumps(df_prices_pivot[asset].to_dict())
else:
return "Asset not found"
# create a route to get the compound interest for a specific asset
@app.route('/compound/<asset>/<amount>/<period>')
def get_compound(asset, amount, period):
"""
Returns a list of the compound interests you could have earned with <amount> $ spent each <period> in <asset> from its creation
asset: the asset you want to invest in ( go to /assets to look at the list of assets )
amount: the amount you want to invest each period ( int )
period: the period you want to invest for ( "d", "w", "y" )
---
responses:
200:
description: Great success!
"""
assets = client.catalog_metrics()[0]["frequencies"][0]["assets"]
if asset in assets and period in ["d", "m", "y"]:
df_prices = client.get_asset_metrics(
assets=[asset],
metrics="PriceUSD",
frequency="1d",
).to_dataframe()
# Assign datatypes
df_prices["time"] = pd.to_datetime(df_prices.time)
# Convert time to string
df_prices["time"] = df_prices["time"].dt.strftime("%Y-%m-%d")
df_prices["Price"] = df_prices.PriceUSD.astype(float)
# Reshape dataset so assets are in columns, dates are the rows, and the values are prices
df_prices_pivot = df_prices.pivot(
index="time",
columns="asset",
values="PriceUSD"
)
# Calculate compound interest
df = Compound(df_prices_pivot, int(amount), period, asset)
return json.dumps(df["Compound"].to_dict())
else:
return "Asset not found"
# create a route to get the compound interest for a specific asset and from a specific date
@app.route('/compound/<asset>/<amount>/<period>/<start_date>')
def get_compound_date_from(asset, amount, period, start_date):
"""
Returns a list of the compound interests you could have earned with <amount> $ spent each <period> in <asset> from <start_date>
asset: the asset you want to invest in ( go to /assets to look at the list of assets )
amount: the amount you want to invest each period ( int )
period: the period you want to invest for ( "d", "w", "y" )
start_date: the start date of the range you want to get compound interest for
---
responses:
200:
description: Great success!
"""
assets = client.catalog_metrics()[0]["frequencies"][0]["assets"]
if asset in assets and period in ["d", "m", "y"]:
df_prices = client.get_asset_metrics(
assets=[asset],
metrics="PriceUSD",
frequency="1d",
start_time=start_date
).to_dataframe()
# Assign datatypes
df_prices["time"] = | pd.to_datetime(df_prices.time) | pandas.to_datetime |
from datetime import datetime
from fastapi import FastAPI
import pandas
import numpy
app = FastAPI()
BASE_URL = 'https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/'
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
@app.get('/national_trend/')
def national_trend(start_at: str = None, end_at: str = None):
if start_at is not None:
start_at = pandas.to_datetime(f'{start_at} 18:00', format=DATE_FORMAT)
if end_at is not None:
end_at = | pandas.to_datetime(f'{end_at} 18:00', format=DATE_FORMAT) | pandas.to_datetime |
"""
Tests for scalar Timedelta arithmetic ops
"""
from datetime import datetime, timedelta
import operator
import numpy as np
import pytest
import pandas as pd
from pandas import NaT, Timedelta, Timestamp, offsets
import pandas._testing as tm
from pandas.core import ops
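# Note (added sketch, not part of the upstream file): ``ops.radd`` is pandas'
# reversed-operand helper, i.e. ``ops.radd(left, right)`` evaluates
# ``right + left``.  Parametrizing a test over ``operator.add`` and ``ops.radd``
# therefore exercises both ``Timedelta.__add__`` and ``Timedelta.__radd__``
# with a single test body.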
class TestTimedeltaAdditionSubtraction:
"""
Tests for Timedelta methods:
__add__, __radd__,
__sub__, __rsub__
"""
@pytest.mark.parametrize(
"ten_seconds",
[
Timedelta(10, unit="s"),
timedelta(seconds=10),
np.timedelta64(10, "s"),
np.timedelta64(10000000000, "ns"),
offsets.Second(10),
],
)
def test_td_add_sub_ten_seconds(self, ten_seconds):
# GH#6808
base = Timestamp("20130101 09:01:12.123456")
expected_add = Timestamp("20130101 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + ten_seconds
assert result == expected_add
result = base - ten_seconds
assert result == expected_sub
@pytest.mark.parametrize(
"one_day_ten_secs",
[
Timedelta("1 day, 00:00:10"),
Timedelta("1 days, 00:00:10"),
timedelta(days=1, seconds=10),
np.timedelta64(1, "D") + np.timedelta64(10, "s"),
offsets.Day() + offsets.Second(10),
],
)
def test_td_add_sub_one_day_ten_seconds(self, one_day_ten_secs):
# GH#6808
base = Timestamp("20130102 09:01:12.123456")
expected_add = Timestamp("20130103 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + one_day_ten_secs
assert result == expected_add
result = base - one_day_ten_secs
assert result == expected_sub
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_datetimelike_scalar(self, op):
# GH#19738
td = Timedelta(10, unit="d")
result = op(td, datetime(2016, 1, 1))
if op is operator.add:
            # Only check the return type for the add case: in the radd case
            # (datetime + Timedelta), datetime.__add__ handles the operation
            # itself and returns a plain datetime rather than a Timestamp,
            # since Timedelta.__radd__ is never called.
assert isinstance(result, Timestamp)
assert result == Timestamp(2016, 1, 11)
result = op(td, Timestamp("2018-01-12 18:09"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22 18:09")
result = op(td, np.datetime64("2018-01-12"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22")
result = op(td, NaT)
assert result is NaT
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_td(self, op):
td = Timedelta(10, unit="d")
result = op(td, Timedelta(days=10))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=20)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_pytimedelta(self, op):
td = Timedelta(10, unit="d")
result = op(td, timedelta(days=9))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=19)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedelta64(self, op):
td = Timedelta(10, unit="d")
result = op(td, np.timedelta64(-4, "D"))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=6)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_offset(self, op):
td = Timedelta(10, unit="d")
result = op(td, offsets.Hour(6))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=10, hours=6)
def test_td_sub_td(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_pytimedelta(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_pytimedelta()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_pytimedelta() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_timedelta64(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_timedelta64()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_timedelta64() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_nat(self):
# In this context pd.NaT is treated as timedelta-like
td = Timedelta(10, unit="d")
result = td - NaT
assert result is NaT
def test_td_sub_td64_nat(self):
td = Timedelta(10, unit="d")
td_nat = np.timedelta64("NaT")
result = td - td_nat
assert result is NaT
result = td_nat - td
assert result is NaT
def test_td_sub_offset(self):
td = Timedelta(10, unit="d")
result = td - offsets.Hour(1)
assert isinstance(result, Timedelta)
assert result == Timedelta(239, unit="h")
def test_td_add_sub_numeric_raises(self):
td = Timedelta(10, unit="d")
for other in [2, 2.0, np.int64(2), np.float64(2)]:
with pytest.raises(TypeError):
td + other
with pytest.raises(TypeError):
other + td
with pytest.raises(TypeError):
td - other
with pytest.raises(TypeError):
other - td
def test_td_rsub_nat(self):
td = Timedelta(10, unit="d")
result = NaT - td
assert result is NaT
result = np.datetime64("NaT") - td
assert result is NaT
def test_td_rsub_offset(self):
result = offsets.Hour(1) - Timedelta(10, unit="d")
assert isinstance(result, Timedelta)
assert result == Timedelta(-239, unit="h")
def test_td_sub_timedeltalike_object_dtype_array(self):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20121231 9:01"), Timestamp("20121229 9:02")])
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_sub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
exp = np.array(
[
now - Timedelta("1D"),
Timedelta("0D"),
np.timedelta64(2, "h") - Timedelta("1D"),
]
)
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_rsub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
with pytest.raises(TypeError):
Timedelta("1D") - arr
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedeltalike_object_dtype_array(self, op):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20130102 9:01"), Timestamp("20121231 9:02")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_mixed_timedeltalike_object_dtype_array(self, op):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D")])
exp = np.array([now + Timedelta("1D"), Timedelta("2D")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
# TODO: moved from index tests following #24365, may need de-duplication
def test_ops_ndarray(self):
td = Timedelta("1 day")
# timedelta, timedelta
other = pd.to_timedelta(["1 day"]).values
expected = pd.to_timedelta(["2 days"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
msg = r"unsupported operand type\(s\) for \+: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td + np.array([1])
msg = r"unsupported operand type\(s\) for \+: 'numpy.ndarray' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.array([1]) + td
expected = pd.to_timedelta(["0 days"]).values
tm.assert_numpy_array_equal(td - other, expected)
tm.assert_numpy_array_equal(-other + td, expected)
msg = r"unsupported operand type\(s\) for -: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td - np.array([1])
msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.array([1]) - td
expected = pd.to_timedelta(["2 days"]).values
tm.assert_numpy_array_equal(td * np.array([2]), expected)
tm.assert_numpy_array_equal(np.array([2]) * td, expected)
msg = (
"ufunc '?multiply'? cannot use operands with types"
r" dtype\('<m8\[ns\]'\) and dtype\('<m8\[ns\]'\)"
)
with pytest.raises(TypeError, match=msg):
td * other
with pytest.raises(TypeError, match=msg):
other * td
tm.assert_numpy_array_equal(td / other, np.array([1], dtype=np.float64))
tm.assert_numpy_array_equal(other / td, np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(["2000-01-01"]).values
expected = pd.to_datetime(["2000-01-02"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(["1999-12-31"]).values
tm.assert_numpy_array_equal(-td + other, expected)
tm.assert_numpy_array_equal(other - td, expected)
class TestTimedeltaMultiplicationDivision:
"""
Tests for Timedelta methods:
__mul__, __rmul__,
__div__, __rdiv__,
__truediv__, __rtruediv__,
__floordiv__, __rfloordiv__,
__mod__, __rmod__,
__divmod__, __rdivmod__
"""
# ---------------------------------------------------------------
# Timedelta.__mul__, __rmul__
@pytest.mark.parametrize(
"td_nat", [NaT, np.timedelta64("NaT", "ns"), np.timedelta64("NaT")]
)
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nat(self, op, td_nat):
# GH#19819
td = Timedelta(10, unit="d")
with pytest.raises(TypeError):
op(td, td_nat)
@pytest.mark.parametrize("nan", [np.nan, np.float64("NaN"), float("nan")])
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nan(self, op, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="d")
result = op(td, nan)
assert result is NaT
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_scalar(self, op):
# GH#19738
td = Timedelta(minutes=3)
result = op(td, 2)
assert result == Timedelta(minutes=6)
result = op(td, 1.5)
assert result == Timedelta(minutes=4, seconds=30)
assert op(td, np.nan) is NaT
assert op(-1, td).value == -1 * td.value
assert op(-1.0, td).value == -1.0 * td.value
with pytest.raises(TypeError):
# timedelta * datetime is gibberish
op(td, Timestamp(2016, 1, 2))
with pytest.raises(TypeError):
# invalid multiply with another timedelta
op(td, td)
# ---------------------------------------------------------------
# Timedelta.__div__, __truediv__
def test_td_div_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = td / offsets.Hour(1)
assert result == 240
assert td / td == 1
assert td / np.timedelta64(60, "h") == 4
assert np.isnan(td / NaT)
def test_td_div_numeric_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = td / 2
assert isinstance(result, Timedelta)
assert result == Timedelta(days=5)
result = td / 5.0
assert isinstance(result, Timedelta)
assert result == Timedelta(days=2)
@pytest.mark.parametrize("nan", [np.nan, np.float64("NaN"), float("nan")])
def test_td_div_nan(self, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="d")
result = td / nan
assert result is NaT
result = td // nan
assert result is NaT
# ---------------------------------------------------------------
# Timedelta.__rdiv__
def test_td_rdiv_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = offsets.Hour(1) / td
assert result == 1 / 240.0
assert np.timedelta64(60, "h") / td == 0.25
# ---------------------------------------------------------------
# Timedelta.__floordiv__
def test_td_floordiv_timedeltalike_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)
assert td // scalar == 1
assert -td // scalar.to_pytimedelta() == -2
assert (2 * td) // scalar.to_timedelta64() == 2
def test_td_floordiv_null_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
assert td // np.nan is NaT
assert np.isnan(td // NaT)
assert np.isnan(td // np.timedelta64("NaT"))
def test_td_floordiv_offsets(self):
# GH#19738
        td = Timedelta(hours=3, minutes=4)  # API: pandas.Timedelta
# coding: utf-8
"""
Created on Mon May 17 00:00:00 2017
@author: DIP
"""
# # Import necessary dependencies and settings
# In[1]:
import skimage
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from skimage import io
get_ipython().magic('matplotlib inline')
# # Image metadata features
#
# - Image create date & time
# - Image dimensions
# - Image compression format
# - Device Make & Model
# - Image resolution & aspect ratio
# - Image Artist
# - Flash, Aperture, Focal Length & Exposure
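# A minimal sketch of pulling these fields out of the EXIF header with Pillow
# (an assumed extra dependency, not used elsewhere in this notebook; PNGs
# rarely carry EXIF, so this is mainly useful for JPEGs straight off a camera).
# In[ ]:
from PIL import Image, ExifTags
img = Image.open('datasets/dog.png')
print('Format:', img.format, 'Dimensions:', img.size, 'Mode:', img.mode)
for tag_id, value in img.getexif().items():
    # map the numeric EXIF tag id to its human-readable name
    print(ExifTags.TAGS.get(tag_id, tag_id), ':', value)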
# # Raw Image and channel pixel values
# In[2]:
cat = io.imread('datasets/cat.png')
dog = io.imread('datasets/dog.png')
df = pd.DataFrame(['Cat', 'Dog'], columns=['Image'])
print(cat.shape, dog.shape)
# In[3]:
#coffee = skimage.transform.resize(coffee, (300, 451), mode='reflect')
fig = plt.figure(figsize = (8,4))
ax1 = fig.add_subplot(1,2, 1)
ax1.imshow(cat)
ax2 = fig.add_subplot(1,2, 2)
ax2.imshow(dog)
# In[4]:
dog_r = dog.copy() # Red Channel
dog_r[:,:,1] = dog_r[:,:,2] = 0 # set G,B pixels = 0
dog_g = dog.copy() # Green Channel
dog_g[:,:,0] = dog_g[:,:,2] = 0 # set R,B pixels = 0
dog_b = dog.copy() # Blue Channel
dog_b[:,:,0] = dog_b[:,:,1] = 0 # set R,G pixels = 0
plot_image = np.concatenate((dog_r, dog_g, dog_b), axis=1)
plt.figure(figsize = (10,4))
plt.imshow(plot_image)
# In[5]:
dog_r
# # Grayscale image pixel values
# In[6]:
from skimage.color import rgb2gray
cgs = rgb2gray(cat)
dgs = rgb2gray(dog)
print('Image shape:', cgs.shape, '\n')
# 2D pixel map
print('2D image pixel map')
print(np.round(cgs, 2), '\n')
# flattened pixel feature vector
print('Flattened pixel map:', (np.round(cgs.flatten(), 2)))
# # Binning image intensity distribution
# In[7]:
fig = plt.figure(figsize = (8,4))
ax1 = fig.add_subplot(2,2, 1)
ax1.imshow(cgs, cmap="gray")
ax2 = fig.add_subplot(2,2, 2)
ax2.imshow(dgs, cmap='gray')
ax3 = fig.add_subplot(2,2, 3)
c_freq, c_bins, c_patches = ax3.hist(cgs.flatten(), bins=30)
ax4 = fig.add_subplot(2,2, 4)
d_freq, d_bins, d_patches = ax4.hist(dgs.flatten(), bins=30)
# # Image aggregation statistics
# ## RGB ranges
# In[8]:
from scipy.stats import describe
cat_rgb = cat.reshape((168*300), 3).T
dog_rgb = dog.reshape((168*300), 3).T
cs = describe(cat_rgb, axis=1)
ds = describe(dog_rgb, axis=1)
cat_rgb_range = cs.minmax[1] - cs.minmax[0]
dog_rgb_range = ds.minmax[1] - ds.minmax[0]
rgb_range_df = pd.DataFrame([cat_rgb_range, dog_rgb_range],
columns=['R_range', 'G_range', 'B_range'])
pd.concat([df, rgb_range_df], axis=1)
# # Descriptive aggregations
# In[9]:
cat_stats= np.array([np.round(cs.mean, 2),np.round(cs.variance, 2),
np.round(cs.kurtosis, 2),np.round(cs.skewness, 2),
np.round(np.median(cat_rgb, axis=1), 2)]).flatten()
dog_stats= np.array([np.round(ds.mean, 2),np.round(ds.variance, 2),
np.round(ds.kurtosis, 2),np.round(ds.skewness, 2),
np.round(np.median(dog_rgb, axis=1), 2)]).flatten()
stats_df = pd.DataFrame([cat_stats, dog_stats],
columns=['R_mean', 'G_mean', 'B_mean',
'R_var', 'G_var', 'B_var',
'R_kurt', 'G_kurt', 'B_kurt',
'R_skew', 'G_skew', 'B_skew',
'R_med', 'G_med', 'B_med'])
pd.concat([df, stats_df], axis=1)
# # Edge detection
# In[10]:
from skimage.feature import canny
cat_edges = canny(cgs, sigma=3)
dog_edges = canny(dgs, sigma=3)
fig = plt.figure(figsize = (8,4))
ax1 = fig.add_subplot(1,2, 1)
ax1.imshow(cat_edges, cmap='binary')
ax2 = fig.add_subplot(1,2, 2)
ax2.imshow(dog_edges, cmap='binary')
# # Object detection
# In[11]:
from skimage.feature import hog
from skimage import exposure
fd_cat, cat_hog = hog(cgs, orientations=8, pixels_per_cell=(8, 8),
cells_per_block=(3, 3), visualise=True)
fd_dog, dog_hog = hog(dgs, orientations=8, pixels_per_cell=(8, 8),
cells_per_block=(3, 3), visualise=True)
# rescaling intensity to get better plots
cat_hogs = exposure.rescale_intensity(cat_hog, in_range=(0, 0.04))
dog_hogs = exposure.rescale_intensity(dog_hog, in_range=(0, 0.04))
fig = plt.figure(figsize = (10,4))
ax1 = fig.add_subplot(1,2, 1)
ax1.imshow(cat_hogs, cmap='binary')
ax2 = fig.add_subplot(1,2, 2)
ax2.imshow(dog_hogs, cmap='binary')
# In[12]:
print(fd_cat, fd_cat.shape)
# # Localized feature extraction
#
# In[13]:
from mahotas.features import surf
import mahotas as mh
cat_mh = mh.colors.rgb2gray(cat)
dog_mh = mh.colors.rgb2gray(dog)
cat_surf = surf.surf(cat_mh, nr_octaves=8, nr_scales=16, initial_step_size=1, threshold=0.1, max_points=50)
dog_surf = surf.surf(dog_mh, nr_octaves=8, nr_scales=16, initial_step_size=1, threshold=0.1, max_points=54)
fig = plt.figure(figsize = (10,4))
ax1 = fig.add_subplot(1,2, 1)
ax1.imshow(surf.show_surf(cat_mh, cat_surf))
ax2 = fig.add_subplot(1,2, 2)
ax2.imshow(surf.show_surf(dog_mh, dog_surf))
# In[14]:
cat_surf_fds = surf.dense(cat_mh, spacing=10)
dog_surf_fds = surf.dense(dog_mh, spacing=10)
cat_surf_fds.shape
# # Visual Bag of Words model
# ## Engineering features from SURF feature descriptions with clustering
# In[15]:
from sklearn.cluster import KMeans
k = 20
km = KMeans(k, n_init=100, max_iter=100)
surf_fd_features = np.array([cat_surf_fds, dog_surf_fds])
km.fit(np.concatenate(surf_fd_features))
vbow_features = []
for feature_desc in surf_fd_features:
labels = km.predict(feature_desc)
vbow = np.bincount(labels, minlength=k)
vbow_features.append(vbow)
vbow_df = pd.DataFrame(vbow_features)  # API: pandas.DataFrame
#!/usr/bin/env python3
from __future__ import print_function
from datetime import datetime
from botocore.exceptions import ClientError
import boto3
import pandas as pd
import logging
import os
import json
logger = logging.getLogger()
logger.setLevel(logging.INFO)
OUTPUT_QUEUE = os.getenv("OUTPUT_QUEUE")
def send_sqs_message(queue_url, msg):
sqs_client = boto3.client('sqs')
try:
sqs_client.send_message(QueueUrl=queue_url,
MessageBody=json.dumps(msg))
except ClientError as e:
print(e.response['Error']['Message'])
return
def lambda_handler(event, context):
# Create S3 client.
s3_client = boto3.client('s3')
# Read messages from SQS.
for record in event['Records']:
payload = json.loads(record['body'])['Records'][0]
# Calculate today's date.
today = datetime.now().strftime('%Y%m%d')
# Extract bucket name.
bucket = payload['s3']['bucket']['name']
# Construct expected keys.
files = {}
types = ['clients', 'portfolios', 'accounts', 'transactions']
for key in types:
files[key] = "{}_{}.csv".format(key, today)
# Fetch objects from s3.
objects = {}
for key in types:
objects[key] = s3_client.get_object(Bucket=bucket, Key=files[key])
# Read data for each type.
data = {}
for key in types:
            data[key] = pd.read_csv(objects[key]['Body'])  # API: pandas.read_csv
import wget
import shutil
import pandas as pd
import os
import sys
import tarfile
from sklearn import datasets
def create_imdb_csv(out_dir):
def bar_progress(current, total, width=80):
progress_message = "Downloading: %d%% [%d / %d] bytes" % (current / total * 100,
current, total)
sys.stdout.write("\r" + progress_message)
sys.stdout.flush()
filename = wget.download('https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz',
'data', bar=bar_progress)
print('\nUnzipping')
file = tarfile.open(filename)
file.extractall(out_dir)
file.close()
os.remove(filename)
classes = None
data = {}
for mode in ['train', 'test']:
folder = os.path.join(out_dir, 'aclImdb/') + mode
print('Removing files for ' + mode)
for file in os.listdir(folder):
filepath = os.path.join(folder, file)
if os.path.isdir(filepath) and file == 'unsup':
shutil.rmtree(filepath)
elif not os.path.isdir(filepath):
os.remove(filepath)
print('Creating csv for ' + mode)
d = datasets.load_files(os.path.join(out_dir, 'aclImdb/') + mode, categories=classes, encoding='utf8')
        data[mode] = pd.concat([pd.Series(d.data)  # API: pandas.Series (snippet truncated here)
import numpy as np
import torch
from torch.autograd import Variable
import matplotlib.pyplot as plt
import pandas as pd
import torch.optim as optim
rides = pd.read_csv('data_bike/hour.csv')
rides.head()
counts = rides['cnt'][:50]
dummy_fields = ['season','weathersit','mnth','hr','weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each],prefix=each)
    # convert each categorical field to one-hot encoding in turn, then concatenate it back onto the dataframe
    rides = pd.concat([rides, dummies], axis=1)  # API: pandas.concat
import pickle
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from skmultilearn.problem_transform import ClassifierChain
from utils.preprocessing import clean_text
def list2string(list):
return ','.join(map(str, list))
file_tweets = "new_personality_combined.csv"
file_personalities = "personality-data.txt"
data_tweets = pd.read_csv(file_tweets, sep=",", encoding="utf8", index_col=0)  # API: pandas.read_csv
################################################################################
# The contents of this file are Teradata Public Content and have been released
# to the Public Domain.
# <NAME> & <NAME> - April 2020 - v.1.1
# Copyright (c) 2020 by Teradata
# Licensed under BSD; see "license.txt" file in the bundle root folder.
#
################################################################################
# R and Python TechBytes Demo - Part 5: Python in-nodes with SCRIPT
# ------------------------------------------------------------------------------
# File: stoRFScoreMM.py
# ------------------------------------------------------------------------------
# The R and Python TechBytes Demo comprises of 5 parts:
# Part 1 consists of only a Powerpoint overview of R and Python in Vantage
# Part 2 demonstrates the Teradata R package tdplyr for clients
# Part 3 demonstrates the Teradata Python package teradataml for clients
# Part 4 demonstrates using R in-nodes with the SCRIPT and ExecR Table Operators
# Part 5 demonstrates using Python in-nodes with the SCRIPT Table Operator
################################################################################
#
# This TechBytes demo utilizes a use case to predict the propensity of a
# financial services customer base to open a credit card account.
#
# The present file is the Python scoring script to be used with the SCRIPT
# table operator, as described in the following use case 2 of the present demo
# Part 5:
#
# 2) Fitting and scoring multiple models
#
# We utilize the statecode variable as a partition to built a Random
# Forest model for every state. This is done by using SCRIPT Table Operator
# to run a model fitting script with a PARTITION BY statecode in the query.
# This creates a model for each of the CA, NY, TX, IL, AZ, OH and Other
# state codes, and perists the model in the database via CREATE TABLE AS
# statement.
# Then we run a scoring script via the SCRIPT Table Operator against
# these persisted Random Forest models to score the entire data set.
#
# For this use case, we build an analytic data set nearly identical to the
# one in the teradataml demo (Part 3), with one change as indicated by item
# (d) below. This is so we can demonstrate the in-database capability of
# simultaneously building many models.
# 60% of the analytic data set rows are sampled to create a training
# subset. The remaining 40% is used to create a testing/scoring dataset.
# The train and test/score datasets are used in the SCRIPT operations.
################################################################################
# File Changelog
# v.1.0 2019-10-29 First release
# v.1.1 2020-04-02 Added change log; no code changes in present file
################################################################################
import sys
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
import pickle
import base64
###
### Read input
###
delimiter = '\t'
inputData = []
try:
line = input()
if line == '': # Exit if user provides blank line
pass
else:
allArgs = line.split(delimiter)
inputData.append(allArgs[0:-2])
modelSerB64 = allArgs[-1]
except (EOFError): # Exit if reached EOF or CTRL-D
pass
while 1:
try:
line = input()
if line == '': # Exit if user provides blank line
break
else:
allArgs = line.split(delimiter)
inputData.append(allArgs[0:-2])
except (EOFError): # Exit if reached EOF or CTRL-D
break
#for line in sys.stdin.read().splitlines():
# line = line.split(delimiter)
# inputData.append(line)
###
### If no data received, gracefully exit rather than producing an error later.
###
if not inputData:
sys.exit()
## In the input information, all rows have the same number of column elements
## except for the first row. The latter also contains the model info in its
## last column. Isolate the serialized model from the end of first row.
#modelSerB64 = inputData[0][-1]
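# Illustrative sketch (variable name is arbitrary, not taken from this script):
# the model arrives as a base64-encoded pickle string, so recovering a usable
# scikit-learn classifier is a matter of two standard-library calls, after
# which its predict()/predict_proba() methods can score the rows read above.
model_sketch = pickle.loads(base64.b64decode(modelSerB64))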
###
### Set up input DataFrame according to input schema
###
# Know your data: You must know in advance the number and data types of the
# incoming columns from the database!
# For numeric columns, the database sends in floats in scientific format with a
# blank space when the exponential is positive; e.g., 1.0 is sent as 1.000E 000.
# The following input data read deals with any such blank spaces in numbers.
columns = ['cust_id', 'tot_income', 'tot_age', 'tot_cust_years', 'tot_children',
'female_ind', 'single_ind', 'married_ind', 'separated_ind',
'statecode', 'ck_acct_ind', 'sv_acct_ind', 'cc_acct_ind',
'ck_avg_bal', 'sv_avg_bal', 'cc_avg_bal', 'ck_avg_tran_amt',
'sv_avg_tran_amt', 'cc_avg_tran_amt', 'q1_trans_cnt',
'q2_trans_cnt', 'q3_trans_cnt', 'q4_trans_cnt', 'SAMPLE_ID']
df = pd.DataFrame(inputData, columns=columns)
#df = pd.DataFrame.from_records(inputData, exclude=['nRow', 'model'], columns=columns)
del inputData
df['cust_id'] = pd.to_numeric(df['cust_id'])
df['tot_income'] = df['tot_income'].apply(lambda x: "".join(x.split()))
df['tot_income'] = pd.to_numeric(df['tot_income'])
df['tot_age'] = pd.to_numeric(df['tot_age'])
df['tot_cust_years'] = pd.to_numeric(df['tot_cust_years'])
df['tot_children'] = pd.to_numeric(df['tot_children'])
df['female_ind'] = pd.to_numeric(df['female_ind'])
df['single_ind'] = pd.to_numeric(df['single_ind'])
df['married_ind'] = pd.to_numeric(df['married_ind'])
df['separated_ind'] = pd.to_numeric(df['separated_ind'])
df['statecode'] = df['statecode'].apply(lambda x: x.replace('"', ''))
df['ck_acct_ind'] = pd.to_numeric(df['ck_acct_ind'])
df['sv_acct_ind'] = pd.to_numeric(df['sv_acct_ind'])
df['cc_acct_ind'] = pd.to_numeric(df['cc_acct_ind'])
df['ck_avg_bal'] = df['ck_avg_bal'].apply(lambda x: "".join(x.split()))
df['ck_avg_bal'] = pd.to_numeric(df['ck_avg_bal'])
df['sv_avg_bal'] = df['sv_avg_bal'].apply(lambda x: "".join(x.split()))
df['sv_avg_bal'] = pd.to_numeric(df['sv_avg_bal'])
df['cc_avg_bal'] = df['cc_avg_bal'].apply(lambda x: "".join(x.split()))
df['cc_avg_bal'] = pd.to_numeric(df['cc_avg_bal'])
df['ck_avg_tran_amt'] = df['ck_avg_tran_amt'].apply(lambda x: "".join(x.split()))
df['ck_avg_tran_amt'] = pd.to_numeric(df['ck_avg_tran_amt'])
df['sv_avg_tran_amt'] = df['sv_avg_tran_amt'].apply(lambda x: "".join(x.split()))
df['sv_avg_tran_amt'] = pd.to_numeric(df['sv_avg_tran_amt'])
df['cc_avg_tran_amt'] = df['cc_avg_tran_amt'].apply(lambda x: "".join(x.split()))
df['cc_avg_tran_amt'] = pd.to_numeric(df['cc_avg_tran_amt'])
df['q1_trans_cnt'] = pd.to_numeric(df['q1_trans_cnt'])
df['q2_trans_cnt'] = pd.to_numeric(df['q2_trans_cnt'])
df['q3_trans_cnt'] = pd.to_numeric(df['q3_trans_cnt'])  # API: pandas.to_numeric
import numpy as np
import os
import pandas as pd
import re
from astropy.coordinates import Angle
from astropy.io import fits
from multiprocessing import Pool
from py_specrebin import rebinspec
spec1d_tags = ['LBIN','SPEC','IVAR','SPECNORMED','IVARNORMED','POS_A',
'PAR_A','RA','DEC','RA_DEG','DEC_DEG','MASK_RA','MASK_DEC']
def one_star(input):
    '''
    Read the spec1d file, shift each chip to the rest frame, and stitch the sides together.
    :param input: tuple containing (spec1d_file, z, zqual, lbin)
    :return: tuple containing the spec1d file name and a Series with all extracted information
    '''
spec1d_file, z, zqual, lbin = input
nlbin = len(lbin)
flux = np.zeros(nlbin)
ivar = np.zeros(nlbin)
while sum(flux) == 0:
hdu = fits.open(spec1d_file)
# Check number of extensions
if np.size(hdu) < 5:
log.write('%s has the wrong number of extensions \n' %(spec1d_file))
break
# Are the extensions structure arrays
sB = hdu[1].data
sR = hdu[2].data
if (sB == None) or (sR == None):
log.write('%s is missing a structure \n' %(spec1d_file))
break
if min(sR.LAMBDA[0]) < max(sB.LAMBDA[0] < 25):
sR = hdu[3].data
rSpec = sR.SPEC[0]
bSpec = sB.SPEC[0]
# Does the spectrum actually have values
if ((bSpec[(bSpec != bSpec) | (bSpec == 0.0)]).sum() > 3000) or \
((rSpec[(rSpec != rSpec) | (rSpec == 0.0)]).sum() > 3000):
log.write('%s does not have enough real numbers in its arrays \n' %(spec1d_file))
break
# Shift to rest frame
lrshift = sR.LAMBDA[0]/(1.+z)
lbshift = sB.LAMBDA[0]/(1.+z)
# Rebin red side
specbinR, ivarbinR = rebinspec(lrshift, rSpec, lbin, ivar=sR.IVAR[0])
# Rebin blue side
specbinB, ivarbinB = rebinspec(lbshift, bSpec, lbin, ivar=sB.IVAR[0])
# stitch the sides together
for j in range(nlbin):
if specbinR[j] == specbinR[j]:
flux[j] = specbinR[j]
ivar[j] = ivarbinR[j]
else:
flux[j] = specbinB[j]
ivar[j] = ivarbinB[j]
# Some header information for ease of reading the subsequent lines
header = hdu[1].header
ra = header['RA_OBJ']
dec = header['DEC_OBJ']
tonorm = np.median(flux[(lbin > 7500) & (lbin < 7600)])
data = [lbin, flux, ivar, flux/tonorm, ivar*(tonorm**2), header['SLITPA'], header['PARANG'],
ra, dec,Angle('%s hours'%(ra)).degrees, Angle('%s degrees' %(dec)).degrees,
header['RA'], header['DEC']]
return spec1d_file, pd.Series(dict(zip(spec1d_tags, data)))
def spec1d(spec1d_files, zspec_array, lbin, tags, logfile):
'''
multiprocessing wrapper for the one_star function
:param spec1d_files:
:param z_array: array of tuples containing (z, zqual)
:param lbin: wavelength array to bin things onto
:param tags: tags to output (does not need to just be spec1d tags)
:param logfile:
:return: dataframe containing spec1d info
'''
# initialize df
    df = pd.DataFrame(columns=[st for st in spec1d_tags if st in tags], index=spec1d_files)  # API: pandas.DataFrame
import os
import numpy as np
import pandas as pd
import pytest
from pytest import approx
import oemof.thermal.compression_heatpumps_and_chillers as cmpr_hp_chllr
import oemof.thermal.concentrating_solar_power as csp
import oemof.thermal.absorption_heatpumps_and_chillers as ac
from oemof.thermal.cogeneration import allocate_emissions
from oemof.thermal.stratified_thermal_storage import (calculate_storage_u_value,
calculate_storage_dimensions,
calculate_capacities,
calculate_losses)
from oemof.thermal.solar_thermal_collector import (flat_plate_precalc,
calc_eta_c_flate_plate)
def test_cop_calculation_hp():
cops_HP = cmpr_hp_chllr.calc_cops(
temp_high=[40],
temp_low=[12],
quality_grade=0.4,
mode='heat_pump')
assert cops_HP == [4.473571428571428]
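    # Sanity check of the expected value: the heat-pump COP is the Carnot COP
    # scaled by the quality grade, with temperatures taken in Kelvin:
    # 0.4 * (40 + 273.15) / (40 - 12) = 4.4735714..., matching the assert above.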
def test_calc_cops_with_Series_01():
ambient_temp_each_hour = {'01:00': 12, '02:00': 12, '03:00': 12}
temp_l_series = pd.Series(ambient_temp_each_hour)
cops_HP = cmpr_hp_chllr.calc_cops(
temp_high=[40],
temp_low=temp_l_series,
quality_grade=0.4,
mode='heat_pump')
assert cops_HP == [4.473571428571428, 4.473571428571428, 4.473571428571428]
def test_calc_cops_with_Series_02():
set_temp_each_hour = {'01:00': 40, '02:00': 40, '03:00': 40}
temp_h_series = pd.Series(set_temp_each_hour)
cops_HP = cmpr_hp_chllr.calc_cops(
temp_high=temp_h_series,
temp_low=[12],
quality_grade=0.4,
mode='heat_pump')
assert cops_HP == [4.473571428571428, 4.473571428571428, 4.473571428571428]
def test_cop_calculation_hp_list_input_01():
cops_HP = cmpr_hp_chllr.calc_cops(
temp_high=[40, 40],
temp_low=[12],
quality_grade=0.4,
mode='heat_pump')
assert cops_HP == [4.473571428571428, 4.473571428571428]
def test_cop_calculation_hp_list_input_02():
cops_HP = cmpr_hp_chllr.calc_cops(
temp_high=[40],
temp_low=[12, 12],
quality_grade=0.4,
mode='heat_pump')
assert cops_HP == [4.473571428571428, 4.473571428571428]
def test_cop_calculation_airsource_hp_with_icing_01():
cops_ASHP = cmpr_hp_chllr.calc_cops(
temp_high=[40],
temp_low=[1.3],
quality_grade=0.5,
mode='heat_pump',
temp_threshold_icing=2,
factor_icing=0.8)
assert cops_ASHP == [3.236692506459949]
def test_cop_calculation_airsource_hp_with_icing_02():
cops_ASHP = cmpr_hp_chllr.calc_cops(
temp_high=[40],
temp_low=[2.3],
quality_grade=0.5,
mode='heat_pump',
temp_threshold_icing=2,
factor_icing=0.8)
assert cops_ASHP == [4.15318302387268]
def test_cop_calculation_chiller():
cops_chiller = cmpr_hp_chllr.calc_cops(
temp_high=[35],
temp_low=[17],
quality_grade=0.45,
mode='chiller')
assert cops_chiller == [7.25375]
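    # Same pattern with the cold side in the numerator for a chiller:
    # 0.45 * (17 + 273.15) / (35 - 17) = 7.25375.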
def test_raised_exception_01():
"""Test if an exception is raised if temp_low is not a list."""
with pytest.raises(TypeError):
cmpr_hp_chllr.calc_cops(
temp_high=[40],
temp_low=12, # ERROR - temp_low has to be a list!
quality_grade=0.4,
mode='heat_pump',
temp_threshold_icing=2,
factor_icing=0.8)
def test_raised_exception_02():
"""Test if an exception is raised if temp_high is not a list."""
with pytest.raises(TypeError):
cmpr_hp_chllr.calc_cops(
temp_high=40, # ERROR - temp_high has to be a list!
temp_low=[12],
quality_grade=0.4,
mode='heat_pump',
temp_threshold_icing=2,
factor_icing=0.8)
def test_raised_exception_03():
"""Test if an exception is raised if temp_high and
temp_low have different length AND none of them is of length 1."""
with pytest.raises(IndexError):
cmpr_hp_chllr.calc_cops(
temp_high=[40, 39, 39],
temp_low=[12, 10], # ERROR - len(temp_low) has
# to be 1 or equal to len(temp_high)
quality_grade=0.4,
mode='heat_pump',
temp_threshold_icing=2,
factor_icing=0.8)
def test_raised_exception_04():
"""Test if an exception is raised if ... """
with pytest.raises(ValueError):
cmpr_hp_chllr.calc_cops(
temp_high=[39],
temp_low=[17],
quality_grade=0.4,
mode='chiller',
temp_threshold_icing=2,
factor_icing=0.8)
def test_raised_exception_05():
"""Test if an exception is raised if ... """
with pytest.raises(ValueError):
cmpr_hp_chllr.calc_cops(
temp_high=[39],
temp_low=[17],
quality_grade=0.4,
mode='chiller',
temp_threshold_icing=2,
factor_icing=0.8)
def test_calc_max_Q_dot_chill():
nominal_conditions = {
'nominal_Q_chill': 20,
'nominal_el_consumption': 5}
actual_cop = [4.5]
max_Q_chill = cmpr_hp_chllr.calc_max_Q_dot_chill(nominal_conditions,
cops=actual_cop)
assert max_Q_chill == [1.125]
def test_raised_exceptions_05():
with pytest.raises(TypeError):
actual_cop = 4.5 # ERROR - has to be of type list!
nom_cond = {'nominal_Q_chill': 20, 'nominal_el_consumption': 5}
cmpr_hp_chllr.calc_max_Q_dot_chill(nominal_conditions=nom_cond,
cops=actual_cop)
def test_calc_max_Q_dot_heat():
nom_cond = {
'nominal_Q_hot': 20,
'nominal_el_consumption': 5}
actual_cop = [4.5]
max_Q_hot = cmpr_hp_chllr.calc_max_Q_dot_heat(nominal_conditions=nom_cond,
cops=actual_cop)
assert max_Q_hot == [1.125]
def test_calc_chiller_quality_grade():
nom_cond = {
'nominal_Q_chill': 20,
'nominal_el_consumption': 5,
't_high_nominal': 35,
't_low_nominal': 7}
q_grade = cmpr_hp_chllr.calc_chiller_quality_grade(nominal_conditions=nom_cond)
assert q_grade == 0.39978582902016785
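    # Consistency check: nominal COP = 20 / 5 = 4, Carnot chiller COP at the
    # nominal temperatures = (7 + 273.15) / (35 - 7) = 10.0054..., and their
    # ratio reproduces the 0.39978... quality grade asserted above.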
def test_calculate_storage_u_value():
params = {
's_iso': 50, # mm
'lamb_iso': 0.05, # W/(m*K)
'alpha_inside': 1, # W/(m2*K)
'alpha_outside': 1 # W/(m2*K)
}
u_value = calculate_storage_u_value(**params)
assert u_value == 1 / 3
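    # The expected value follows from summing thermal resistances (s_iso given
    # in mm): 1/u = 1/alpha_inside + 0.05 m / 0.05 W/(m*K) + 1/alpha_outside = 3.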
def test_calculate_storage_dimensions():
params = {
'height': 10, # m
'diameter': 10, # m
}
volume, surface = calculate_storage_dimensions(**params)
assert volume == approx(250 * np.pi) and surface == approx(150 * np.pi)
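    # Plain cylinder geometry with d = h = 10 m: volume = pi * (d/2)**2 * h
    # = 250*pi and surface = pi*d*h + 2*pi*(d/2)**2 = 100*pi + 50*pi = 150*pi.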
def test_calculate_capacities():
params = {
'volume': 1000, # m3
'temp_h': 100, # deg C
'temp_c': 50, # deg C
}
nominal_storage_capacity = calculate_capacities(**params)
assert nominal_storage_capacity == 56.62804059111111
def test_calculate_losses():
params = {
'u_value': 1, # W/(m2*K)
'diameter': 10, # m
'temp_h': 100, # deg C
'temp_c': 50, # deg C
'temp_env': 10, # deg C
}
loss_rate, fixed_losses_relative, fixed_losses_absolute = calculate_losses(**params)
assert loss_rate == 0.0003531819182021882\
and fixed_losses_relative == 0.00028254553456175054\
and fixed_losses_absolute == 0.010210176124166827
def test_allocate_emissions():
emissions_dict = {}
for method in ['iea', 'efficiency', 'finnish']:
emissions_dict[method] = allocate_emissions(
total_emissions=200,
eta_el=0.3,
eta_th=0.5,
method=method,
eta_el_ref=0.525,
eta_th_ref=0.82
)
result = {
'iea': (75.0, 125.0),
'efficiency': (125.0, 75.0),
'finnish': (96.7551622418879, 103.24483775811208)}
assert emissions_dict == result
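    # The expected tuples can be reproduced by hand from total_emissions = 200:
    # iea:        200 * 0.3 / (0.3 + 0.5) = 75 and 200 * 0.5 / (0.3 + 0.5) = 125
    # efficiency: the same shares assigned the other way round, i.e. (125, 75)
    # finnish:    200 * (0.3/0.525) / (0.3/0.525 + 0.5/0.82) = 96.755..., with
    #             the remaining 103.245... in the second entry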
def test_allocate_emission_series():
emissions_dict = {}
for method in ['iea', 'efficiency', 'finnish']:
emissions_dict[method] = allocate_emissions(
total_emissions=pd.Series([200, 200]),
eta_el=pd.Series([0.3, 0.3]),
eta_th=pd.Series([0.5, 0.5]),
method=method,
eta_el_ref=pd.Series([0.525, 0.525]),
eta_th_ref=pd.Series([0.82, 0.82])
)
default = {
'iea': (
pd.Series([75.0, 75.0]),
pd.Series([125.0, 125.0])
),
'efficiency': (
            pd.Series([125.0, 125.0])  # API: pandas.Series (snippet truncated here)
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator,TransformerMixin
from sklearn.preprocessing import OneHotEncoder
class Fuzzification(BaseEstimator,TransformerMixin):
'''
Fuzzification module of a Fuzzy Inference System.
This transformer class is responsible for fitting and transforming the data into membership functions.
'''
def __init__(self, n_fuzzy_sets = 3, triangle_format = 'normal',enable_negation = False):
self.n_fuzzy_sets = n_fuzzy_sets
self.triangle_format = triangle_format
self.enable_negation = enable_negation
def fit(self,X, is_categorical = None, categories = None):
'''
Fit fuzzification parameters according to dataset X.
'''
X = X if type(X) == pd.DataFrame else pd.DataFrame(X)
fuzzy_params = pd.DataFrame(columns = ['min','max','centers','is_categorical','is_binary','categories'], index=list(range(X.shape[1])))
is_categorical = is_categorical or X.shape[1] * [False]
X_numerical = X.loc[:,np.invert(is_categorical)]
if self.triangle_format == 'tukey':
fuzzy_params.loc[np.invert(is_categorical),'centers'] = [self.tukey_centers(x,self.n_fuzzy_sets) for x in X_numerical.values.T]
else:
mins = X_numerical.min()
maxs = X_numerical.max()
fuzzy_params.loc[np.invert(is_categorical),'min'] = mins.values
fuzzy_params.loc[np.invert(is_categorical),'max'] = maxs.values
fuzzy_params.loc[np.invert(is_categorical),'centers'] = [self.normal_centers(mini,maxi,self.n_fuzzy_sets) for mini,maxi in zip(mins,maxs)]
categories = categories or [np.unique(x) for x in X.loc[:,is_categorical].values.T]
fuzzy_params.loc[is_categorical,'categories'] = categories
fuzzy_params['is_binary'] = False
fuzzy_params.loc[is_categorical,'is_binary'] = [x.shape[0] <= 2 for x in categories] if categories is not None else False
fuzzy_params['is_categorical'] = is_categorical
self.fuzzy_params = fuzzy_params
def transform(self,X):
        X = X if type(X) == pd.DataFrame else pd.DataFrame(X)  # API: pandas.DataFrame
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check pd.NaT is handles as the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# Check pd.NaT is handles as the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# reversion test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = tm.box_expected(expected, box_with_array)
if box_other:
other = tm.box_expected(other, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dtarr, other)
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
"op, offset, exp, exp_freq",
[
(
"__add__",
DateOffset(months=3, days=10),
[
Timestamp("2014-04-11"),
Timestamp("2015-04-11"),
Timestamp("2016-04-11"),
Timestamp("2017-04-11"),
],
None,
),
(
"__add__",
DateOffset(months=3),
[
Timestamp("2014-04-01"),
Timestamp("2015-04-01"),
Timestamp("2016-04-01"),
Timestamp("2017-04-01"),
],
"AS-APR",
),
(
"__sub__",
DateOffset(months=3, days=10),
[
Timestamp("2013-09-21"),
Timestamp("2014-09-21"),
Timestamp("2015-09-21"),
Timestamp("2016-09-21"),
],
None,
),
(
"__sub__",
DateOffset(months=3),
[
Timestamp("2013-10-01"),
Timestamp("2014-10-01"),
Timestamp("2015-10-01"),
Timestamp("2016-10-01"),
],
"AS-OCT",
),
],
)
def test_dti_add_sub_nonzero_mth_offset(
self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
):
# GH 26258
tz = tz_aware_fixture
date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
date = tm.box_expected(date, box_with_array, False)
mth = getattr(date, op)
result = mth(offset)
expected = DatetimeIndex(exp, tz=tz)
expected = tm.box_expected(expected, box_with_array, False)
tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
left = Series([Timestamp("1969-12-31")])
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = TimedeltaIndex([NaT])
expected = tm.box_expected(expected, box_with_array)
result = left - right
tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = Timestamp("1700-01-31")
td = Timedelta("20000 Days")
dti = date_range("1949-09-30", freq="100Y", periods=4)
ser = Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
with pytest.raises(OverflowError, match=msg):
dt - ser
with pytest.raises(OverflowError, match=msg):
ser + td
with pytest.raises(OverflowError, match=msg):
td + ser
ser.iloc[-1] = NaT
expected = Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = NaT
expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
msg = "Overflow in int64 addition"
for variant in ts_neg_variants:
with pytest.raises(OverflowError, match=msg):
dtimax - variant
expected = Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
# General tests
expected = Timestamp.max.value - ts_pos[1].value
result = dtimax - ts_pos
assert result[1].value == expected
expected = Timestamp.min.value - ts_neg[1].value
result = dtimin - ts_neg
assert result[1].value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
with pytest.raises(OverflowError, match=msg):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([Timestamp.min])
t1 = tmin + Timedelta.max + Timedelta("1us")
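        # t1 - tmin would equal Timedelta.max + 1us, which is outside the
        # representable timedelta64[ns] range, hence the OverflowError below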
with pytest.raises(OverflowError, match=msg):
t1 - tmin
        tmax = pd.to_datetime([Timestamp.max])
# being a bit too dynamic
# pylint: disable=E1101
import datetime
import warnings
import re
from math import ceil
from collections import namedtuple
from contextlib import contextmanager
from distutils.version import LooseVersion
import numpy as np
from pandas.util.decorators import cache_readonly, deprecate_kwarg
import pandas.core.common as com
from pandas.core.common import AbstractMethodError
from pandas.core.generic import _shared_docs, _shared_doc_kwargs
from pandas.core.index import Index, MultiIndex
from pandas.core.series import Series, remove_na
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.period import PeriodIndex, Period
import pandas.tseries.frequencies as frequencies
from pandas.tseries.offsets import DateOffset
from pandas.compat import range, lrange, lmap, map, zip, string_types
import pandas.compat as compat
from pandas.util.decorators import Appender
try: # mpl optional
import pandas.tseries.converter as conv
conv.register() # needs to override so set_xlim works with str/number
except ImportError:
pass
# Extracted from https://gist.github.com/huyng/816622
# this is the rcParams set when setting display.with_mpl_style
# to True.
mpl_stylesheet = {
'axes.axisbelow': True,
'axes.color_cycle': ['#348ABD',
'#7A68A6',
'#A60628',
'#467821',
'#CF4457',
'#188487',
'#E24A33'],
'axes.edgecolor': '#bcbcbc',
'axes.facecolor': '#eeeeee',
'axes.grid': True,
'axes.labelcolor': '#555555',
'axes.labelsize': 'large',
'axes.linewidth': 1.0,
'axes.titlesize': 'x-large',
'figure.edgecolor': 'white',
'figure.facecolor': 'white',
'figure.figsize': (6.0, 4.0),
'figure.subplot.hspace': 0.5,
'font.family': 'monospace',
'font.monospace': ['Andale Mono',
'Nimbus Mono L',
'Courier New',
'Courier',
'Fixed',
'Terminal',
'monospace'],
'font.size': 10,
'interactive': True,
'keymap.all_axes': ['a'],
'keymap.back': ['left', 'c', 'backspace'],
'keymap.forward': ['right', 'v'],
'keymap.fullscreen': ['f'],
'keymap.grid': ['g'],
'keymap.home': ['h', 'r', 'home'],
'keymap.pan': ['p'],
'keymap.save': ['s'],
'keymap.xscale': ['L', 'k'],
'keymap.yscale': ['l'],
'keymap.zoom': ['o'],
'legend.fancybox': True,
'lines.antialiased': True,
'lines.linewidth': 1.0,
'patch.antialiased': True,
'patch.edgecolor': '#EEEEEE',
'patch.facecolor': '#348ABD',
'patch.linewidth': 0.5,
'toolbar': 'toolbar2',
'xtick.color': '#555555',
'xtick.direction': 'in',
'xtick.major.pad': 6.0,
'xtick.major.size': 0.0,
'xtick.minor.pad': 6.0,
'xtick.minor.size': 0.0,
'ytick.color': '#555555',
'ytick.direction': 'in',
'ytick.major.pad': 6.0,
'ytick.major.size': 0.0,
'ytick.minor.pad': 6.0,
'ytick.minor.size': 0.0
}
def _get_standard_kind(kind):
return {'density': 'kde'}.get(kind, kind)
def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
color=None):
import matplotlib.pyplot as plt
if color is None and colormap is not None:
if isinstance(colormap, compat.string_types):
import matplotlib.cm as cm
cmap = colormap
colormap = cm.get_cmap(colormap)
if colormap is None:
raise ValueError("Colormap {0} is not recognized".format(cmap))
colors = lmap(colormap, np.linspace(0, 1, num=num_colors))
elif color is not None:
if colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
colors = color
else:
if color_type == 'default':
# need to call list() on the result to copy so we don't
# modify the global rcParams below
colors = list(plt.rcParams.get('axes.color_cycle',
list('bgrcmyk')))
if isinstance(colors, compat.string_types):
colors = list(colors)
elif color_type == 'random':
import random
def random_color(column):
random.seed(column)
return [random.random() for _ in range(3)]
colors = lmap(random_color, lrange(num_colors))
else:
raise ValueError("color_type must be either 'default' or 'random'")
if isinstance(colors, compat.string_types):
import matplotlib.colors
conv = matplotlib.colors.ColorConverter()
def _maybe_valid_colors(colors):
try:
[conv.to_rgba(c) for c in colors]
return True
except ValueError:
return False
        # check whether the string can be converted to a single color
maybe_single_color = _maybe_valid_colors([colors])
        # check whether each character can be converted to a color
maybe_color_cycle = _maybe_valid_colors(list(colors))
if maybe_single_color and maybe_color_cycle and len(colors) > 1:
msg = ("'{0}' can be parsed as both single color and "
"color cycle. Specify each color using a list "
"like ['{0}'] or {1}")
raise ValueError(msg.format(colors, list(colors)))
elif maybe_single_color:
colors = [colors]
else:
# ``colors`` is regarded as color cycle.
# mpl will raise error any of them is invalid
pass
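    # Repeat the palette enough whole times to cover num_colors, then pad
    # with the first ``mod`` entries so exactly num_colors colors are returned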
if len(colors) != num_colors:
multiple = num_colors//len(colors) - 1
mod = num_colors % len(colors)
colors += multiple * colors
colors += colors[:mod]
return colors
class _Options(dict):
"""
Stores pandas plotting options.
Allows for parameter aliasing so you can just use parameter names that are
the same as the plot function parameters, but is stored in a canonical
    format that makes it easy to break down into groups later
"""
# alias so the names are same as plotting method parameter names
_ALIASES = {'x_compat': 'xaxis.compat'}
_DEFAULT_KEYS = ['xaxis.compat']
def __init__(self):
self['xaxis.compat'] = False
def __getitem__(self, key):
key = self._get_canonical_key(key)
if key not in self:
raise ValueError('%s is not a valid pandas plotting option' % key)
return super(_Options, self).__getitem__(key)
def __setitem__(self, key, value):
key = self._get_canonical_key(key)
return super(_Options, self).__setitem__(key, value)
def __delitem__(self, key):
key = self._get_canonical_key(key)
if key in self._DEFAULT_KEYS:
raise ValueError('Cannot remove default parameter %s' % key)
return super(_Options, self).__delitem__(key)
def __contains__(self, key):
key = self._get_canonical_key(key)
return super(_Options, self).__contains__(key)
def reset(self):
"""
Reset the option store to its initial state
Returns
-------
None
"""
self.__init__()
def _get_canonical_key(self, key):
return self._ALIASES.get(key, key)
@contextmanager
def use(self, key, value):
"""
Temporarily set a parameter value using the with statement.
Aliasing allowed.
"""
old_value = self[key]
try:
self[key] = value
yield self
finally:
self[key] = old_value
plot_params = _Options()
def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
diagonal='hist', marker='.', density_kwds=None,
hist_kwds=None, range_padding=0.05, **kwds):
"""
Draw a matrix of scatter plots.
Parameters
----------
frame : DataFrame
alpha : float, optional
amount of transparency applied
figsize : (float,float), optional
a tuple (width, height) in inches
ax : Matplotlib axis object, optional
grid : bool, optional
setting this to True will show the grid
diagonal : {'hist', 'kde'}
pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : str, optional
Matplotlib marker type, default '.'
hist_kwds : other plotting keyword arguments
To be passed to hist function
density_kwds : other plotting keyword arguments
To be passed to kernel density estimate plot
range_padding : float, optional
relative extension of axis range in x and y
with respect to (x_max - x_min) or (y_max - y_min),
default 0.05
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
"""
import matplotlib.pyplot as plt
from matplotlib.artist import setp
df = frame._get_numeric_data()
n = df.columns.size
naxes = n * n
fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax,
squeeze=False)
# no gaps between subplots
fig.subplots_adjust(wspace=0, hspace=0)
mask = com.notnull(df)
marker = _get_marker_compat(marker)
hist_kwds = hist_kwds or {}
density_kwds = density_kwds or {}
    # workaround because `c='b'` is hardcoded in matplotlib's scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
boundaries_list = []
for a in df.columns:
values = df[a].values[mask[a].values]
rmin_, rmax_ = np.min(values), np.max(values)
rdelta_ext = (rmax_ - rmin_) * range_padding / 2.
        boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext))
for i, a in zip(lrange(n), df.columns):
for j, b in zip(lrange(n), df.columns):
ax = axes[i, j]
if i == j:
values = df[a].values[mask[a].values]
# Deal with the diagonal by drawing a histogram there.
if diagonal == 'hist':
ax.hist(values, **hist_kwds)
elif diagonal in ('kde', 'density'):
from scipy.stats import gaussian_kde
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
ax.plot(ind, gkde.evaluate(ind), **density_kwds)
ax.set_xlim(boundaries_list[i])
else:
common = (mask[a] & mask[b]).values
ax.scatter(df[b][common], df[a][common],
marker=marker, alpha=alpha, **kwds)
ax.set_xlim(boundaries_list[j])
ax.set_ylim(boundaries_list[i])
ax.set_xlabel(b)
ax.set_ylabel(a)
            if j != 0:
ax.yaxis.set_visible(False)
if i != n-1:
ax.xaxis.set_visible(False)
if len(df.columns) > 1:
lim1 = boundaries_list[0]
locs = axes[0][1].yaxis.get_majorticklocs()
locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])]
adj = (locs - lim1[0]) / (lim1[1] - lim1[0])
lim0 = axes[0][0].get_ylim()
adj = adj * (lim0[1] - lim0[0]) + lim0[0]
axes[0][0].yaxis.set_ticks(adj)
if np.all(locs == locs.astype(int)):
# if all ticks are int
locs = locs.astype(int)
axes[0][0].yaxis.set_ticklabels(locs)
_set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
return axes
def _gca():
import matplotlib.pyplot as plt
return plt.gca()
def _gcf():
import matplotlib.pyplot as plt
return plt.gcf()
def _get_marker_compat(marker):
import matplotlib.lines as mlines
import matplotlib as mpl
if mpl.__version__ < '1.1.0' and marker == '.':
return 'o'
if marker not in mlines.lineMarkers:
return 'o'
return marker
def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds):
"""RadViz - a multivariate data visualization algorithm
Parameters:
-----------
frame: DataFrame
class_column: str
Column name containing class names
ax: Matplotlib axis object, optional
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib scatter plotting method
Returns:
--------
ax: Matplotlib axis object
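    Examples:
    ---------
    A minimal usage sketch with synthetic data (column names are
    illustrative only):
    >>> df = DataFrame(np.random.randn(30, 3), columns=['a', 'b', 'c'])
    >>> df['class'] = ['u', 'v', 'w'] * 10
    >>> radviz(df, 'class')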
"""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def normalize(series):
a = min(series)
b = max(series)
return (series - a) / (b - a)
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
df = frame.drop(class_column, axis=1).apply(normalize)
if ax is None:
ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1])
to_plot = {}
colors = _get_standard_colors(num_colors=len(classes), colormap=colormap,
color_type='random', color=color)
for kls in classes:
to_plot[kls] = [[], []]
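    # Place one anchor per feature column evenly around the unit circle;
    # each observation is then plotted at the weighted average of these
    # anchor positions, weighted by its min-max normalized feature values.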
m = len(frame.columns) - 1
s = np.array([(np.cos(t), np.sin(t))
for t in [2.0 * np.pi * (i / float(m))
for i in range(m)]])
for i in range(n):
row = df.iloc[i].values
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
y = (s * row_).sum(axis=0) / row.sum()
kls = class_col.iat[i]
to_plot[kls][0].append(y[0])
to_plot[kls][1].append(y[1])
for i, kls in enumerate(classes):
ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[i],
label=com.pprint_thing(kls), **kwds)
ax.legend()
ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none'))
for xy, name in zip(s, df.columns):
ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray'))
if xy[0] < 0.0 and xy[1] < 0.0:
ax.text(xy[0] - 0.025, xy[1] - 0.025, name,
ha='right', va='top', size='small')
elif xy[0] < 0.0 and xy[1] >= 0.0:
ax.text(xy[0] - 0.025, xy[1] + 0.025, name,
ha='right', va='bottom', size='small')
elif xy[0] >= 0.0 and xy[1] < 0.0:
ax.text(xy[0] + 0.025, xy[1] - 0.025, name,
ha='left', va='top', size='small')
elif xy[0] >= 0.0 and xy[1] >= 0.0:
ax.text(xy[0] + 0.025, xy[1] + 0.025, name,
ha='left', va='bottom', size='small')
ax.axis('equal')
return ax
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def andrews_curves(frame, class_column, ax=None, samples=200, color=None,
colormap=None, **kwds):
"""
Parameters:
-----------
frame : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib plotting method
Returns:
--------
ax: Matplotlib axis object
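    Examples:
    ---------
    A minimal usage sketch with synthetic data (column names are
    illustrative only):
    >>> df = DataFrame(np.random.randn(30, 4), columns=['a', 'b', 'c', 'd'])
    >>> df['Name'] = ['one', 'two', 'three'] * 10
    >>> andrews_curves(df, 'Name')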
"""
from math import sqrt, pi, sin, cos
import matplotlib.pyplot as plt
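    # Each observation x = (x1, x2, x3, ...) is mapped to the finite Fourier
    # series f(t) = x1/sqrt(2) + x2*sin(t) + x3*cos(t) + x4*sin(2t) + ...,
    # sampled at ``samples`` points on [-pi, pi); curves belonging to the
    # same class share a color.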
def function(amplitudes):
def f(x):
x1 = amplitudes[0]
result = x1 / sqrt(2.0)
harmonic = 1.0
for x_even, x_odd in zip(amplitudes[1::2], amplitudes[2::2]):
result += (x_even * sin(harmonic * x) +
x_odd * cos(harmonic * x))
harmonic += 1.0
            # a trailing sin term is needed only when there is an odd number
            # of coefficients left after amplitudes[0], i.e. an even total length
            if len(amplitudes) % 2 == 0:
                result += amplitudes[-1] * sin(harmonic * x)
return result
return f
n = len(frame)
class_col = frame[class_column]
classes = frame[class_column].drop_duplicates()
df = frame.drop(class_column, axis=1)
x = [-pi + 2.0 * pi * (t / float(samples)) for t in range(samples)]
used_legends = set([])
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
if ax is None:
ax = plt.gca(xlim=(-pi, pi))
for i in range(n):
row = df.iloc[i].values
f = function(row)
y = [f(t) for t in x]
kls = class_col.iat[i]
label = com.pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
ax.legend(loc='upper right')
ax.grid()
return ax
def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
"""Bootstrap plot.
Parameters:
-----------
series: Time series
fig: matplotlib figure object, optional
size: number of data points to consider during each sampling
samples: number of times the bootstrap procedure is performed
kwds: optional keyword arguments for plotting commands, must be accepted
by both hist and plot
Returns:
--------
fig: matplotlib figure
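    Examples:
    ---------
    A minimal usage sketch with synthetic data:
    >>> s = Series(np.random.uniform(size=100))
    >>> fig = bootstrap_plot(s, size=50, samples=500)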
"""
import random
import matplotlib.pyplot as plt
# random.sample(ndarray, int) fails on python 3.3, sigh
data = list(series.values)
samplings = [random.sample(data, size) for _ in range(samples)]
means = np.array([np.mean(sampling) for sampling in samplings])
medians = np.array([np.median(sampling) for sampling in samplings])
midranges = np.array([(min(sampling) + max(sampling)) * 0.5
for sampling in samplings])
if fig is None:
fig = plt.figure()
x = lrange(samples)
axes = []
ax1 = fig.add_subplot(2, 3, 1)
ax1.set_xlabel("Sample")
axes.append(ax1)
ax1.plot(x, means, **kwds)
ax2 = fig.add_subplot(2, 3, 2)
ax2.set_xlabel("Sample")
axes.append(ax2)
ax2.plot(x, medians, **kwds)
ax3 = fig.add_subplot(2, 3, 3)
ax3.set_xlabel("Sample")
axes.append(ax3)
ax3.plot(x, midranges, **kwds)
ax4 = fig.add_subplot(2, 3, 4)
ax4.set_xlabel("Mean")
axes.append(ax4)
ax4.hist(means, **kwds)
ax5 = fig.add_subplot(2, 3, 5)
ax5.set_xlabel("Median")
axes.append(ax5)
ax5.hist(medians, **kwds)
ax6 = fig.add_subplot(2, 3, 6)
ax6.set_xlabel("Midrange")
axes.append(ax6)
ax6.hist(midranges, **kwds)
for axis in axes:
plt.setp(axis.get_xticklabels(), fontsize=8)
plt.setp(axis.get_yticklabels(), fontsize=8)
return fig
@deprecate_kwarg(old_arg_name='colors', new_arg_name='color')
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
use_columns=False, xticks=None, colormap=None,
axvlines=True, **kwds):
"""Parallel coordinates plotting.
Parameters
----------
frame: DataFrame
class_column: str
Column name containing class names
cols: list, optional
A list of column names to use
ax: matplotlib.axis, optional
matplotlib axis object
color: list or tuple, optional
Colors to use for the different classes
use_columns: bool, optional
If true, columns will be used as xticks
xticks: list or tuple, optional
A list of values to use for xticks
colormap: str or matplotlib colormap, default None
Colormap to use for line colors.
axvlines: bool, optional
If true, vertical lines will be added at each xtick
kwds: keywords
Options to pass to matplotlib plotting method
Returns
-------
ax: matplotlib axis object
Examples
--------
>>> from pandas import read_csv
>>> from pandas.tools.plotting import parallel_coordinates
>>> from matplotlib import pyplot as plt
>>> df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv')
>>> parallel_coordinates(df, 'Name', color=('#556270', '#4ECDC4', '#C7F464'))
>>> plt.show()
"""
import matplotlib.pyplot as plt
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
if cols is None:
df = frame.drop(class_column, axis=1)
else:
df = frame[cols]
used_legends = set([])
ncols = len(df.columns)
# determine values to use for xticks
if use_columns is True:
if not np.all(np.isreal(list(df.columns))):
raise ValueError('Columns must be numeric to be used as xticks')
x = df.columns
elif xticks is not None:
if not np.all(np.isreal(xticks)):
raise ValueError('xticks specified must be numeric')
elif len(xticks) != ncols:
raise ValueError('Length of xticks must match number of columns')
x = xticks
else:
x = lrange(ncols)
if ax is None:
ax = plt.gca()
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
for i in range(n):
y = df.iloc[i].values
kls = class_col.iat[i]
label = com.pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
if axvlines:
for i in x:
ax.axvline(i, linewidth=1, color='black')
ax.set_xticks(x)
ax.set_xticklabels(df.columns)
ax.set_xlim(x[0], x[-1])
ax.legend(loc='upper right')
ax.grid()
return ax
def lag_plot(series, lag=1, ax=None, **kwds):
"""Lag plot for time series.
Parameters:
-----------
series: Time series
lag: lag of the scatter plot, default 1
ax: Matplotlib axis object, optional
kwds: Matplotlib scatter method keyword arguments, optional
Returns:
--------
ax: Matplotlib axis object
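    Examples:
    ---------
    A minimal usage sketch with synthetic data:
    >>> s = Series(np.random.randn(1000))
    >>> lag_plot(s, lag=1)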
"""
import matplotlib.pyplot as plt
    # workaround because `c='b'` is hardcoded in matplotlib's scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
data = series.values
y1 = data[:-lag]
y2 = data[lag:]
if ax is None:
ax = plt.gca()
ax.set_xlabel("y(t)")
ax.set_ylabel("y(t + %s)" % lag)
ax.scatter(y1, y2, **kwds)
return ax
def autocorrelation_plot(series, ax=None, **kwds):
"""Autocorrelation plot for time series.
Parameters:
-----------
series: Time series
ax: Matplotlib axis object, optional
kwds : keywords
Options to pass to matplotlib plotting method
Returns:
-----------
ax: Matplotlib axis object
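    Examples:
    -----------
    A minimal usage sketch with synthetic data:
    >>> s = Series(np.random.randn(1000))
    >>> autocorrelation_plot(s)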
"""
import matplotlib.pyplot as plt
n = len(series)
data = np.asarray(series)
if ax is None:
ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0))
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n)
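    # c0 is the lag-0 sample autocovariance; r(h) below is the lag-h sample
    # autocorrelation, i.e. the lag-h autocovariance normalized by c0.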
def r(h):
return ((data[:n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0
x = np.arange(n) + 1
y = lmap(r, x)
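    # 1.96 and 2.58 are the two-sided 95% and 99% standard-normal quantiles;
    # the horizontal bands at +/- z / sqrt(n) approximate confidence limits
    # for the autocorrelation of white noise.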
z95 = 1.959963984540054
z99 = 2.5758293035489004
ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey')
ax.axhline(y=z95 / np.sqrt(n), color='grey')
ax.axhline(y=0.0, color='black')
ax.axhline(y=-z95 / np.sqrt(n), color='grey')
ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')
ax.set_xlabel("Lag")
ax.set_ylabel("Autocorrelation")
ax.plot(x, y, **kwds)
if 'label' in kwds:
ax.legend()
ax.grid()
return ax
class MPLPlot(object):
"""
Base class for assembling a pandas plot using matplotlib
Parameters
----------
    data : Series or DataFrame
        The data to be plotted
"""
_layout_type = 'vertical'
_default_rot = 0
orientation = None
_pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',
'mark_right', 'stacked']
_attr_defaults = {'logy': False, 'logx': False, 'loglog': False,
'mark_right': True, 'stacked': False}
def __init__(self, data, kind=None, by=None, subplots=False, sharex=None,
sharey=False, use_index=True,
figsize=None, grid=None, legend=True, rot=None,
ax=None, fig=None, title=None, xlim=None, ylim=None,
xticks=None, yticks=None,
sort_columns=False, fontsize=None,
secondary_y=False, colormap=None,
table=False, layout=None, **kwds):
self.data = data
self.by = by
self.kind = kind
self.sort_columns = sort_columns
self.subplots = subplots
if sharex is None:
if ax is None:
self.sharex = True
else:
# if we get an axis, the users should do the visibility setting...
self.sharex = False
else:
self.sharex = sharex
self.sharey = sharey
self.figsize = figsize
self.layout = layout
self.xticks = xticks
self.yticks = yticks
self.xlim = xlim
self.ylim = ylim
self.title = title
self.use_index = use_index
self.fontsize = fontsize
if rot is not None:
self.rot = rot
# need to know for format_date_labels since it's rotated to 30 by
# default
self._rot_set = True
else:
self._rot_set = False
if isinstance(self._default_rot, dict):
self.rot = self._default_rot[self.kind]
else:
self.rot = self._default_rot
if grid is None:
grid = False if secondary_y else self.plt.rcParams['axes.grid']
self.grid = grid
self.legend = legend
self.legend_handles = []
self.legend_labels = []
for attr in self._pop_attributes:
value = kwds.pop(attr, self._attr_defaults.get(attr, None))
setattr(self, attr, value)
self.ax = ax
self.fig = fig
self.axes = None
# parse errorbar input if given
xerr = kwds.pop('xerr', None)
yerr = kwds.pop('yerr', None)
self.errors = {}
for kw, err in zip(['xerr', 'yerr'], [xerr, yerr]):
self.errors[kw] = self._parse_errorbars(kw, err)
if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, Index)):
secondary_y = [secondary_y]
self.secondary_y = secondary_y
# ugly TypeError if user passes matplotlib's `cmap` name.
# Probably better to accept either.
if 'cmap' in kwds and colormap:
raise TypeError("Only specify one of `cmap` and `colormap`.")
elif 'cmap' in kwds:
self.colormap = kwds.pop('cmap')
else:
self.colormap = colormap
self.table = table
self.kwds = kwds
self._validate_color_args()
def _validate_color_args(self):
if 'color' not in self.kwds and 'colors' in self.kwds:
            warnings.warn(("'colors' is being deprecated. Please use 'color' "
                           "instead of 'colors'"))
colors = self.kwds.pop('colors')
self.kwds['color'] = colors
if ('color' in self.kwds and self.nseries == 1):
# support series.plot(color='green')
self.kwds['color'] = [self.kwds['color']]
if ('color' in self.kwds or 'colors' in self.kwds) and \
self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
if 'color' in self.kwds and self.style is not None:
if com.is_list_like(self.style):
styles = self.style
else:
styles = [self.style]
# need only a single match
for s in styles:
if re.match('^[a-z]+?', s) is not None:
raise ValueError("Cannot pass 'style' string with a color "
"symbol and 'color' keyword argument. Please"
" use one or the other or pass 'style' "
"without a color symbol")
def _iter_data(self, data=None, keep_index=False, fillna=None):
if data is None:
data = self.data
if fillna is not None:
data = data.fillna(fillna)
if self.sort_columns:
columns = com._try_sort(data.columns)
else:
columns = data.columns
for col in columns:
if keep_index is True:
yield col, data[col]
else:
yield col, data[col].values
@property
def nseries(self):
if self.data.ndim == 1:
return 1
else:
return self.data.shape[1]
def draw(self):
self.plt.draw_if_interactive()
def generate(self):
self._args_adjust()
self._compute_plot_data()
self._setup_subplots()
self._make_plot()
self._add_table()
self._make_legend()
self._post_plot_logic()
self._adorn_subplots()
def _args_adjust(self):
pass
def _has_plotted_object(self, ax):
"""check whether ax has data"""
return (len(ax.lines) != 0 or
len(ax.artists) != 0 or
len(ax.containers) != 0)
def _maybe_right_yaxis(self, ax, axes_num):
if not self.on_right(axes_num):
# secondary axes may be passed via ax kw
return self._get_ax_layer(ax)
if hasattr(ax, 'right_ax'):
            # if it has a right_ax property, ``ax`` must be the left axes
return ax.right_ax
elif hasattr(ax, 'left_ax'):
            # if it has a left_ax property, ``ax`` must be the right axes
return ax
else:
# otherwise, create twin axes
orig_ax, new_ax = ax, ax.twinx()
new_ax._get_lines.color_cycle = orig_ax._get_lines.color_cycle
orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
if not self._has_plotted_object(orig_ax): # no data on left y
orig_ax.get_yaxis().set_visible(False)
return new_ax
def _setup_subplots(self):
if self.subplots:
fig, axes = _subplots(naxes=self.nseries,
sharex=self.sharex, sharey=self.sharey,
figsize=self.figsize, ax=self.ax,
layout=self.layout,
layout_type=self._layout_type)
else:
if self.ax is None:
fig = self.plt.figure(figsize=self.figsize)
axes = fig.add_subplot(111)
else:
fig = self.ax.get_figure()
if self.figsize is not None:
fig.set_size_inches(self.figsize)
axes = self.ax
axes = _flatten(axes)
if self.logx or self.loglog:
[a.set_xscale('log') for a in axes]
if self.logy or self.loglog:
[a.set_yscale('log') for a in axes]
self.fig = fig
self.axes = axes
@property
def result(self):
"""
Return result axes
"""
if self.subplots:
if self.layout is not None and not com.is_list_like(self.ax):
return self.axes.reshape(*self.layout)
else:
return self.axes
else:
sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
all_sec = (com.is_list_like(self.secondary_y) and
len(self.secondary_y) == self.nseries)
if (sec_true or all_sec):
# if all data is plotted on secondary, return right axes
return self._get_ax_layer(self.axes[0], primary=False)
else:
return self.axes[0]
def _compute_plot_data(self):
data = self.data
if isinstance(data, Series):
label = self.label
if label is None and data.name is None:
label = 'None'
data = data.to_frame(name=label)
numeric_data = data.convert_objects()._get_numeric_data()
try:
is_empty = numeric_data.empty
except AttributeError:
is_empty = not len(numeric_data)
# no empty frames or series allowed
if is_empty:
raise TypeError('Empty {0!r}: no numeric data to '
'plot'.format(numeric_data.__class__.__name__))
self.data = numeric_data
def _make_plot(self):
raise AbstractMethodError(self)
def _add_table(self):
if self.table is False:
return
elif self.table is True:
data = self.data.transpose()
else:
data = self.table
ax = self._get_ax(0)
table(ax, data)
def _post_plot_logic(self):
pass
def _adorn_subplots(self):
to_adorn = self.axes
if len(self.axes) > 0:
all_axes = self._get_axes()
nrows, ncols = self._get_axes_layout()
_handle_shared_axes(axarr=all_axes, nplots=len(all_axes),
naxes=nrows * ncols, nrows=nrows,
ncols=ncols, sharex=self.sharex,
sharey=self.sharey)
for ax in to_adorn:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.title:
if self.subplots:
self.fig.suptitle(self.title)
else:
self.axes[0].set_title(self.title)
labels = [com.pprint_thing(key) for key in self.data.index]
labels = dict(zip(range(len(self.data.index)), labels))
for ax in self.axes:
if self.orientation == 'vertical' or self.orientation is None:
if self._need_to_set_index:
xticklabels = [labels.get(x, '') for x in ax.get_xticks()]
ax.set_xticklabels(xticklabels)
self._apply_axis_properties(ax.xaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
elif self.orientation == 'horizontal':
if self._need_to_set_index:
yticklabels = [labels.get(y, '') for y in ax.get_yticks()]
ax.set_yticklabels(yticklabels)
self._apply_axis_properties(ax.yaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)
def _apply_axis_properties(self, axis, rot=None, fontsize=None):
labels = axis.get_majorticklabels() + axis.get_minorticklabels()
for label in labels:
if rot is not None:
label.set_rotation(rot)
if fontsize is not None:
label.set_fontsize(fontsize)
@property
def legend_title(self):
if not isinstance(self.data.columns, MultiIndex):
name = self.data.columns.name
if name is not None:
name = com.pprint_thing(name)
return name
else:
stringified = map(com.pprint_thing,
self.data.columns.names)
return ','.join(stringified)
def _add_legend_handle(self, handle, label, index=None):
        if label is not None:
if self.mark_right and index is not None:
if self.on_right(index):
label = label + ' (right)'
self.legend_handles.append(handle)
self.legend_labels.append(label)
def _make_legend(self):
ax, leg = self._get_ax_legend(self.axes[0])
handles = []
labels = []
title = ''
if not self.subplots:
            if leg is not None:
title = leg.get_title().get_text()
handles = leg.legendHandles
labels = [x.get_text() for x in leg.get_texts()]
if self.legend:
if self.legend == 'reverse':
self.legend_handles = reversed(self.legend_handles)
self.legend_labels = reversed(self.legend_labels)
handles += self.legend_handles
labels += self.legend_labels
                if self.legend_title is not None:
title = self.legend_title
if len(handles) > 0:
ax.legend(handles, labels, loc='best', title=title)
elif self.subplots and self.legend:
for ax in self.axes:
if ax.get_visible():
ax.legend(loc='best')
def _get_ax_legend(self, ax):
leg = ax.get_legend()
other_ax = (getattr(ax, 'left_ax', None) or
getattr(ax, 'right_ax', None))
other_leg = None
if other_ax is not None:
other_leg = other_ax.get_legend()
if leg is None and other_leg is not None:
leg = other_leg
ax = other_ax
return ax, leg
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
return plt
_need_to_set_index = False
def _get_xticks(self, convert_period=False):
index = self.data.index
is_datetype = index.inferred_type in ('datetime', 'date',
'datetime64', 'time')
if self.use_index:
if convert_period and isinstance(index, PeriodIndex):
self.data = self.data.reindex(index=index.order())
x = self.data.index.to_timestamp()._mpl_repr()
            elif index.is_numeric():
                # Matplotlib supports numeric values or datetime objects as
                # xaxis values. We take the LBYL approach here: by the time
                # matplotlib would raise an exception for non-numeric,
                # non-datetime xaxis values, plt has already taken several
                # actions, so check up front instead.
x = index._mpl_repr()
elif is_datetype:
self.data = self.data.sort_index()
x = self.data.index._mpl_repr()
else:
self._need_to_set_index = True
x = lrange(len(index))
else:
x = lrange(len(index))
return x
def _is_datetype(self):
index = self.data.index
return (isinstance(index, (PeriodIndex, DatetimeIndex)) or
index.inferred_type in ('datetime', 'date', 'datetime64',
'time'))
def _get_plot_function(self):
'''
Returns the matplotlib plotting function (plot or errorbar) based on
the presence of errorbar keywords.
'''
errorbar = any(e is not None for e in self.errors.values())
def plotf(ax, x, y, style=None, **kwds):
mask = com.isnull(y)
if mask.any():
y = np.ma.array(y)
y = np.ma.masked_where(mask, y)
if errorbar:
return self.plt.Axes.errorbar(ax, x, y, **kwds)
else:
# prevent style kwarg from going to errorbar, where it is unsupported
if style is not None:
args = (ax, x, y, style)
else:
args = (ax, x, y)
return self.plt.Axes.plot(*args, **kwds)
return plotf
def _get_index_name(self):
if isinstance(self.data.index, MultiIndex):
name = self.data.index.names
if any(x is not None for x in name):
name = ','.join([com.pprint_thing(x) for x in name])
else:
name = None
else:
name = self.data.index.name
if name is not None:
name = com.pprint_thing(name)
return name
@classmethod
def _get_ax_layer(cls, ax, primary=True):
"""get left (primary) or right (secondary) axes"""
if primary:
return getattr(ax, 'left_ax', ax)
else:
return getattr(ax, 'right_ax', ax)
def _get_ax(self, i):
# get the twinx ax if appropriate
if self.subplots:
ax = self.axes[i]
ax = self._maybe_right_yaxis(ax, i)
self.axes[i] = ax
else:
ax = self.axes[0]
ax = self._maybe_right_yaxis(ax, i)
ax.get_yaxis().set_visible(True)
return ax
def on_right(self, i):
if isinstance(self.secondary_y, bool):
return self.secondary_y
if isinstance(self.secondary_y, (tuple, list, np.ndarray, Index)):
return self.data.columns[i] in self.secondary_y
def _get_style(self, i, col_name):
style = ''
if self.subplots:
style = 'k'
if self.style is not None:
if isinstance(self.style, list):
try:
style = self.style[i]
except IndexError:
pass
elif isinstance(self.style, dict):
style = self.style.get(col_name, style)
else:
style = self.style
return style or None
def _get_colors(self, num_colors=None, color_kwds='color'):
if num_colors is None:
num_colors = self.nseries
return _get_standard_colors(num_colors=num_colors,
colormap=self.colormap,
color=self.kwds.get(color_kwds))
def _maybe_add_color(self, colors, kwds, style, i):
has_color = 'color' in kwds or self.colormap is not None
if has_color and (style is None or re.match('[a-z]+', style) is None):
kwds['color'] = colors[i % len(colors)]
def _parse_errorbars(self, label, err):
'''
Look for error keyword arguments and return the actual errorbar data
or return the error DataFrame/dict
Error bars can be specified in several ways:
Series: the user provides a pandas.Series object of the same
length as the data
ndarray: provides a np.ndarray of the same length as the data
DataFrame/dict: error values are paired with keys matching the
key in the plotted DataFrame
str: the name of the column within the plotted DataFrame
'''
if err is None:
return None
from pandas import DataFrame, Series
def match_labels(data, e):
e = e.reindex_axis(data.index)
return e
# key-matched DataFrame
if isinstance(err, DataFrame):
err = match_labels(self.data, err)
# key-matched dict
elif isinstance(err, dict):
pass
# Series of error values
elif isinstance(err, Series):
# broadcast error series across data
err = match_labels(self.data, err)
err = np.atleast_2d(err)
err = np.tile(err, (self.nseries, 1))
# errors are a column in the dataframe
elif isinstance(err, string_types):
evalues = self.data[err].values
self.data = self.data[self.data.columns.drop(err)]
err = np.atleast_2d(evalues)
err = np.tile(err, (self.nseries, 1))
elif com.is_list_like(err):
if com.is_iterator(err):
err = np.atleast_2d(list(err))
else:
# raw error values
err = np.atleast_2d(err)
err_shape = err.shape
# asymmetrical error bars
if err.ndim == 3:
if (err_shape[0] != self.nseries) or \
(err_shape[1] != 2) or \
(err_shape[2] != len(self.data)):
msg = "Asymmetrical error bars should be provided " + \
"with the shape (%u, 2, %u)" % \
(self.nseries, len(self.data))
raise ValueError(msg)
# broadcast errors to each data series
if len(err) == 1:
err = np.tile(err, (self.nseries, 1))
elif com.is_number(err):
err = np.tile([err], (self.nseries, len(self.data)))
else:
msg = "No valid %s detected" % label
raise ValueError(msg)
return err
def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True):
from pandas import DataFrame
errors = {}
for kw, flag in zip(['xerr', 'yerr'], [xerr, yerr]):
if flag:
err = self.errors[kw]
# user provided label-matched dataframe of errors
if isinstance(err, (DataFrame, dict)):
if label is not None and label in err.keys():
err = err[label]
else:
err = None
elif index is not None and err is not None:
err = err[index]
if err is not None:
errors[kw] = err
return errors
def _get_axes(self):
return self.axes[0].get_figure().get_axes()
def _get_axes_layout(self):
axes = self._get_axes()
x_set = set()
y_set = set()
for ax in axes:
# check axes coordinates to estimate layout
points = ax.get_position().get_points()
x_set.add(points[0][0])
y_set.add(points[0][1])
return (len(y_set), len(x_set))
class ScatterPlot(MPLPlot):
_layout_type = 'single'
def __init__(self, data, x, y, c=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
raise ValueError('scatter requires an x and y column')
if com.is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if com.is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
if com.is_integer(c) and not self.data.columns.holds_integer():
c = self.data.columns[c]
self.x = x
self.y = y
self.c = c
@property
def nseries(self):
return 1
def _make_plot(self):
import matplotlib as mpl
mpl_ge_1_3_1 = LooseVersion(mpl.__version__) >= LooseVersion('1.3.1')
import matplotlib.pyplot as plt
x, y, c, data = self.x, self.y, self.c, self.data
ax = self.axes[0]
c_is_column = com.is_hashable(c) and c in self.data.columns
# plot a colorbar only if a colormap is provided or necessary
cb = self.kwds.pop('colorbar', self.colormap or c_is_column)
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'Greys'
cmap = plt.cm.get_cmap(cmap)
if c is None:
c_values = self.plt.rcParams['patch.facecolor']
elif c_is_column:
c_values = self.data[c].values
else:
c_values = c
if self.legend and hasattr(self, 'label'):
label = self.label
else:
label = None
scatter = ax.scatter(data[x].values, data[y].values, c=c_values,
label=label, cmap=cmap, **self.kwds)
if cb:
img = ax.collections[0]
kws = dict(ax=ax)
if mpl_ge_1_3_1:
kws['label'] = c if c_is_column else ''
self.fig.colorbar(img, **kws)
if label is not None:
self._add_legend_handle(scatter, label)
else:
self.legend = False
errors_x = self._get_errorbars(label=x, index=0, yerr=False)
errors_y = self._get_errorbars(label=y, index=0, xerr=False)
if len(errors_x) > 0 or len(errors_y) > 0:
err_kwds = dict(errors_x, **errors_y)
err_kwds['ecolor'] = scatter.get_facecolor()[0]
ax.errorbar(data[x].values, data[y].values, linestyle='none', **err_kwds)
def _post_plot_logic(self):
ax = self.axes[0]
x, y = self.x, self.y
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
class HexBinPlot(MPLPlot):
_layout_type = 'single'
def __init__(self, data, x, y, C=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
raise ValueError('hexbin requires an x and y column')
if com.is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if com.is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
if com.is_integer(C) and not self.data.columns.holds_integer():
C = self.data.columns[C]
self.x = x
self.y = y
self.C = C
@property
def nseries(self):
return 1
def _make_plot(self):
import matplotlib.pyplot as plt
x, y, data, C = self.x, self.y, self.data, self.C
ax = self.axes[0]
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'BuGn'
cmap = plt.cm.get_cmap(cmap)
cb = self.kwds.pop('colorbar', True)
if C is None:
c_values = None
else:
c_values = data[C].values
ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap,
**self.kwds)
if cb:
img = ax.collections[0]
self.fig.colorbar(img, ax=ax)
def _make_legend(self):
pass
def _post_plot_logic(self):
ax = self.axes[0]
x, y = self.x, self.y
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
class LinePlot(MPLPlot):
_default_rot = 0
orientation = 'vertical'
def __init__(self, data, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if self.stacked:
self.data = self.data.fillna(value=0)
self.x_compat = plot_params['x_compat']
if 'x_compat' in self.kwds:
self.x_compat = bool(self.kwds.pop('x_compat'))
def _index_freq(self):
freq = getattr(self.data.index, 'freq', None)
if freq is None:
freq = getattr(self.data.index, 'inferred_freq', None)
if freq == 'B':
weekdays = np.unique(self.data.index.dayofweek)
if (5 in weekdays) or (6 in weekdays):
freq = None
return freq
def _is_dynamic_freq(self, freq):
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
return freq is not None and self._no_base(freq)
def _no_base(self, freq):
# hack this for 0.10.1, creating more technical debt...sigh
if isinstance(self.data.index, DatetimeIndex):
base = frequencies.get_freq(freq)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib as mpl
import netCDF4 as nc
import datetime as dt
from salishsea_tools import evaltools as et, places, viz_tools, visualisations, geo_tools
import xarray as xr
import pandas as pd
import pickle
import os
import gsw
# Extracting winds from the correct path
def getWindVarsYear(year,loc):
''' Given a year, returns the correct directory and nam_fmt for wind forcing as well as the
location of S3 on the corresponding grid.
Parameters:
year: a year value in integer form
loc: the location name as a string. Eg. loc='S3'
Returns:
jW: y-coordinate for the location
iW: x-coordinate for the location
opsdir: path to directory where wind forcing file is stored
nam_fmt: naming convention of the appropriate files
'''
if year>2014:
opsdir='/results/forcing/atmospheric/GEM2.5/operational/'
nam_fmt='ops'
jW,iW=places.PLACES[loc]['GEM2.5 grid ji']
else:
opsdir='/data/eolson/results/MEOPAR/GEMLAM/'
nam_fmt='gemlam'
with xr.open_dataset('/results/forcing/atmospheric/GEM2.5/gemlam/gemlam_y2012m03d01.nc') as gridrefWind:
# always use a post-2011 file here to identify station grid location
lon,lat=places.PLACES[loc]['lon lat']
jW,iW=geo_tools.find_closest_model_point(lon,lat,
gridrefWind.variables['nav_lon'][:,:]-360,gridrefWind.variables['nav_lat'][:,:],
grid='GEM2.5')
# the -360 is needed because longitudes in this case are reported in postive degrees East
return jW,iW,opsdir,nam_fmt
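# Illustrative usage sketch (not part of the original module): getWindVarsYear only
# chooses between the operational and GEMLAM wind archives and returns the grid
# indices for the named station. The station name 'S3' is the example used in the
# docstring above and is assumed to exist in places.PLACES.
def _example_wind_lookup():
    jW, iW, opsdir, nam_fmt = getWindVarsYear(2015, 'S3')
    print(f'wind at (j={jW}, i={iW}) read from {opsdir} using the {nam_fmt} file naming')
    return jW, iW, opsdir, nam_fmt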
# Metric 1:
def metric1_bloomtime(phyto_alld,no3_alld,bio_time):
''' Given datetime array and two 2D arrays of phytoplankton and nitrate concentrations, over time
and depth, returns a datetime value of the spring phytoplankton bloom date according to the
following definition (now called 'metric 1'):
'The spring bloom date is the peak phytoplankton concentration (averaged from the surface to
3 m depth) within four days of the average upper 3 m nitrate concentration going below 0.5 uM
(the half-saturation concentration) for two consecutive days'
EDIT: 0.5 uM was changed to 2.0 uM to yield more accurate results
Parameters:
phyto_alld: 2D array of phytoplankton concentrations (in uM N) over all depths and time
range of 'bio_time'
no3_alld: 2D array of nitrate concentrations (in uM N) over all depths and time
range of 'bio_time'
bio_time: 1D datetime array of the same time frame as phyto_alld and no3_alld
Returns:
bloomtime1: the spring bloom date as a single datetime value
'''
# a) get avg phytplankton in upper 3m
phyto_alld_df=pd.DataFrame(phyto_alld)
upper_3m_phyto=pd.DataFrame(phyto_alld_df[[0,1,2,3]].mean(axis=1))
upper_3m_phyto.columns=['upper_3m_phyto']
#upper_3m_phyto
# b) get average no3 in upper 3m
no3_alld_df=pd.DataFrame(no3_alld)
upper_3m_no3=pd.DataFrame(no3_alld_df[[0,1,2,3]].mean(axis=1))
upper_3m_no3.columns=['upper_3m_no3']
#upper_3m_no3
# make bio_time into a dataframe
bio_time_df=pd.DataFrame(bio_time)
bio_time_df.columns=['bio_time']
metric1_df=pd.concat((bio_time_df,upper_3m_phyto,upper_3m_no3), axis=1)
# c) Find first location where nitrate crosses below 0.5 micromolar and
# stays there for 2 days
# NOTE: changed the value to 2 micromolar
location1=np.nan
for i, row in metric1_df.iterrows():
try:
if metric1_df['upper_3m_no3'].iloc[i]<2 and metric1_df['upper_3m_no3'].iloc[i+1]<2:
location1=i
break
except IndexError:
location1=np.nan
print('bloom not found')
# d) Find date with maximum phytoplankton concentration within four days (say 9 day window) of date in c)
if np.isnan(location1):
bloomrange=np.nan
bloomtime1=np.nan
else:
bloomrange=metric1_df[location1-4:location1+5]
bloomtime1=bloomrange.loc[bloomrange.upper_3m_phyto.idxmax(), 'bio_time']
return bloomtime1
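# Illustrative sketch (not part of the original module): metric 1 can be exercised
# with synthetic fields - rows are days, columns are depth levels - where upper
# nitrate drains below 2 uM late in the record and phytoplankton peaks shortly
# after, so the returned date should fall near the phytoplankton maximum. Uses the
# numpy/pandas imports already at the top of this module.
def _example_metric1():
    days = pd.date_range('2015-02-01', periods=90, freq='D')
    phyto = np.tile((10.0 * np.exp(-((np.arange(90) - 82) / 4.0) ** 2))[:, None], (1, 5))
    no3 = np.tile(np.linspace(20.0, 0.0, 90)[:, None], (1, 5))
    return metric1_bloomtime(phyto, no3, days)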
# Metric 2:
def metric2_bloomtime(phyto_alld,no3_alld,bio_time):
''' Given datetime array and two 2D arrays of phytoplankton and nitrate concentrations, over time
and depth, returns a datetime value of the spring phytoplankton bloom date according to the
following definition (now called 'metric 2'):
'The first peak in which chlorophyll concentrations in upper 3m are above 5 ug/L for more than two days'
Parameters:
phyto_alld: 2D array of phytoplankton concentrations (in uM N) over all depths and time
range of 'bio_time'
no3_alld: 2D array of nitrate concentrations (in uM N) over all depths and time
range of 'bio_time'
bio_time: 1D datetime array of the same time frame as sphyto and sno3
Returns:
bloomtime2: the spring bloom date as a single datetime value
'''
# a) get avg phytplankton in upper 3m
phyto_alld_df=pd.DataFrame(phyto_alld)
upper_3m_phyto=pd.DataFrame(phyto_alld_df[[0,1,2,3]].mean(axis=1))
upper_3m_phyto.columns=['sphyto']
#upper_3m_phyto
# b) get average no3 in upper 3m
no3_alld_df=pd.DataFrame(no3_alld)
upper_3m_no3=pd.DataFrame(no3_alld_df[[0,1,2,3]].mean(axis=1))
upper_3m_no3.columns=['sno3']
#upper_3m_no3
# make bio_time into a dataframe
bio_time_df=pd.DataFrame(bio_time)
bio_time_df.columns=['bio_time']
df=pd.concat((bio_time_df,upper_3m_phyto,upper_3m_no3), axis=1)
# to find all the peaks:
df['phytopeaks'] = df.sphyto[(df.sphyto.shift(1) < df.sphyto) & (df.sphyto.shift(-1) < df.sphyto)]
# need to covert the value of interest from ug/L to uM N (conversion factor: 1.8 ug Chl per umol N)
chlvalue=5/1.8
# extract the bloom time date
bloomtime2 = np.nan  # ensure a defined return value if no qualifying peak is found
for i, row in df.iterrows():
try:
if df['sphyto'].iloc[i-1]>chlvalue and df['sphyto'].iloc[i-2]>chlvalue and pd.notna(df['phytopeaks'].iloc[i]):
bloomtime2=df.bio_time[i]
break
elif df['sphyto'].iloc[i+1]>chlvalue and df['sphyto'].iloc[i+2]>chlvalue and pd.notna(df['phytopeaks'].iloc[i]):
bloomtime2=df.bio_time[i]
break
except IndexError:
bloomtime2=np.nan
print('bloom not found')
return bloomtime2
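# Illustrative sketch (not part of the original module): metric 2 keys on the first
# local phytoplankton peak whose neighbouring days stay above 5 ug/L Chl
# (~2.8 uM N with the 1.8 ug Chl per umol N conversion), so a single broad
# synthetic surface peak is enough to exercise it.
def _example_metric2():
    days = pd.date_range('2015-02-01', periods=90, freq='D')
    phyto = np.tile((6.0 * np.exp(-((np.arange(90) - 60) / 8.0) ** 2))[:, None], (1, 5))
    no3 = np.tile(np.linspace(20.0, 0.0, 90)[:, None], (1, 5))
    return metric2_bloomtime(phyto, no3, days)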
# Metric 3:
def metric3_bloomtime(sphyto,sno3,bio_time):
''' Given datetime array and two 1D arrays of surface phytplankton and nitrate concentrations
over time, returns a datetime value of the spring phytoplankton bloom date according to the
following definition (now called 'metric 3'):
'The median + 5% of the annual Chl concentration is deemed “threshold value” for each year.
For a given year, bloom initiation is determined to be the week that first reaches the
threshold value (by looking at weekly averages) as long as one of the two following weeks
was >70% of the threshold value'
Parameters:
sphyto: 1D array of phytoplankton concentrations (in uM N) over time
range of 'bio_time'
sno3: 1D array of nitrate concentrations (in uM N) over time
range of 'bio_time'
bio_time: 1D datetime array of the same time frame as sphyto and sno3
Returns:
bloomtime3: the spring bloom date as a single datetime value
'''
# 1) determine threshold value
df = pd.DataFrame({'bio_time':bio_time, 'sphyto':sphyto, 'sno3':sno3})
# a) find median chl value of that year, add 5% (this is only feb-june, should we do the whole year?)
threshold=df['sphyto'].median()*1.05
# b) secondthresh = find 70% of threshold value
secondthresh=threshold*0.7
# 2) Take the average of each week and make a dataframe with start date of week and weekly average
weeklychl = pd.DataFrame(df.resample('W', on='bio_time').sphyto.mean())
weeklychl.reset_index(inplace=True)
# 3) Loop through the weeks and find the first week that reaches the threshold.
# Is one of the two week values after this week > secondthresh?
bloomtime3 = np.nan  # ensure a defined return value if no qualifying week is found
for i, row in weeklychl.iterrows():
try:
if weeklychl['sphyto'].iloc[i]>threshold and weeklychl['sphyto'].iloc[i+1]>secondthresh:
bloomtime3=weeklychl.bio_time[i]
break
elif weeklychl['sphyto'].iloc[i]>threshold and weeklychl['sphyto'].iloc[i+2]>secondthresh:
bloomtime3=weeklychl.bio_time[i]
break
except IndexError:
bloomtime3=np.nan
print('bloom not found')
return bloomtime3
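# Illustrative sketch (not part of the original module): metric 3 operates on
# surface (1D) series resampled to weekly means, with the threshold set to the
# record median Chl + 5%, so a flat baseline plus one strong bloom is enough to
# trigger it.
def _example_metric3():
    days = pd.date_range('2015-02-01', periods=150, freq='D')
    sphyto = 1.0 + 8.0 * np.exp(-((np.arange(150) - 70) / 10.0) ** 2)
    sno3 = np.linspace(20.0, 0.0, 150)
    return metric3_bloomtime(sphyto, sno3, days)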
# Surface monthly average calculation given 2D array with depth and time:
def D2_3monthly_avg(time,x):
''' Given datetime array of 3 months and a 2D array of variable x, over time
and depth, returns an array containing the 3 monthly averages of the
surface values of variable x
Parameters:
time: datetime array of each day starting from the 1st day
of the first month, ending on the last day of the third month
x: 2-dimensional numpy array containing daily averages of the
same length and time frame as 'time', and depth profile
Returns:
jan_x, feb_x, mar_x: monthly averages of variable x at surface
'''
depthx=pd.DataFrame(x)
surfacex=np.array(depthx[[0]]).flatten()
df = pd.DataFrame({'time':time, 'x':surfacex})
import sys
from collections import namedtuple
import pkg_resources
IS_FROZEN = hasattr(sys, '_MEIPASS')
# backup true function
_true_get_distribution = pkg_resources.get_distribution
# create small placeholder for the dash call
# _flask_compress_version = parse_version(get_distribution("flask-compress").version)
_Dist = namedtuple('_Dist', ['version'])
def _get_distribution(dist):
if IS_FROZEN and dist == 'flask-compress':
return _Dist('1.8.0')
else:
return _true_get_distribution(dist)
# monkey patch the function so it can work once frozen and pkg_resources is of
# no help
pkg_resources.get_distribution = _get_distribution
import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, ALL, State, MATCH, ALLSMALLER
import plotly.graph_objects as go
import numpy as np
from cv2 import cv2
from flask import Flask, Response
#somebody on stackoverflow said to use Pillow, bc PIL is dead idk --christian
from PIL import Image
import math
import webbrowser
import os
import pandas as pd
import urllib
#do we need threading?
"""
Simple module that monkey patches pkg_resources.get_distribution used by dash
to determine the version of Flask-Compress which is not available with a
flask_compress.__version__ attribute. Known to work with dash==1.16.3 and
PyInstaller==3.6.
"""
#pyinstaller -c -F --add-data "assets/my.css;assets" --hidden-import "flask-compress" --clean postthanksgivinglayout.py
'''
import pkg_resources
IS_FROZEN = hasattr(sys, '_MEIPASS')
# backup true function
_true_get_distribution = pkg_resources.get_distribution
# create small placeholder for the dash call
# _flask_compress_version = parse_version(get_distribution("flask-compress").version)
_Dist = namedtuple('_Dist', ['version'])
def _get_distribution(dist):
if IS_FROZEN and dist == 'flask-compress':
return _Dist('1.8.0')
else:
return _true_get_distribution(dist)
# monkey patch the function so it can work once frozen and pkg_resources is of
# no help
pkg_resources.get_distribution = _get_distribution
'''
# Global Variables
'''
outputFrame = None
source = 0
server = Flask(__name__)
cam = VideoStream(src=source).start()
lock = threading.Lock()
'''
cameraCrop = [[0,160], [280, 180]]
exposure = 0
contrast = 0
cameraID = 0
spectrumWave = np.linspace(cameraCrop[0][0],cameraCrop[1][0], cameraCrop[1][0] - cameraCrop[0][0])
spectrumPixel = np.linspace(cameraCrop[0][0],cameraCrop[1][0], cameraCrop[1][0] - cameraCrop[0][0])
spectrum = []
ref_spectrum = []
is_ref = False
is_abs = False
server = Flask(__name__)
#what is this Image function?
graph_background = Image.open('assets/spectrumCrop.jpg')
# initialize app stuff
# stylesheet, same one I use on my website (https://cjs3.cc)
external_stylesheets = ['my.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets, server=server)
# VideoCamera object
class VideoCamera(object):
def __init__(self):
global cameraID
self.video = cv2.VideoCapture(cameraID)
def __del__(self):
self.video.release()
def get_frame(self):
success, image = self.video.read()
cv2.normalize(image, image, (0+exposure + contrast), (255 + exposure - contrast), cv2.NORM_MINMAX)
global cameraCrop
spec = normalise(getSpectrum(image[cameraCrop[0][1] : cameraCrop[1][1], cameraCrop[0][0]: cameraCrop[1][0]]))
global spectrum
spectrum = spec
image = cv2.rectangle(image, (cameraCrop[0][0], cameraCrop[0][1]), (cameraCrop[1][0], cameraCrop[1][1]), (0,0, 255), 3)
ret, jpeg = cv2.imencode('.png', image[:, :])
return jpeg.tobytes()
def gen(camera):
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
@server.route('/video_feed')
def video_feed():
return Response(gen(VideoCamera()),
mimetype='multipart/x-mixed-replace; boundary=frame')
# Creating the figure
x = np.arange(700)
fig = go.Figure(data=go.Scatter(x=x, y=x*2))
# app layout/the html involved. Can use markup too for most of it if needed
app.layout = html.Div([
# This div is the top half
html.Div([
# This div is the left column
html.Div([
# The s^3 logo, make sure to import it into the assets folder
html.Img(src=app.get_asset_url("s3 logo.png"), alt="s^3 logo", style={"width":"70%"}),
# Camera Settings heading
html.H3("Camera Settings"),
# Exposure label and sliders
html.Div(
[html.Label("Exposure", style={"vertical-align":"top"}),
dcc.Input(id = "Exposure", type="range", min="-200", max="200", value="0", style={"width":"60%","float":"right"})], style={"top-padding":"5px"}
),
html.Br(),
# Contrast label and slider
html.Div(
[html.Label("Contrast", style={"vertical-align":"top"}),
dcc.Input(id = "Contrast", type="range", min="-100", max="100", value="0", style={"width":"60%","float":"right"})], style={"top-padding":"5px"}
)
], style={"background":"#f3faf0", "padding":"15px"}, className="grid-item"),
# This div is the center column
html.Div([
# This div contains the camera
html.Div(id = "camera", children=[
# The video feed
html.Img(src = "/video_feed", alt = 'Video Feed', width="100%", id = "video_frame")]
),
# Camera input textbox
dcc.Input(id="camera_name", placeholder="Camera ID", type="text",style={"width":"60%"}),
# Refresh page button
html.A(html.Button('Change Camera'),href='/', style={"margin-left":"5%", "float":"right"}),
html.Br(),
# The dropdown menu
html.Div(
# "How does this work?" text
children = [html.P("How does this work?", style={"font-size":"0.5em","display":"inline","margin-bottom":"0px", "padding":"0px"}),
# Text in the dropdown menu
html.Div(
children= [
html.P("Type 0 for built-in camera input", style={"font-size":".8em"}),
html.P("Type 1 for USB camera input", style={"font-size":".8em"}),
html.P("Type an IP address for IP (phone) webcam", style={"font-size":".8em"}),
html.P("Example IP adress: http://123.456.7.890:1234", style={"font-size":".75em"})
],
className="dropdown-content")],
className = "dropdown")]
, style={"background":"#f9faf0", "padding":"15px"}, className="grid-item"),
#The right column
html.Div([
html.H3("Set Input Line"),
# Set both endpoints option
html.H4("Set Endpoints"),
# Point 1
html.P("Point 1",style={"margin-bottom":"0px", "padding":"0px"}),
#x1 input
dcc.Input(id="x1", placeholder="x1", type="text",style={"width":"40%"}),
#I should almost certainly use padding to space the text boxes instead of this tactical two space approach, but also like it works so
html.P(", ", style={"display":"inline", "vertical-align":"bottom", "font-size":"1.5em"}),
#y1 input
dcc.Input(id="y1", placeholder="y1", type="text",style={"width":"40%"}),
# Point 2
html.P("Point 2", style={"margin-bottom":"0px", "padding":"0px"}),
#x2 input
dcc.Input(id="x2", placeholder="x2", type="text",style={"width":"40%"}),
html.P(", ", style={"display":"inline", "vertical-align":"bottom", "font-size":"1.5em"}),
#y2 input
dcc.Input(id="y2", placeholder="y2", type="text",style={"width":"40%"})
],style={"background":"#f0f7fa", "padding":"15px"}, className="grid-item"),
], className="grid-container"),
# The bottom Sections
html.Div([
# The graph box
html.Div([
# The graph itself
dcc.Graph(id="graph", figure=fig, style={"margin-bottom":"10px"}),
dcc.Interval(id = 'interval', interval = 300, n_intervals = 0),
# Callibration
dcc.Input(id="wavelength1", placeholder="327", value = '161', type="text",style={"width":"8%"}),
# The first slider
dcc.Input(id="range1", type="range", min="0", max="1000", value="161", style={"width":"90%", "float":"right"}),
html.Br(),
dcc.Input(id="wavelength2", placeholder="547",value = '782', type="text",style={"width":"8%"}),
# The second slider
dcc.Input(id="range2", type="range", min="0", max="1000", value="782", style={"width":"90%", "float":"right"}),
html.Br(),
], style={"background":"#faf4f0", "padding":"15px"}, className="bgrid-item"),
# The graph options box
html.Div([
html.H3("Graph Options"),
#Buttons Section
html.H4("Graph Display"),
# Intensity button
html.Button("Intensity", id="idInten", n_clicks=0, style={"margin-bottom":"10px", "margin-right":"5px"}),
html.Br(),
# Absorbance button
html.Button("Absorbance", id="idAbsrob", n_clicks=0, style={"margin-bottom":"10px", "margin-right":"5px"}),
html.Br(),
# Calibration button
html.Button("Reference", id="idCalib", n_clicks=0, style={"margin-bottom":"10px", "margin-right":"5px"}),
# Name and save options
html.Div([
html.H4("Name & Save"),
# Name spectrum input
dcc.Input(id="spectrum_name", placeholder="Name Spectrum", type="text", style={"width":"100%"}),
html.Br(),
# Record dpectrum button
#html.Button("Record Spectrum", id="record", n_clicks=0)
html.A(html.Button(
'Download Data'),
id='download-link',
download="rawdata.csv",
href="",
target="_blank")
], style={"vertial-align":"bottom"}),
], style={"background":"#faf0fa", "padding":"15px"}, className="bgrid-item"),
], className="bgrid-container"),
# Aiden's Placeholders
# I am clearly missing something in how to use html
html.P(id='placeholder2', n_clicks = 0),
html.P(id='placeholder3', n_clicks = 0),
html.P(id='placeholder', n_clicks = 0),
html.P(id='placeholder4', n_clicks = 0),
html.P(id='placeholder5', n_clicks = 0),
html.P(id='placeholder6', n_clicks = 0),
html.P(id='placeholder7', n_clicks = 0),
# The help dropdown
html.Details(children=[
# The title of the help dropdown
html.Summary("Help"),
# The inside of the menu.
html.P("This is where we could put a quick how to for users that may be confused.")
], style={"width":"99.5%"})
])
'''app callbacks'''
@app.callback(
Output('placeholder7', 'n_clicks'),
[Input('wavelength1', 'value'), Input('wavelength2', 'value'), Input('range1', 'value'), Input('range2', 'value')])
def update_cal_sliders(wavelength1, wavelength2, range1, range2):
percent1 = 9.514159e-4*float(range1) +.05797
percent2 = 9.514159e-4*float(range2) +.05797
wavelength1 = float(wavelength1)
wavelength2 = float(wavelength2)
#transform = np.polyfit([percent1*len(spectrumPixel)+spectrumPixel[0], percent2*len(spectrumPixel)+spectrumPixel[0]],[wavelength1, wavelength2], 1)
transform = [0, 0]
transform[0] = (wavelength1 - wavelength2) / ((percent1 * len(spectrumPixel) + spectrumPixel[0]) - (percent2 * len(spectrumPixel) + spectrumPixel[0]))
transform[1] = wavelength1 - transform[0] * (percent1 * len(spectrumPixel) + spectrumPixel[0])
print('transform', transform)
global spectrumWave
spectrumWave = list(map(lambda val: val*transform[0]+ transform[1], spectrumPixel))
print(len(spectrumWave), len(spectrumPixel))
#print(spectrumWave)
#print('wavelengt', wavelength1, wavelength2)
#print('range', range1, range2)
print('percent', percent1, percent2)
#print('percent * lenght', percent1*len(spectrumWave), percent2*len(spectrumWave))
#print(transform)
#print(spectrumWave)
return 1
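# Illustrative sketch (not part of the original app): the calibration above is just a
# two-point linear fit from pixel position to wavelength, so np.polyfit over the two
# (pixel, wavelength) pairs reproduces the same slope/intercept. The numbers below
# are made up for demonstration (e.g. two mercury lamp emission lines).
def _example_two_point_calibration(pixels=(161.0, 782.0), wavelengths=(436.0, 546.0)):
    slope, intercept = np.polyfit(pixels, wavelengths, 1)
    return [slope * p + intercept for p in pixels]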
@app.callback(
Output('download-link', 'download'),
[Input('spectrum_name', 'value')])
def update_download_name(value):
print(value)
if(value == None):
return "rawdata.csv"
if(value == ''):
return "rawdata.csv"
return value + '.csv'
@app.callback(
Output('download-link', 'href'),
[Input('spectrum_name', 'value')])
def update_download_link(value):
print(value)
if(value == None):
return ''
if(value == ''):
return ''
#print(len(spectrumWave))
#print(len(spectrum))
if(is_ref):
specToGraph = calcRef(ref_spectrum, spectrum)
elif(is_abs):
specToGraph = calcAbs(ref_spectrum, spectrum)
else:
specToGraph = spectrum
print(len(spectrumWave), len(specToGraph))
output = {'Wavelength': spectrumWave, 'Intensity': specToGraph}
dff = pd.DataFrame(output)
# -*- coding: utf-8 -*-
from collections import OrderedDict
from datetime import date, datetime, timedelta
import numpy as np
import pytest
from pandas.compat import product, range
import pandas as pd
from pandas import (
Categorical, DataFrame, Grouper, Index, MultiIndex, Series, concat,
date_range)
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.reshape.pivot import crosstab, pivot_table
import pandas.util.testing as tm
@pytest.fixture(params=[True, False])
def dropna(request):
return request.param
class TestPivotTable(object):
def setup_method(self, method):
self.data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_pivot_table(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, values='D',
index=index, columns=columns)
table2 = self.data.pivot_table(
values='D', index=index, columns=columns)
tm.assert_frame_equal(table, table2)
# this works
pivot_table(self.data, values='D', index=index)
if len(index) > 1:
assert table.index.names == tuple(index)
else:
assert table.index.name == index[0]
if len(columns) > 1:
assert table.columns.names == columns
else:
assert table.columns.name == columns[0]
expected = self.data.groupby(
index + [columns])['D'].agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_table_nocols(self):
df = DataFrame({'rows': ['a', 'b', 'c'],
'cols': ['x', 'y', 'z'],
'values': [1, 2, 3]})
rs = df.pivot_table(columns='cols', aggfunc=np.sum)
xp = df.pivot_table(index='cols', aggfunc=np.sum).T
tm.assert_frame_equal(rs, xp)
rs = df.pivot_table(columns='cols', aggfunc={'values': 'mean'})
xp = df.pivot_table(index='cols', aggfunc={'values': 'mean'}).T
tm.assert_frame_equal(rs, xp)
def test_pivot_table_dropna(self):
df = DataFrame({'amount': {0: 60000, 1: 100000, 2: 50000, 3: 30000},
'customer': {0: 'A', 1: 'A', 2: 'B', 3: 'C'},
'month': {0: 201307, 1: 201309, 2: 201308, 3: 201310},
'product': {0: 'a', 1: 'b', 2: 'c', 3: 'd'},
'quantity': {0: 2000000, 1: 500000,
2: 1000000, 3: 1000000}})
pv_col = df.pivot_table('quantity', 'month', [
'customer', 'product'], dropna=False)
pv_ind = df.pivot_table(
'quantity', ['customer', 'product'], 'month', dropna=False)
m = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'), ('A', 'c'),
('A', 'd'), ('B', 'a'), ('B', 'b'),
('B', 'c'), ('B', 'd'), ('C', 'a'),
('C', 'b'), ('C', 'c'), ('C', 'd')],
names=['customer', 'product'])
tm.assert_index_equal(pv_col.columns, m)
tm.assert_index_equal(pv_ind.index, m)
def test_pivot_table_categorical(self):
cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'],
dropna=True)
exp_index = pd.MultiIndex.from_arrays(
[cat1, cat2],
names=['A', 'B'])
expected = DataFrame(
{'values': [1, 2, 3, 4]},
index=exp_index)
tm.assert_frame_equal(result, expected)
def test_pivot_table_dropna_categoricals(self, dropna):
# GH 15193
categories = ['a', 'b', 'c', 'd']
df = DataFrame({'A': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'],
'B': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'C': range(0, 9)})
df['A'] = df['A'].astype(CDT(categories, ordered=False))
result = df.pivot_table(index='B', columns='A', values='C',
dropna=dropna)
expected_columns = Series(['a', 'b', 'c'], name='A')
expected_columns = expected_columns.astype(
CDT(categories, ordered=False))
expected_index = Series([1, 2, 3], name='B')
expected = DataFrame([[0, 3, 6],
[1, 4, 7],
[2, 5, 8]],
index=expected_index,
columns=expected_columns,)
if not dropna:
# add back the non observed to compare
expected = expected.reindex(
columns=Categorical(categories)).astype('float')
tm.assert_frame_equal(result, expected)
def test_pivot_with_non_observable_dropna(self, dropna):
# gh-21133
df = pd.DataFrame(
{'A': pd.Categorical([np.nan, 'low', 'high', 'low', 'high'],
categories=['low', 'high'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3]},
index=pd.Index(
pd.Categorical.from_codes([0, 1],
categories=['low', 'high'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
# gh-21378
df = pd.DataFrame(
{'A': pd.Categorical(['left', 'low', 'high', 'low', 'high'],
categories=['low', 'high', 'left'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3, 0]},
index=pd.Index(
pd.Categorical.from_codes([0, 1, 2],
categories=['low', 'high', 'left'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
def test_pass_array(self):
result = self.data.pivot_table(
'D', index=self.data.A, columns=self.data.C)
expected = self.data.pivot_table('D', index='A', columns='C')
tm.assert_frame_equal(result, expected)
def test_pass_function(self):
result = self.data.pivot_table('D', index=lambda x: x // 5,
columns=self.data.C)
expected = self.data.pivot_table('D', index=self.data.index // 5,
columns='C')
tm.assert_frame_equal(result, expected)
def test_pivot_table_multiple(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, index=index, columns=columns)
expected = self.data.groupby(index + [columns]).agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_dtypes(self):
# can convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1, 2, 3, 4], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'int64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.sum)
result = z.get_dtype_counts()
expected = Series(dict(int64=2))
tm.assert_series_equal(result, expected)
# cannot convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1.5, 2.5, 3.5, 4.5], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'float64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.mean)
result = z.get_dtype_counts()
expected = Series(dict(float64=2))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('columns,values',
[('bool1', ['float1', 'float2']),
('bool1', ['float1', 'float2', 'bool1']),
('bool2', ['float1', 'float2', 'bool1'])])
def test_pivot_preserve_dtypes(self, columns, values):
# GH 7142 regression test
v = np.arange(5, dtype=np.float64)
df = DataFrame({'float1': v, 'float2': v + 2.0,
'bool1': v <= 2, 'bool2': v <= 3})
df_res = df.reset_index().pivot_table(
index='index', columns=columns, values=values)
result = dict(df_res.dtypes)
expected = {col: np.dtype('O') if col[0].startswith('b')
else np.dtype('float64') for col in df_res}
assert result == expected
def test_pivot_no_values(self):
# GH 14380
idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-01-02',
'2011-01-01', '2011-01-02'])
df = pd.DataFrame({'A': [1, 2, 3, 4, 5]},
index=idx)
res = df.pivot_table(index=df.index.month, columns=df.index.day)
exp_columns = pd.MultiIndex.from_tuples([('A', 1), ('A', 2)])
exp = pd.DataFrame([[2.5, 4.0], [2.0, np.nan]],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
df = pd.DataFrame({'A': [1, 2, 3, 4, 5],
'dt': pd.date_range('2011-01-01', freq='D',
periods=5)},
index=idx)
res = df.pivot_table(index=df.index.month,
columns=pd.Grouper(key='dt', freq='M'))
exp_columns = pd.MultiIndex.from_tuples([('A',
pd.Timestamp('2011-01-31'))])
exp_columns.names = [None, 'dt']
exp = pd.DataFrame([3.25, 2.0],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
res = df.pivot_table(index=pd.Grouper(freq='A'),
columns=pd.Grouper(key='dt', freq='M'))
exp = pd.DataFrame([3],
index=pd.DatetimeIndex(['2011-12-31']),
columns=exp_columns)
tm.assert_frame_equal(res, exp)
def test_pivot_multi_values(self):
result = pivot_table(self.data, values=['D', 'E'],
index='A', columns=['B', 'C'], fill_value=0)
expected = pivot_table(self.data.drop(['F'], axis=1),
index='A', columns=['B', 'C'], fill_value=0)
tm.assert_frame_equal(result, expected)
def test_pivot_multi_functions(self):
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
# margins not supported??
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func, margins=True)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_index_with_nan(self, method):
# GH 3588
nan = np.nan
df = DataFrame({'a': ['R1', 'R2', nan, 'R4'],
'b': ['C1', 'C2', 'C3', 'C4'],
'c': [10, 15, 17, 20]})
if method:
result = df.pivot('a', 'b', 'c')
else:
result = pd.pivot(df, 'a', 'b', 'c')
expected = DataFrame([[nan, nan, 17, nan], [10, nan, nan, nan],
[nan, 15, nan, nan], [nan, nan, nan, 20]],
index=Index([nan, 'R1', 'R2', 'R4'], name='a'),
columns=Index(['C1', 'C2', 'C3', 'C4'], name='b'))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df.pivot('b', 'a', 'c'), expected.T)
# GH9491
df = DataFrame({'a': pd.date_range('2014-02-01', periods=6, freq='D'),
'c': 100 + np.arange(6)})
df['b'] = df['a'] - pd.Timestamp('2014-02-02')
df.loc[1, 'a'] = df.loc[3, 'a'] = nan
df.loc[1, 'b'] = df.loc[4, 'b'] = nan
if method:
pv = df.pivot('a', 'b', 'c')
else:
pv = pd.pivot(df, 'a', 'b', 'c')
assert pv.notna().values.sum() == len(df)
for _, row in df.iterrows():
assert pv.loc[row['a'], row['b']] == row['c']
if method:
result = df.pivot('b', 'a', 'c')
else:
result = pd.pivot(df, 'b', 'a', 'c')
tm.assert_frame_equal(result, pv.T)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tz(self, method):
# GH 5878
df = DataFrame({'dt1': [datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0),
datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0)],
'dt2': [datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 2, 9, 0),
datetime(2014, 1, 2, 9, 0)],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'] * 2,
name='dt2', tz='Asia/Tokyo')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=exp_col)
if method:
pv = df.pivot(index='dt1', columns='dt2')
else:
pv = pd.pivot(df, index='dt1', columns='dt2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'],
name='dt2',
tz='Asia/Tokyo'))
if method:
pv = df.pivot(index='dt1', columns='dt2', values='data1')
else:
pv = pd.pivot(df, index='dt1', columns='dt2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_periods(self, method):
df = DataFrame({'p1': [pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D'),
pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D')],
'p2': [pd.Period('2013-01', 'M'),
pd.Period('2013-01', 'M'),
pd.Period('2013-02', 'M'),
pd.Period('2013-02', 'M')],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.PeriodIndex(['2013-01', '2013-02'] * 2,
name='p2', freq='M')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=exp_col)
if method:
pv = df.pivot(index='p1', columns='p2')
else:
pv = pd.pivot(df, index='p1', columns='p2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=pd.PeriodIndex(['2013-01', '2013-02'],
name='p2', freq='M'))
if method:
pv = df.pivot(index='p1', columns='p2', values='data1')
else:
pv = pd.pivot(df, index='p1', columns='p2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('values', [
['baz', 'zoo'], np.array(['baz', 'zoo']),
pd.Series(['baz', 'zoo']), pd.Index(['baz', 'zoo'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='foo', columns='bar', values=values)
else:
result = pd.pivot(df, index='foo', columns='bar', values=values)
data = [[1, 2, 3, 'x', 'y', 'z'],
[4, 5, 6, 'q', 'w', 't']]
index = Index(data=['one', 'two'], name='foo')
columns = MultiIndex(levels=[['baz', 'zoo'], ['A', 'B', 'C']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
names=[None, 'bar'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('values', [
['bar', 'baz'], np.array(['bar', 'baz']),
pd.Series(['bar', 'baz']), pd.Index(['bar', 'baz'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values_nans(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='zoo', columns='foo', values=values)
else:
result = pd.pivot(df, index='zoo', columns='foo', values=values)
data = [[np.nan, 'A', np.nan, 4],
[np.nan, 'C', np.nan, 6],
[np.nan, 'B', np.nan, 5],
['A', np.nan, 1, np.nan],
['B', np.nan, 2, np.nan],
['C', np.nan, 3, np.nan]]
index = Index(data=['q', 't', 'w', 'x', 'y', 'z'], name='zoo')
columns = MultiIndex(levels=[['bar', 'baz'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[None, 'foo'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason='MultiIndexed unstack with tuple names fails '
'with KeyError GH#19966')
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_multiindex(self, method):
# issue #17160
index = Index(data=[0, 1, 2, 3, 4, 5])
data = [['one', 'A', 1, 'x'],
['one', 'B', 2, 'y'],
['one', 'C', 3, 'z'],
['two', 'A', 4, 'q'],
['two', 'B', 5, 'w'],
['two', 'C', 6, 't']]
columns = MultiIndex(levels=[['bar', 'baz'], ['first', 'second']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
df = DataFrame(data=data, index=index, columns=columns, dtype='object')
if method:
result = df.pivot(index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
else:
result = pd.pivot(df,
index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
data = {'A': Series([1, 4], index=['one', 'two']),
'B': Series([2, 5], index=['one', 'two'])}
from typing import Any, Dict, List
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from ..utils import AUTO_DEVICE
from .common import MULTICLASS_STRATEGY, calc_multiclass_loss
from .model_utils import ReversalLayer
from .zoo import register_model
def elicit_lexicon(
weights: np.ndarray, vocab: List[str], colnames: List[str]
) -> pd.DataFrame:
nclass, vocabsize = weights.shape
assert len(colnames) == nclass
df = pd.DataFrame()
# Poslanci a Osoby (Members of Parliament and Persons)
# The agenda records persons, their assignments to bodies, their functions within those bodies, and the bodies themselves.
# For more information see https://www.psp.cz/sqw/hp.sqw?k=1301.
from os import path
import pandas as pd
import numpy as np
from parlamentikon.utility import *
from parlamentikon.Snemovna import *
from parlamentikon.TabulkyPoslanciOsoby import *
from parlamentikon.setup_logger import log
class PoslanciOsobyBase(SnemovnaZipDataMixin, SnemovnaDataFrame):
"""Obecná třída pro dceřiné třídy (Osoby, Organy, Poslanci, etc.)"""
def __init__(self, stahni=True, *args, **kwargs):
log.debug("--> PoslanciOsobyBase")
super().__init__(*args, **kwargs)
if stahni == True:
self.stahni_zip_data("poslanci")
log.debug("<-- PoslanciOsobyBase")
class TypOrgan(TabulkaTypOrganMixin, PoslanciOsobyBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.nacti_typ_organ()
self.nastav_dataframe(
self.tbl['typ_organ'],
odstran=['priorita'],
vyber=['id_typ_organ', 'nazev_typ_organ_cz', 'nazev_typ_organ_en'])
class Organy(TabulkaOrganyMixin, TypOrgan):
def __init__(self, *args, **kwargs):
log.debug("--> Organy")
super().__init__(*args, **kwargs)
self.nacti_organy()
# Připoj Typu orgánu
suffix = "__typ_organ"
self.tbl['organy'] = pd.merge(left=self.tbl['organy'], right=self.tbl['typ_organ'], on="id_typ_organ", suffixes=("",suffix), how='left')
# Odstraň nedůležité sloupce 'priorita', protože se vzájemně vylučují a nejspíš k ničemu nejsou.
# Tímto se vyhneme varování funkce 'drop_by_inconsistency.
self.tbl['organy'].drop(columns=["priorita", "priorita__typ_organ"], inplace=True)
self.tbl['organy'] = self.drop_by_inconsistency(self.tbl['organy'], suffix, 0.1, 'organy', 'typ_organ')
# Nastav volební období, pokud chybí
if self.volebni_obdobi == None:
self.volebni_obdobi = self._posledni_snemovna().od_organ.year
log.debug(f"Nastavuji začátek volebního období na: {self.volebni_obdobi}.")
if self.volebni_obdobi != -1:
x = self.tbl['organy'][
(self.tbl['organy'].nazev_organ_cz == 'Poslanecká sněmovna')
& (self.tbl['organy'].od_organ.dt.year == self.volebni_obdobi)
]
if len(x) == 1:
self.snemovna = x.iloc[0]
else:
log.error('Bylo nalezeno více sněmoven pro dané volební období!')
raise ValueError
self.tbl['organy'] = self.vyber_platne_organy()
self.nastav_dataframe(self.tbl['organy'])
log.debug("<-- Organy")
def vyber_platne_organy(self, df=None):
if df == None:
df = self.tbl['organy']
if self.volebni_obdobi == -1:
return df
ids_snemovnich_organu = expand_hierarchy(df, 'id_organ', 'organ_id_organ', [self.snemovna.id_organ])
# TODO: Kdy použít od_f místo od_o, resp. do_f místo do_o?
interval_start = df.od_organ\
.mask(df.od_organ.isna(), self.snemovna.od_organ)\
.mask(~df.od_organ.isna(), np.maximum(df.od_organ, self.snemovna.od_organ))
# Pozorování: volebni_obdobi_od není nikdy NaT => interval_start není nikdy NaT
if pd.isna(self.snemovna.do_organ): # příznak posledního volebního období
podminka_interval = (
(interval_start.dt.date <= df.do_organ.dt.date) # Nutná podmínka pro True: (interval_start != NaT, splněno vždy) a (do_organ != NaT)
| df.do_organ.isna() # Nutná podmínka pro True: (interval_start != NaT, splněno vždy) a (do_organ == NaT)
)
else: # Pozorování: předchozí volební období => interval_end není nikdy NaT
interval_end = df.do_organ\
.mask(df.do_organ.isna(), self.snemovna.do_organ)\
.mask(~df.do_organ.isna(), np.minimum(df.do_organ, self.snemovna.do_organ))
podminka_interval = (interval_start.dt.date <= interval_end.dt.date)
ids_jinych_snemoven = []
x = self._predchozi_snemovna()
if x is not None:
ids_jinych_snemoven.append(x.id_organ)
x = self._nasledujici_snemovna()
if x is not None:
ids_jinych_snemoven.append(x.id_organ)
#ids_jinych_snemovnich_organu = find_children_ids(ids_jinych_snemoven, 'id_organ', df, 'organ_id_organ', ids_jinych_snemoven, 0)
ids_jinych_snemovnich_organu = expand_hierarchy(df, 'id_organ', 'organ_id_organ', ids_jinych_snemoven)
podminka_nepatri_do_jine_snemovny = ~df.id_organ.isin(ids_jinych_snemovnich_organu)
df = df[
(df.id_organ.isin(ids_snemovnich_organu) == True)
| (podminka_interval & podminka_nepatri_do_jine_snemovny)
]
return df
def _posledni_snemovna(self):
"""Pomocná funkce, vrací data poslední sněmovny"""
p = self.tbl['organy'][(self.tbl['organy'].nazev_organ_cz == 'Poslanecká sněmovna') & (self.tbl['organy'].do_organ.isna())].sort_values(by=["od_organ"])
if len(p) == 1:
return p.iloc[0]
else:
return None
def _predchozi_snemovna(self, id_organ=None):
"""Pomocná funkce, vrací data předchozí sněmovny"""
# Pokud nebylo zadáno id_orgánu, implicitně vezmi id_organ dané sněmovny.
if id_organ == None:
id_organ = self.snemovna.id_organ
snemovny = self.tbl['organy'][self.tbl['organy'].nazev_organ_cz == 'Poslanecká sněmovna'].sort_values(by="do_organ").copy()
snemovny['id_predchozi_snemovny'] = snemovny.id_organ.shift(1)
idx = snemovny[snemovny.id_organ == id_organ].iloc[0].id_predchozi_snemovny
p = snemovny[snemovny.id_organ == idx]
assert len(p) <= 1
if len(p) == 1:
return p.iloc[0]
else:
return None
def _nasledujici_snemovna(self, id_organ=None):
"""Pomocná funkce, vrací data následující sněmovny"""
# Pokud nebylo zadáno id_orgánu, implicitně vezmi id_organ dané sněmovny.
if id_organ == None:
id_organ = self.snemovna.id_organ
snemovny = self.tbl['organy'][self.tbl['organy'].nazev_organ_cz == 'Poslanecká sněmovna'].sort_values(by="do_organ").copy()
snemovny['id_nasledujici_snemovny'] = snemovny.id_organ.shift(-1)
idx = snemovny[snemovny.id_organ == id_organ].iloc[0].id_nasledujici_snemovny
p = snemovny[snemovny.id_organ == idx]
assert len(p) <= 1
if len(p) == 1:
return p.iloc[0]
else:
return None
# The table defines the type of function within a body - for each type of body, the function types are defined. The function-type name texts are used in output instead of the texts in Funkce:nazev_funkce_LL.
# The TypFunkce class does not take the dependence on the parliamentary term into account, because that can only be obtained via the derived classes (ZarazeniOsoby).
class TypFunkce(TabulkaTypFunkceMixin, TypOrgan):
def __init__(self, *args, **kwargs):
log.debug("--> TypFunkce")
super().__init__(*args, **kwargs)
self.nacti_typ_funkce()
# Připoj Typu orgánu
suffix="__typ_organ"
self.tbl['typ_funkce'] = pd.merge(
left=self.tbl['typ_funkce'],
right=self.tbl['typ_organ'],
on="id_typ_organ",
suffixes=("", suffix),
how='left'
)
# Odstraň nedůležité sloupce 'priorita', protože se vzájemně vylučují a nejspíš ani k ničemu nejsou.
# Tímto se vyhneme varování v 'drop_by_inconsistency'.
self.tbl['typ_funkce'].drop(columns=["priorita", "priorita__typ_organ"], inplace=True)
self.tbl['typ_funkce'] = self.drop_by_inconsistency(self.tbl['typ_funkce'], suffix, 0.1, t1_name='typ_funkce', t2_name='typ_organ', t1_on='id_typ_organ', t2_on='id_typ_organ')
self.nastav_dataframe(
self.tbl['typ_funkce'],
vyber=['id_typ_funkce', 'typ_funkce_cz', 'typ_funkce_en', 'typ_funkce_obecny'],
odstran=['typ_funkce_obecny__ORIG']
)
log.debug("<-- TypFunkce")
class Funkce(TabulkaFunkceMixin, Organy, TypFunkce):
def __init__(self, *args, **kwargs):
log.debug("--> Funkce")
super().__init__(*args, **kwargs)
self.nacti_funkce()
# Zúžení
self.vyber_platne_funkce()
# Připoj Orgány
suffix = "__organy"
self.tbl['funkce'] = pd.merge(
left=self.tbl['funkce'],
right=self.tbl['organy'],
on='id_organ',
suffixes=("", suffix),
how='left'
)
self.tbl['funkce'] = self.drop_by_inconsistency(self.tbl['funkce'], suffix, 0.1, 'funkce', 'organy')
# Připoj Typ funkce
suffix = "__typ_funkce"
self.tbl['funkce'] = pd.merge(left=self.tbl['funkce'], right=self.tbl['typ_funkce'], on="id_typ_funkce", suffixes=("", suffix), how='left')
# Fix the known inconsistency in the data
x = self.tbl['funkce']
idx = x[(x.id_typ_organ == 42) & (x.id_typ_organ__typ_funkce == 15)].index
log.debug(f"Řešení známé nekonzistence v datech: Upřednostňuji sloupce z tabulky 'funkce' před 'typ_funkce' pro {len(idx)} hodnot.")
to_update = ['id_typ_organ', 'typ_id_typ_organ', 'nazev_typ_organ_cz', 'nazev_typ_organ_en', 'typ_organ_obecny']
for i in to_update:
x.at[idx, i + '__typ_funkce'] = x.loc[idx][i]
self.tbl['funkce'] = self.drop_by_inconsistency(self.tbl['funkce'], suffix, 0.1, 'funkce', 'typ_funkce', t1_on='id_typ_funkce', t2_on='id_typ_funkce')
if self.volebni_obdobi != -1:
assert len(self.tbl['funkce'][self.tbl['funkce'].id_organ.isna()]) == 0
self.nastav_dataframe(self.tbl['funkce'])
log.debug("<-- Funkce")
def vyber_platne_funkce(self):
if self.volebni_obdobi != -1:
self.tbl['funkce'] = self.tbl['funkce'][self.tbl['funkce'].id_organ.isin(self.tbl['organy'].id_organ)]
class Osoby(TabulkaOsobaExtraMixin, TabulkaOsobyMixin, PoslanciOsobyBase):
def __init__(self, *args, **kwargs):
log.debug("--> Osoby")
super(Osoby, self).__init__(*args, **kwargs)
self.nacti_osoby()
self.nacti_osoba_extra()
#suffix='__osoba_extra'
#self.tbl['osoby'] = pd.merge(left=self.tbl['osoby'], right=self.tbl['osoba_extra'], on="id_osoba", how="left", suffixes=('', suffix))
#self.drop_by_inconsistency(self.tbl['osoby'], suffix, 0.1, 'hlasovani', 'osoba_extra', inplace=True)
self.nastav_dataframe(self.tbl['osoby'])
log.debug("<-- Osoby")
class ZarazeniOsoby(TabulkaZarazeniOsobyMixin, Funkce, Organy, Osoby):
def __init__(self, *args, **kwargs):
log.debug("--> ZarazeniOsoby")
super().__init__(*args, **kwargs)
self.nacti_zarazeni_osoby()
# Připoj Osoby
suffix = "__osoby"
self.tbl['zarazeni_osoby'] = pd.merge(left=self.tbl['zarazeni_osoby'], right=self.tbl['osoby'], on='id_osoba', suffixes = ("", suffix), how='left')
self.tbl['zarazeni_osoby'] = self.drop_by_inconsistency(self.tbl['zarazeni_osoby'], suffix, 0.1, 'zarazeni_osoby', 'osoby')
# Připoj Organy
suffix = "__organy"
sub1 = self.tbl['zarazeni_osoby'][self.tbl['zarazeni_osoby'].cl_funkce == 'členství'].reset_index()
if self.volebni_obdobi == -1:
m1 = pd.merge(left=sub1, right=self.tbl['organy'], left_on='id_of', right_on='id_organ', suffixes=("", suffix), how='left')
else:
# Pozor, how='left' nestačí, 'inner' se podílí na zúžení na danou sněmovnu
m1 = pd.merge(left=sub1, right=self.tbl['organy'], left_on='id_of', right_on='id_organ', suffixes=("", suffix), how='inner')
m1 = self.drop_by_inconsistency(m1, suffix, 0.1, 'zarazeni_osoby', 'organy')
# Připoj Funkce
sub2 = self.tbl['zarazeni_osoby'][self.tbl['zarazeni_osoby'].cl_funkce == 'funkce'].reset_index()
if self.volebni_obdobi == -1:
m2 = pd.merge(left=sub2, right=self.tbl['funkce'], left_on='id_of', right_on='id_funkce', suffixes=("", suffix), how='left')
else:
# Pozor, how='left' nestačí, 'inner' se podílí na zúžení na danou sněmovnu
m2 = pd.merge(left=sub2, right=self.tbl['funkce'], left_on='id_of', right_on='id_funkce', suffixes=("", suffix), how='inner')
m2 = self.drop_by_inconsistency(m2, suffix, 0.1, 'zarazeni_osoby', 'funkce')
self.tbl['zarazeni_osoby'] = pd.concat([m1, m2], axis=0, ignore_index=True).set_index('index').sort_index()
# Zúžení na dané volební období
self.vyber_platne_zarazeni_osoby()
self.nastav_dataframe(self.tbl['zarazeni_osoby'])
log.debug("<-- ZarazeniOsoby")
def vyber_platne_zarazeni_osoby(self):
if self.volebni_obdobi != -1:
interval_start = self.tbl['zarazeni_osoby'].od_o\
.mask(self.tbl['zarazeni_osoby'].od_o.isna(), self.snemovna.od_organ)\
.mask(~self.tbl['zarazeni_osoby'].od_o.isna(), np.maximum(self.tbl['zarazeni_osoby'].od_o, self.snemovna.od_organ))
# Pozorování: volebni_obdobi_od není nikdy NaT => interval_start není nikdy NaT
if pd.isna(self.snemovna.do_organ): # příznak posledního volebního období
podminka_interval = (
(interval_start.dt.date <= self.tbl['zarazeni_osoby'].do_o.dt.date) # Nutná podmínka pro True: (interval_start != NaT, splněno vždy) a (do_o != NaT)
| (self.tbl['zarazeni_osoby'].do_o.isna()) # Nutná podmínka pro True: (interval_start != NaT, splněno vždy) a (do_o == NaT)
)
else: # Pozorování: předchozí volební období => interval_end není nikdy NaT
interval_end = self.tbl['zarazeni_osoby'].do_o\
.mask(self.tbl['zarazeni_osoby'].do_o.isna(), self.snemovna.do_organ)\
.mask(~self.tbl['zarazeni_osoby'].do_o.isna(), np.minimum(self.tbl['zarazeni_osoby'].do_o, self.snemovna.do_organ))
podminka_interval = (interval_start.dt.date <= interval_end.dt.date)
self.tbl['zarazeni_osoby'] = self.tbl['zarazeni_osoby'][podminka_interval]
class Poslanci(TabulkaPoslanciPkgpsMixin, TabulkaPoslanciMixin, ZarazeniOsoby, Organy):
def __init__(self, *args, **kwargs):
log.debug("--> Poslanci")
super().__init__(*args, **kwargs)
self.nacti_poslanci_pkgps()
self.nacti_poslance()
# Zúžení na dané volební období
if self.volebni_obdobi != -1:
self.tbl['poslanci'] = self.tbl['poslanci'][self.tbl['poslanci'].id_organ == self.snemovna.id_organ]
# Připojení informace o osobě, např. jméno a příjmení
suffix = "__osoby"
self.tbl['poslanci'] = pd.merge(left=self.tbl['poslanci'], right=self.tbl['osoby'], on='id_osoba', suffixes=("", suffix), how='left')
import pandas as pd
from evidently.dashboard.widgets.utils import CutQuantileTransformer
import pytest
@pytest.mark.parametrize(
"side, quantile, test_data",
(
(
"right",
0.50,
| pd.DataFrame({"data": [1, 2, 3, 4]}) | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
data_dir = '../data/'
train = pd.read_csv(data_dir + 'train.csv')
msc = train[train.city == 'Москва']
(_, msc0), (_, msc1) = msc.groupby('price_type')
counts = msc.groupby('price_type').count()['city'].to_dict()
ratio = counts[0] // counts[1]
df = pd.concat([msc0] + [msc1] * ratio)
__all__ = ['eig_seg',
'initialize_eigenanatomy',
'sparse_decom2']
import numpy as np
from scipy.stats import pearsonr
import pandas as pd
from .. import core
from .. import utils
from ..core import ants_image as iio
def sparse_decom2(inmatrix,
inmask=(None, None),
sparseness=(0.01, 0.01),
nvecs=3,
its=20,
cthresh=(0,0),
statdir=None,
perms=0,
uselong=0,
z=0,
smooth=0,
robust=0,
mycoption=0,
initialization_list=[],
initialization_list2=[],
ell1=10,
prior_weight=0,
verbose=False,
rejector=0,
max_based=False,
version=1):
"""
Decomposes two matrices into paired sparse eigenevectors to
maximize canonical correlation - aka Sparse CCA.
Note: we do not scale the matrices internally. We leave
scaling choices to the user.
ANTsR function: `sparseDecom2`
Arguments
---------
inmatrix : 2-tuple of ndarrays
input as inmatrix=(mat1,mat2). n by p input matrix and n by q input matrix , spatial variable lies along columns.
inmask : 2-tuple of ANTsImage types (optional - one or both)
optional pair of image masks
sparseness : tuple
a pair of float values e.g c(0.01,0.1) enforces an unsigned 99 percent and 90 percent sparse solution for each respective view
nvecs : integer
number of eigenvector pairs
its : integer
number of iterations, 10 or 20 usually sufficient
cthresh : 2-tuple
cluster threshold pair
statdir : string (optional)
temporary directory if you want to look at full output
perms : integer
number of permutations. Setting perms greater than 0 will estimate significance per vector empirically. For small datasets, these may be conservative. p-values depend on how one scales the input matrices.
uselong : boolean
enforce solutions of both views to be the same - requires matrices to be the same size
z : float
subject space (low-dimensional space) sparseness value
smooth : float
smooth the data (only available when mask is used)
robust : boolean
rank transform input matrices
mycoption : integer
enforce 1 - spatial orthogonality, 2 - low-dimensional orthogonality or 0 - both
initialization_list : list
initialization for first view
initialization_list2 : list
initialization for 2nd view
ell1 : float
gradient descent parameter, if negative then l0 otherwise use l1
prior_weight : scalar
Scalar value weight on prior between 0 (prior is weak) and 1 (prior is strong). Only engaged if initialization is used
verbose : boolean
activates verbose output to screen
rejector : scalar
rejects small correlation solutions
max_based : boolean
whether to choose max-based thresholding
Returns
-------
dict w/ following key/value pairs:
`projections` : ndarray
X projections
`projections2` : ndarray
Y projections
`eig1` : ndarray
X components
`eig2` : ndarray
Y components
`summary` : pd.DataFrame
first column is canonical correlations,
second column is p-values (these remain `None` unless perms > 0)
Example
-------
>>> import numpy as np
>>> import ants
>>> mat = np.random.randn(20, 100)
>>> mat2 = np.random.randn(20, 90)
>>> mydecom = ants.sparse_decom2(inmatrix = (mat,mat2),
sparseness=(0.1,0.3), nvecs=3,
its=3, perms=0)
"""
if inmatrix[0].shape[0] != inmatrix[1].shape[0]:
raise ValueError('Matrices must have same number of rows (samples)')
idim = 3
if isinstance(inmask[0], iio.ANTsImage):
maskx = inmask[0].clone('float')
idim = inmask[0].dimension
hasmaskx = 1
elif isinstance(inmask[0], np.ndarray):
maskx = core.from_numpy(inmask[0], pixeltype='float')
idim = inmask[0].ndim
hasmaskx = 1
else:
maskx = core.make_image([1]*idim, pixeltype='float')
hasmaskx = -1
if isinstance(inmask[1], iio.ANTsImage):
masky = inmask[1].clone('float')
idim = inmask[1].dimension
hasmasky = 1
elif isinstance(inmask[1], np.ndarray):
masky = core.from_numpy(inmask[1], pixeltype='float')
idim = inmask[1].ndim
hasmasky = 1
else:
masky = core.make_image([1]*idim, pixeltype='float')
hasmasky = -1
inmask = [maskx, masky]
if robust > 0:
raise NotImplementedError('robust > 0 not currently implemented')
else:
input_matrices = inmatrix
if idim == 2:
if version == 1:
sccancpp_fn = utils.get_lib_fn('sccanCpp2D')
elif version == 2:
sccancpp_fn = utils.get_lib_fn('sccanCpp2DV2')
input_matrices = (input_matrices[0].tolist(), input_matrices[1].tolist())
elif idim ==3:
if version == 1:
sccancpp_fn = utils.get_lib_fn('sccanCpp3D')
elif version == 2:
sccancpp_fn = utils.get_lib_fn('sccanCpp3DV2')
input_matrices = (input_matrices[0].tolist(), input_matrices[1].tolist())
outval = sccancpp_fn(input_matrices[0], input_matrices[1],
inmask[0].pointer, inmask[1].pointer,
hasmaskx, hasmasky,
sparseness[0], sparseness[1],
nvecs, its,
cthresh[0], cthresh[1],
z, smooth,
initialization_list, initialization_list2,
ell1, verbose,
prior_weight, mycoption, max_based)
p1 = np.dot(input_matrices[0], outval['eig1'].T)
p2 = np.dot(input_matrices[1], outval['eig2'].T)
outcorrs = np.array([pearsonr(p1[:,i],p2[:,i])[0] for i in range(p1.shape[1])])
if prior_weight < 1e-10:
myord = np.argsort(np.abs(outcorrs))[::-1]
outcorrs = outcorrs[myord]
p1 = p1[:, myord]
p2 = p2[:, myord]
outval['eig1'] = outval['eig1'][myord,:]
outval['eig2'] = outval['eig2'][myord,:]
cca_summary = np.vstack((outcorrs,[None]*len(outcorrs))).T
if perms > 0:
cca_summary[:,1] = 0
nsubs = input_matrices[0].shape[0]
for permer in range(perms):
m1 = input_matrices[0][np.random.permutation(nsubs),:]
m2 = input_matrices[1][np.random.permutation(nsubs),:]
outvalperm = sccancpp_fn(m1, m2,
inmask[0].pointer, inmask[1].pointer,
hasmaskx, hasmasky,
sparseness[0], sparseness[1],
nvecs, its,
cthresh[0], cthresh[1],
z, smooth,
initialization_list, initialization_list2,
ell1, verbose,
prior_weight, mycoption, max_based)
p1perm = np.dot(m1, outvalperm['eig1'].T)
p2perm = np.dot(m2, outvalperm['eig2'].T)
outcorrsperm = np.array([pearsonr(p1perm[:,i],p2perm[:,i])[0] for i in range(p1perm.shape[1])])
if prior_weight < 1e-10:
myord = np.argsort(np.abs(outcorrsperm))[::-1]
outcorrsperm = outcorrsperm[myord]
counter = np.abs(cca_summary[:,0]) < np.abs(outcorrsperm)
counter = counter.astype('int')
cca_summary[:,1] = cca_summary[:,1] + counter
cca_summary[:,1] = cca_summary[:,1] / float(perms)
return {'projections': p1,
'projections2': p2,
'eig1': outval['eig1'].T,
'eig2': outval['eig2'].T,
'summary': | pd.DataFrame(cca_summary,columns=['corrs','pvalues']) | pandas.DataFrame |
'''
Key take-away: feature engineering is important. Garbage in = Garbage Out
'''
from cleanData import cleanData
import time
import sys
plotBool = int(sys.argv[1]) if len(sys.argv)>1 else 0
resampleDataBool = int(sys.argv[2]) if len(sys.argv)>2 else 1
MISelectorBool = int(sys.argv[3]) if len(sys.argv)>3 else 0
start = time.time()
data,dataPreCovid,dataPostCovid = cleanData(verbose=0)
end = time.time()
print('Time: Data Extraction: {} seconds'.format(end - start) );
'''
Import libraries needed
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
## General regression and classification functions: validation
from regressionLib import splitCV, plotBetaAccuracy
from regressionLib import confusionMatrix, metrics
from regressionLib import flatten
## Exploration and cluster analysis
from sklearn.cluster import KMeans,MeanShift
from regressionLib import corrMatrix, corrMatrixHighCorr
## Models
from sklearn.linear_model import LogisticRegression,Perceptron
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
## Plots
from regressionLib import plotPredictorVsResponse
'''
Data Dictionaries
'''
## Only select predictors highly correlated with severity
print('Correlation with severity')
def predictorsCorrelatedWithTarget(data):
correlation = [1]
for i in range(1,len(data.columns)):
correlation.append(np.corrcoef(data[[data.columns[0],data.columns[i]]].T)[0,1])
correlation = np.array(correlation)
sortedCorr = np.sort(np.abs(correlation))
sortedCorrIdx = np.argsort(np.abs(correlation))
cols = list(data.columns[sortedCorrIdx[sortedCorr>0.05]]) ## at least 5% correlation needed
return cols
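# Rough usage sketch (kept as a comment; column names and values are made up). The helper
# assumes the target is the first column and keeps it plus every column whose absolute
# correlation with it is at least 0.05:
# toy = pd.DataFrame({'Severity': [1, 2, 3, 4], 'rain': [0, 1, 1, 1], 'noise': [5, 5, 5, 5]})
# predictorsCorrelatedWithTarget(toy) # keeps 'rain' and 'Severity'; the constant 'noise' is dropped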
def prepDataForTraining(data):
predictorColNames = list(data.columns)
predictorColNames.remove('Severity')
X = np.array(data[predictorColNames])
targetColNames = ['Severity']
Y = np.array(data['Severity'])
dataDict = {'X':X,
'Y':Y,
'predictorNames':predictorColNames,
'targetName':targetColNames}
return dataDict
#################################################################################################################
# ### TEMP CODE: DELETE LATER
# dataDict = prepDataForTraining(data)
# dataDictPreCovid = prepDataForTraining(dataPreCovid)
# dataDictPostCovid = prepDataForTraining(dataPostCovid)
# # Correlation matrix: ALL VARIABLES
# if plotBool == 0:
# predictors = pd.DataFrame(dataDict['X'], columns=dataDict['predictorNames'])
# fig = corrMatrixHighCorr(predictors)
# fig.savefig('Plots/CorrMatrixHighThreshRAW.svg')
# fig = corrMatrix(predictors)
# fig.savefig('Plots/CorrMatrixRAW.svg')
# predictorsPreCovid = pd.DataFrame(dataDictPreCovid['X'], columns=dataDictPreCovid['predictorNames'])
# fig = corrMatrixHighCorr(predictorsPreCovid)
# fig.savefig('Plots/CorrMatrixHighThreshPreCovidRAW.svg')
# fig = corrMatrix(predictorsPreCovid)
# fig.savefig('Plots/CorrMatrixPreCovidRAW.svg')
# predictorsPostCovid = pd.DataFrame(dataDictPostCovid['X'], columns=dataDictPostCovid['predictorNames'])
# fig = corrMatrixHighCorr(predictorsPostCovid)
# fig.savefig('Plots/CorrMatrixHighThreshPostCovidRAW.svg')
# fig = corrMatrix(predictorsPostCovid)
# fig.savefig('Plots/CorrMatrixPostCovidRAW.svg')
# #################################################################################################################
dataDict = prepDataForTraining(data[predictorsCorrelatedWithTarget(data)])
dataDictPreCovid = prepDataForTraining(dataPreCovid[predictorsCorrelatedWithTarget(dataPreCovid)])
dataDictPostCovid = prepDataForTraining(dataPostCovid[predictorsCorrelatedWithTarget(dataPostCovid)])
## Mutual information between selected predictors and target
# Mutual information: MI(X,Y) = Dkl( P(X,Y) || Px \crossproduct Py)
from sklearn.feature_selection import mutual_info_classif
def mutualInfoPredictorsTarget(dataDict):
MI = mutual_info_classif(dataDict['X'],dataDict['Y'])
return ['{}: {}'.format(name,MI[i]) for i,name in enumerate(dataDict['predictorNames']) ]
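# The returned list looks like ['<predictorName>: <MI score>', ...], e.g. (illustrative values only)
# ['Humidity(%): 0.0123', 'Visibility(mi): 0.0041'], which is convenient for a quick manual scan.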
if MISelectorBool != 0:
print('Mutual Information: data\n{}\n'.format( mutualInfoPredictorsTarget(dataDict) ) )
print('Mutual Information: dataPreCovid\n{}\n'.format( mutualInfoPredictorsTarget(dataDictPreCovid) ) )
print('Mutual Information: dataPostCovid\n{}\n'.format( mutualInfoPredictorsTarget(dataDictPostCovid) ) )
if resampleDataBool != 0:
from regressionLib import resampleData
dataDict = resampleData(dataDict)
dataDictPreCovid = resampleData(dataDictPreCovid)
dataDictPostCovid = resampleData(dataDictPostCovid)
'''
Correlation matrix: Features
'''
if plotBool != 0:
predictors = pd.DataFrame(dataDict['X'], columns=dataDict['predictorNames'])
fig = corrMatrixHighCorr(predictors)
fig.savefig('Plots/CorrMatrixHighThreshfeat.svg')
fig = corrMatrix(predictors)
fig.savefig('Plots/CorrMatrixfeat.svg')
predictorsPreCovid = pd.DataFrame(dataDictPreCovid['X'], columns=dataDictPreCovid['predictorNames'])
fig = corrMatrixHighCorr(predictorsPreCovid)
fig.savefig('Plots/CorrMatrixHighThreshPreCovidfeat.svg')
fig = corrMatrix(predictorsPreCovid)
fig.savefig('Plots/CorrMatrixPreCovidfeat.svg')
predictorsPostCovid = pd.DataFrame(dataDictPostCovid['X'], columns=dataDictPostCovid['predictorNames'])
fig = corrMatrixHighCorr(predictorsPostCovid)
fig.savefig('Plots/CorrMatrixHighThreshPostCovidfeat.svg')
fig = corrMatrix(predictorsPostCovid)
fig.savefig('Plots/CorrMatrixPostCovidfeat.svg')
# #############################################################################
# sys.exit("Just wanted correlation matrices lol")
# #############################################################################
## Initial model selection study: using testTrain split and credible intervals, binomial significance
'''
Training models: Base model
'''
XTrain,XTest,YTrain,YTest,idxTrain,idxTest = splitCV(dataDict['X'],
dataDict['Y'],
returnIdx=True).testTrain(testRatio=0.05)
XTrainPreCovid,XTestPreCovid,YTrainPreCovid,YTestPreCovid,idxTrainPreCovid,idxTestPreCovid = splitCV(dataDictPreCovid['X'],
dataDictPreCovid['Y'],
returnIdx=True).testTrain(testRatio=0.05)
XTrainPostCovid,XTestPostCovid,YTrainPostCovid,YTestPostCovid,idxTrainPostCovid,idxTestPostCovid = splitCV(dataDictPostCovid['X'],
dataDictPostCovid['Y'],
returnIdx=True).testTrain(testRatio=0.05)
'''
Train Models and Test: Draw beta distribution of accuracy.
## base model: logistic regression (location 0)
## All multiclass classifiers are declared here and fit(), predict() methods form sklearn model classes are used
'''
Mdls = {'MdlName': ['Logistic Regression',
'Random Forest: Bootstrap Aggregation',
'Random Forest: AdaBoost',
'Neural Network: 3 hidden layers, 50 hidden units'],
'Mdl': [ LogisticRegression(max_iter=5000) ,
RandomForestClassifier(n_estimators=100,criterion='entropy',max_depth=10,min_samples_leaf=100,min_samples_split=150,bootstrap=True),
AdaBoostClassifier(base_estimator = DecisionTreeClassifier(criterion='entropy',max_depth=5) , n_estimators=50, learning_rate=1),
MLPClassifier(hidden_layer_sizes=(50,50,50,), alpha=0.1 , max_iter=2000, activation = 'logistic', solver='adam') ],
'Predictions': np.zeros(shape=(4,),dtype='object'),
'Confusion Matrix': np.zeros(shape=(4,),dtype='object') }
MdlsPreCovid = {'MdlName': ['Logistic Regression: Pre-Covid',
'Random Forest: Bootstrap Aggregation: Pre-Covid',
'Random Forest: AdaBoost: Pre-Covid',
'Neural Network: 3 hidden layers, 50 hidden units'],
'Mdl':[LogisticRegression(max_iter=5000) ,
RandomForestClassifier(n_estimators=100,criterion='entropy',max_depth=10,min_samples_leaf=100,min_samples_split=150,bootstrap=True),
AdaBoostClassifier(base_estimator = DecisionTreeClassifier(criterion='entropy',max_depth=5) , n_estimators=50, learning_rate=1),
MLPClassifier(hidden_layer_sizes=(50,50,50,), alpha=0.1 , max_iter=2000, activation = 'logistic', solver='adam') ],
'Predictions': np.zeros(shape=(4,),dtype='object'),
'Confusion Matrix': np.zeros(shape=(4,),dtype='object') }
MdlsPostCovid = {'MdlName': ['Logistic Regression: Post-Covid',
'Random Forest: Bootstrap Aggregation: Post-Covid',
'Random Forest: AdaBoost: Post-Covid',
'Neural Network: 3 hidden layers, 50 hidden units'],
'Mdl':[LogisticRegression(max_iter=5000) ,
RandomForestClassifier(n_estimators=100,criterion='entropy',max_depth=10,min_samples_leaf=100,min_samples_split=150,bootstrap=True),
AdaBoostClassifier(base_estimator = DecisionTreeClassifier(criterion='entropy',max_depth=5) , n_estimators=50, learning_rate=1),
MLPClassifier(hidden_layer_sizes=(50,50,50,), alpha=0.1 , max_iter=2000, activation = 'logistic', solver='adam') ],
'Predictions': np.zeros(shape=(4,),dtype='object'),
'Confusion Matrix': np.zeros(shape=(4,),dtype='object') }
## Fit sklearn models
def fitTestModel(Mdl,MdlName,XTrain,YTrain,XTest,YTest,saveLocation=None):
start = time.time()
Mdl.fit(XTrain, YTrain)
end = time.time()
print('Time: {}: {} seconds'.format(MdlName,end - start) )
pred = []
for i in range(XTest.shape[0]):
pred.append(Mdl.predict(XTest[i].reshape(1,-1)))
pred = np.array(pred).reshape(YTest.shape)
accuracy = np.mean(pred == YTest)
print('Accuracy: {}'.format(accuracy) )
if type(saveLocation)!=type(None):
plotBetaAccuracy(accuracy,XTest.shape[0],saveLocation)
else:
plotBetaAccuracy(accuracy,XTest.shape[0])
cMatrix = confusionMatrix(classificationTest = pred,
Ytest = pd.Series(YTest))
overallAccuracy, userAccuracy, producerAccuracy, kappaCoeff = metrics(cMatrix)
print('Overall Accuracy: {}'.format(np.round(overallAccuracy,3)))
print("User's Accuracy: {}".format(np.round(userAccuracy,3)))
print("Producer's Accuracy: {}".format(np.round(producerAccuracy,3)))
print('Kappa Coefficient: {}\n'.format(np.round(kappaCoeff,6)))
print('########################################################\n')
return Mdl,pred,cMatrix
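# Note: plotBetaAccuracy presumably plots the Beta(k+1, n-k+1) posterior over the true accuracy
# given k correct predictions out of n test samples (the "credible intervals" mentioned earlier);
# the exact implementation lives in regressionLib and is not shown here.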
def cMatrixPlots(cMatrixList,YTest,MdlNames):
## DO NOT CALL THIS FUNCTION IN SCRIPT. Use it only in jupyter to plot confusion matrices
fig,axs = plt.subplots(nrows=2,ncols=np.ceil(len(cMatrixList)/2).astype(int),figsize=(3*len(cMatrixList),8))
ax = axs.reshape(-1)
cMatrixLabels = list(pd.Series(YTest).unique())
if len(cMatrixList)<=1:
ax = [ax]
for i,cMatrix in enumerate(cMatrixList):
img = ax[i].imshow(cMatrix,cmap='gray')
ax[i].set_xticks(np.arange(len(cMatrixLabels)))
ax[i].set_xticklabels(cMatrixLabels)
ax[i].set_yticks(np.arange(len(cMatrixLabels)))
ax[i].set_yticklabels(cMatrixLabels)
ax[i].set_xlabel('Severity Class (Actual)')
ax[i].set_ylabel('Severity Class (Predicted)')
ax[i].set_title(MdlNames[i])
for j in range(len(cMatrixLabels)):
for k in range(len(cMatrixLabels)):
ax[i].text(j-0.25,k,int(cMatrix[k,j]),color='blue',fontweight='semibold',fontsize=18)
fig.colorbar(mappable=img,ax = ax[i], fraction=0.1)
fig.tight_layout()
return fig,ax
def cMatrixPlot_single(cMatrix,YTest,MdlName):
## DO NOT CALL THIS FUNCTION IN SCRIPT. Use it only in jupyter to plot confusion matrices
fig,ax = plt.subplots(nrows=1,ncols=1,figsize=(3.5,3.5))
cMatrixLabels = list(pd.Series(YTest).unique())
img = ax.imshow(cMatrix,cmap='gray')
ax.set_xticks(np.arange(len(cMatrixLabels)))
ax.set_xticklabels(cMatrixLabels)
ax.set_yticks(np.arange(len(cMatrixLabels)))
ax.set_yticklabels(cMatrixLabels)
ax.set_xlabel('Severity Class (Actual)')
ax.set_ylabel('Severity Class (Predicted)')
ax.set_title(MdlName)
for j in range(len(cMatrixLabels)):
for k in range(len(cMatrixLabels)):
ax.text(j-0.25,k,int(cMatrix[k,j]),color='blue',fontweight='semibold',fontsize=18)
fig.colorbar(mappable=img,ax = ax, fraction=0.1)
fig.tight_layout()
return fig,ax
for i in range(len(Mdls['Mdl'])):
Mdls['Mdl'][i] , \
Mdls['Predictions'][i], \
Mdls['Confusion Matrix'][i] = fitTestModel(Mdl=Mdls['Mdl'][i],MdlName=Mdls['MdlName'][i],
XTrain=XTrain, YTrain=YTrain, XTest=XTest, YTest=YTest,
saveLocation='./Plots/report plots/mdlSelection/beta_{}.eps'.format(i))
for i in range(len(MdlsPreCovid['Mdl'])):
MdlsPreCovid['Mdl'][i] , \
MdlsPreCovid['Predictions'][i], \
MdlsPreCovid['Confusion Matrix'][i] = fitTestModel(Mdl=MdlsPreCovid['Mdl'][i],MdlName=MdlsPreCovid['MdlName'][i],
XTrain=XTrainPreCovid, YTrain=YTrainPreCovid, XTest=XTestPreCovid, YTest=YTestPreCovid)
for i in range(len(MdlsPostCovid['Mdl'])):
MdlsPostCovid['Mdl'][i] , \
MdlsPostCovid['Predictions'][i], \
MdlsPostCovid['Confusion Matrix'][i] = fitTestModel(Mdl=MdlsPostCovid['Mdl'][i],MdlName=MdlsPostCovid['MdlName'][i],
XTrain=XTrainPostCovid, YTrain=YTrainPostCovid, XTest=XTestPostCovid, YTest=YTestPostCovid)
if plotBool != 0:
predictorsTest = pd.DataFrame(XTest, columns=dataDict['predictorNames'])
for i in range(len(predictorsTest.columns)):
fig = plotPredictorVsResponse(predictorsDataFrame=predictorsTest,
predictorName=predictorsTest.columns[i],
actualResponse=YTest,
predictedResponse=Mdls['Predictions'][0],
hueVarName='preCovid',
labels=['Pre-Covid','Post-Covid'])
fig.savefig('./Plots/Logistic results/complete data/fig_{}.jpg'.format(i),dpi=300)
predictorsTestPreCovid = pd.DataFrame(XTestPreCovid, columns=dataDictPreCovid['predictorNames'])
for i in range(len(predictorsTestPreCovid.columns)):
fig = plotPredictorVsResponse(predictorsDataFrame=predictorsTestPreCovid,
predictorName=predictorsTestPreCovid.columns[i],
actualResponse=YTestPreCovid,
predictedResponse=MdlsPreCovid['Predictions'][0],
hueVarName=None,
labels=['Pre-Covid','Post-Covid'])
fig.savefig('./Plots/Logistic results/preCovid/fig_{}.jpg'.format(i),dpi=300)
'''
Perceptron
'''
def predictPerceptron(Wx):
predictions = []
for val in Wx:
if val>0: predictions.append(1)
else: predictions.append(0)
return predictions
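# predictPerceptron is a hard threshold on the raw scores W.x: strictly positive -> 1, otherwise 0,
# e.g. predictPerceptron([0.3, -1.2, 0.0]) == [1, 0, 0].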
## One vs All perceptron multi-class classifier
def perceptronOnevsAll(XTrain,YTrain,XTest,YTest):
## One vs All
YTrainDummies = pd.get_dummies(YTrain)
YTestDummies = | pd.get_dummies(YTest) | pandas.get_dummies |
import pandas as pd
import numpy as np
from statistics import mode
class autodataclean:
'''
A.1) Automated Data Cleaning; identify invalid values and/or rows and automatically solve the problem -
NaN, missing values, outliers, unreliable values, out-of-range values, automated data input.
(Your group decides a solution for each problem!)
Reference - http://pandas.pydata.org/pandas-docs/stable/missing_data.html
Process -
1. Check type of column - numeric/non-numeric
2. For non-numeric -
a. Replace missing and out of range by most common (mode) in dev
3. For numeric -
a. Compute dev mean, median, min and max excluding outliers and unreliable values
b. For automated -
i. Replace NA and unreliable by mean of dev
ii. Replace outliers and out of range by min or max of dev as applicable
c. For human assisted -
i. For NAs and unreliable values, give option of replacing by mean, median or user input value
ii. For outliers and out of range values, give option of replacing by mean, median, min, max or user input
Note - Replacement values are always computed on dev and replacements in val are always same as dev treatment
Note - Exclude ID and target from cleaning process
Note - case 1 : one file, like MBD_FA2; case 2 : multiple files, one dev and others val, test, oot etc.
'''
def __init__(self, traindata, testdata = None):
'''Constructor for this class'''
self.traindata = pd.DataFrame(traindata)
if testdata is not None:
self.testdata = | pd.DataFrame(testdata) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Simple Apple Health XML to CSV
==============================
:File: convert.py
:Description: Convert Apple Health "export.xml" file into a csv
:Version: 0.0.1
:Created: 2019-10-04
:Authors: <NAME> (jam)
:Dependencies: An export.xml file from Apple Health
:License: BSD-2-Clause
"""
# %% Imports
import pandas as pd
import xml.etree.ElementTree as ET
import datetime as dt
# %% Function Definitions
def pre_process():
"""Pre-processes the XML file by replacing specific bits that would
normally result in a ParseError
"""
print("Pre-processing...", end="")
with open("export.xml") as f:
newText = f.read().replace("\x0b", "")
# with open("apple_health_export_2/new_export.xml", "w") as f:
with open("processed_export.xml", "w") as f:
f.write(newText)
print("done!")
return
def convert_xml():
"""Loops through the element tree, retrieving all objects, and then
combining them together into a dataframe
"""
print("Converting XML File...", end="")
etree = ET.parse("processed_export.xml")
attribute_list = []
for child in etree.getroot():
child_attrib = child.attrib
for metadata_entry in list(child):
metadata_values = list(metadata_entry.attrib.values())
if len(metadata_values) == 2:
metadata_dict = {metadata_values[0]: metadata_values[1]}
child_attrib.update(metadata_dict)
attribute_list.append(child_attrib)
health_df = pd.DataFrame(attribute_list)
# Every health data type and some columns have a long identifier
# Removing these for readability
health_df.type = health_df.type.str.replace("HKQuantityTypeIdentifier", "")
health_df.type = health_df.type.str.replace("HKCategoryTypeIdentifier", "")
health_df.columns = health_df.columns.str.replace(
"HKCharacteristicTypeIdentifier", ""
)
# Reorder some of the columns for easier visual data review
original_cols = list(health_df)
shifted_cols = [
"type",
"sourceName",
"value",
"unit",
"startDate",
"endDate",
"creationDate",
]
# Add loop specific column ordering if metadata entries exist
if "com.loopkit.InsulinKit.MetadataKeyProgrammedTempBasalRate" in original_cols:
shifted_cols.append("com.loopkit.InsulinKit.MetadataKeyProgrammedTempBasalRate")
if "com.loopkit.InsulinKit.MetadataKeyScheduledBasalRate" in original_cols:
shifted_cols.append("com.loopkit.InsulinKit.MetadataKeyScheduledBasalRate")
if "com.loudnate.CarbKit.HKMetadataKey.AbsorptionTimeMinutes" in original_cols:
shifted_cols.append("com.loudnate.CarbKit.HKMetadataKey.AbsorptionTimeMinutes")
remaining_cols = list(set(original_cols) - set(shifted_cols))
reordered_cols = shifted_cols + remaining_cols
health_df = health_df.reindex(labels=reordered_cols, axis="columns")
# Sort by newest data first
health_df.sort_values(by="startDate", ascending=False, inplace=True)
print("done!")
return health_df
def save_to_csv(health_df):
print("Saving CSV file...", end="")
today = dt.datetime.now().strftime("%Y-%m-%d")
originalRowCount = health_df.shape[0]
originalColCount = health_df.shape[1]
print("Original row/col count:", originalRowCount, originalColCount)
health_df["startDate"] = | pd.to_datetime(health_df["startDate"], errors="coerce") | pandas.to_datetime |
import logging
from operator import itemgetter
from logging.config import dictConfig
from datetime import datetime, timedelta, date
from math import ceil
import dash
import dash_table
from dash_table.Format import Format, Scheme
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import plotly.express as px
import pandas as pd
from chinese_calendar import get_holidays
import plotly.graph_objects as go
import numpy as np
from keysersoze.models import (
Deal,
Asset,
AssetMarketHistory,
)
from keysersoze.utils import (
get_accounts_history,
get_accounts_summary,
)
from keysersoze.apps.app import APP
from keysersoze.apps.utils import make_card_component
LOGGER = logging.getLogger(__name__)
dictConfig({
'version': 1,
'formatters': {
'simple': {
'format': '%(asctime)s - %(filename)s:%(lineno)s: %(message)s',
}
},
'handlers': {
'default': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple',
"stream": "ext://sys.stdout",
},
},
'loggers': {
'__main__': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
},
'keysersoze': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
}
}
})
pd.options.mode.chained_assignment = 'raise'
COLUMN_MAPPINGS = {
'code': '代码',
'name': '名称',
'ratio': '占比',
'return_rate': '收益率',
'cost': '投入',
'avg_cost': '成本',
'price': '价格',
'price_date': '价格日期',
'amount': '份额',
'money': '金额',
'return': '收益',
'action': '操作',
'account': '账户',
'date': '日期',
'time': '时间',
'fee': '费用',
'position': '仓位',
'day_return': '日收益',
}
FORMATS = {
'价格日期': {'type': 'datetime', 'format': Format(nully='N/A')},
'日期': {'type': 'datetime', 'format': Format(nully='N/A')},
'时间': {'type': 'datetime', 'format': Format(nully='N/A')},
'占比': {'type': 'numeric', 'format': Format(scheme='%', precision=2)},
'收益率': {'type': 'numeric', 'format': Format(nully='N/A', scheme='%', precision=2)},
'份额': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
'金额': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
'费用': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
'投入': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
'成本': {'type': 'numeric', 'format': Format(nully='N/A', precision=4, scheme=Scheme.fixed)},
'价格': {'type': 'numeric', 'format': Format(nully='N/A', precision=4, scheme=Scheme.fixed)},
'收益': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
}
ACCOUNT_PRIORITIES = {
'长期投资': 0,
'长赢定投': 1,
'U定投': 2,
'投资实证': 3,
'稳健投资': 4,
'证券账户': 6,
'蛋卷基金': 7,
}
all_accounts = [deal.account for deal in Deal.select(Deal.account).distinct()]
all_accounts.sort(key=lambda name: ACCOUNT_PRIORITIES.get(name, 1000))
layout = html.Div(
[
dcc.Store(id='assets'),
dcc.Store(id='stats'),
dcc.Store(id='accounts_history'),
dcc.Store(id='index_history'),
dcc.Store(id='deals'),
dcc.Store(id='start-date'),
dcc.Store(id='end-date'),
html.H3('投资账户概览'),
dbc.Checklist(
id='show-money',
options=[{'label': '显示金额', 'value': 'show'}],
value=[],
switch=True,
),
html.Hr(),
dbc.InputGroup(
[
dbc.InputGroupAddon('选择账户', addon_type='prepend', className='mr-2'),
dbc.Checklist(
id='checklist',
options=[{'label': a, 'value': a} for a in all_accounts],
value=[all_accounts[0]],
inline=True,
className='my-auto'
),
],
className='my-2',
),
html.Div(id='account-summary'),
html.Br(),
dbc.Tabs([
dbc.Tab(
label='资产走势',
children=[
dcc.Graph(
id='asset-history-chart',
config={
'displayModeBar': False,
}
),
]
),
dbc.Tab(
label='累计收益走势',
children=[
dcc.Graph(
id="total-return-chart",
config={
'displayModeBar': False
}
),
]
),
dbc.Tab(
label='累计收益率走势',
children=[
dbc.InputGroup(
[
dbc.InputGroupAddon('比较基准', addon_type='prepend', className='mr-2'),
dbc.Checklist(
id='compare',
options=[
{'label': '中证全指', 'value': '000985.CSI'},
{'label': '上证指数', 'value': '000001.SH'},
{'label': '深证成指', 'value': '399001.SZ'},
{'label': '沪深300', 'value': '000300.SH'},
{'label': '中证500', 'value': '000905.SH'},
],
value=['000985.CSI'],
inline=True,
className='my-auto'
),
],
className='my-2',
),
dcc.Graph(
id="return-curve-chart",
config={
'displayModeBar': False
}
),
]
),
dbc.Tab(
label='日收益历史',
children=[
dcc.Graph(
id="day-return-chart",
config={
'displayModeBar': False
},
),
]
),
]),
html.Center(
[
dbc.RadioItems(
id="date-range",
className='btn-group',
labelClassName='btn btn-light border',
labelCheckedClassName='active',
options=[
{"label": "近一月", "value": "1m"},
{"label": "近三月", "value": "3m"},
{"label": "近半年", "value": "6m"},
{"label": "近一年", "value": "12m"},
{"label": "今年以来", "value": "thisyear"},
{"label": "本月", "value": "thismonth"},
{"label": "本周", "value": "thisweek"},
{"label": "所有", "value": "all"},
{"label": "自定义", "value": "customized"},
],
value="thisyear",
),
],
className='radio-group',
),
html.Div(
id='customized-date-range-container',
children=[
dcc.RangeSlider(
id='customized-date-range',
min=2018,
max=2022,
step=None,
marks={year: str(year) for year in range(2018, 2023)},
value=[2018, 2022],
)
],
className='my-auto ml-0 mr-0',
style={'max-width': '100%', 'display': 'none'}
),
html.Hr(),
dbc.Tabs([
dbc.Tab(
label='持仓明细',
children=[
html.Br(),
dbc.Checklist(
id='show-cleared',
options=[{'label': '显示清仓品种', 'value': 'show'}],
value=[],
switch=True,
),
html.Div(id='assets_cards'),
html.Center(
[
dbc.RadioItems(
id="assets-pagination",
className="btn-group",
labelClassName="btn btn-secondary",
labelCheckedClassName="active",
options=[
{"label": "1", "value": 0},
],
value=0,
),
],
className='radio-group',
),
]
),
dbc.Tab(
label='交易记录',
children=[
html.Br(),
html.Div(id='deals_table'),
html.Center(
[
dbc.RadioItems(
id="deals-pagination",
className="btn-group",
labelClassName="btn btn-secondary",
labelCheckedClassName="active",
options=[
{"label": "1", "value": 0},
],
value=0,
),
],
className='radio-group',
),
]
),
])
],
)
@APP.callback(
[
dash.dependencies.Output('assets', 'data'),
dash.dependencies.Output('stats', 'data'),
dash.dependencies.Output('accounts_history', 'data'),
dash.dependencies.Output('index_history', 'data'),
dash.dependencies.Output('deals', 'data'),
dash.dependencies.Output('deals-pagination', 'options'),
dash.dependencies.Output('assets-pagination', 'options'),
],
[
dash.dependencies.Input('checklist', 'value'),
dash.dependencies.Input('compare', 'value'),
],
)
def update_after_check(accounts, index_codes):
accounts = accounts or all_accounts
summary_data, assets_data = get_accounts_summary(accounts)
history = get_accounts_history(accounts).to_dict('records')
history.sort(key=itemgetter('account', 'date'))
index_history = []
for index_code in index_codes:
index = Asset.get(zs_code=index_code)
for record in index.history:
index_history.append({
'account': index.name,
'date': record.date,
'price': record.close_price
})
index_history.sort(key=itemgetter('account', 'date'))
deals = []
for record in Deal.get_deals(accounts):
deals.append({
'account': record.account,
'time': record.time,
'code': record.asset.zs_code,
'name': record.asset.name,
'action': record.action,
'amount': record.amount,
'price': record.price,
'money': record.money,
'fee': record.fee,
})
deals.sort(key=itemgetter('time'), reverse=True)
valid_deals_count = 0
for item in deals:
if item['action'] == 'fix_cash':
continue
if item['code'] == 'CASH' and item['action'] == 'reinvest':
continue
valid_deals_count += 1
pagination_options = [
{'label': idx + 1, 'value': idx}
for idx in range(ceil(valid_deals_count / 100))
]
assets_pagination_options = []
return (
assets_data,
summary_data,
history,
index_history,
deals,
pagination_options,
assets_pagination_options
)
@APP.callback(
dash.dependencies.Output('account-summary', 'children'),
[
dash.dependencies.Input('stats', 'data'),
dash.dependencies.Input('show-money', 'value')
]
)
def update_summary(stats, show_money):
body_content = []
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '总资产',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'money',
'content': stats['money'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '日收益',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'money',
'content': stats['day_return'],
'color': 'bg-primary',
},
{
'item_cls': html.P,
'type': 'percent',
'content': stats['day_return_rate'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '累计收益',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'money',
'content': stats['return'],
'color': 'bg-primary',
},
{
'item_cls': html.P,
'type': 'percent',
'content': stats['return_rate'] if stats['amount'] > 0 else 'N/A(已清仓)',
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '年化收益率',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'percent',
'content': stats['annualized_return'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True,
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '现金',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'money',
'content': stats['cash'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '仓位',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'percent',
'content': stats['position'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
card = dbc.Card(
[
dbc.CardBody(
dbc.Row(
[dbc.Col([card_component]) for card_component in body_content],
),
className='py-2',
)
],
className='my-auto',
color='primary',
)
return [card]
@APP.callback(
dash.dependencies.Output('assets_cards', 'children'),
[
dash.dependencies.Input('assets', 'data'),
dash.dependencies.Input('show-money', 'value'),
dash.dependencies.Input('show-cleared', 'value'),
]
)
def update_assets_table(assets_data, show_money, show_cleared):
cards = [html.Hr()]
for row in assets_data:
if not show_cleared and abs(row['amount']) <= 0.001:
continue
if row["code"] in ('CASH', 'WZZNCK'):
continue
cards.append(make_asset_card(row, show_money))
cards.append(html.Br())
return cards
def make_asset_card(asset_info, show_money=True):
def get_color(value):
if not isinstance(value, (float, int)):
return None
if value > 0:
return 'text-danger'
if value < 0:
return 'text-success'
return None
header = dbc.CardHeader([
html.H5(
html.A(
f'{asset_info["name"]}({asset_info["code"]})',
href=f'/asset/{asset_info["code"].replace(".", "").lower()}',
target='_blank'
),
className='mb-0'
),
html.P(f'更新日期 {asset_info["price_date"]}', className='mb-0'),
])
body_content = []
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '持有金额/份额'},
{'item_cls': html.H4, 'type': 'money', 'content': asset_info['money']},
{'item_cls': html.P, 'type': 'amount', 'content': asset_info['amount']}
],
show_money=show_money,
)
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '日收益'},
{
'item_cls': html.H4,
'type': 'money',
'content': asset_info['day_return'],
'color': get_color(asset_info['day_return']),
},
{
'item_cls': html.P,
'type': 'percent',
'content': asset_info['day_return_rate'],
'color': get_color(asset_info['day_return']),
}
],
show_money=show_money,
)
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '现价/成本'},
{'item_cls': html.H4, 'type': 'price', 'content': asset_info['price']},
{'item_cls': html.P, 'type': 'price', 'content': asset_info['avg_cost'] or 'N/A'}
],
show_money=show_money,
)
)
asset = Asset.get(zs_code=asset_info['code'])
prices = []
for item in asset.history.order_by(AssetMarketHistory.date.desc()).limit(10):
if item.close_price is not None:
prices.append({
'date': item.date,
'price': item.close_price,
})
else:
prices.append({
'date': item.date,
'price': item.nav,
})
if len(prices) >= 10:
break
prices.sort(key=itemgetter('date'))
df = pd.DataFrame(prices)
df['date'] = pd.to_datetime(df['date'])
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=df['date'],
y=df['price'],
showlegend=False,
marker={'color': 'orange'},
mode='lines+markers',
)
)
fig.update_layout(
width=150,
height=100,
margin={'l': 4, 'r': 4, 'b': 20, 't': 10, 'pad': 4},
xaxis={'showticklabels': False, 'showgrid': False, 'fixedrange': True},
yaxis={'showticklabels': False, 'showgrid': False, 'fixedrange': True},
)
fig.update_xaxes(
rangebreaks=[
{'bounds': ["sat", "mon"]},
{
'values': get_holidays(df.date.min(), df.date.max(), False)
}
]
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '十日走势'},
{
'item_cls': None,
'type': 'figure',
'content': fig
}
],
show_money=show_money
)
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '累计收益'},
{
'item_cls': html.H4,
'type': 'money',
'content': asset_info['return'],
'color': get_color(asset_info['return']),
},
{
'item_cls': html.P,
'type': 'percent',
'content': asset_info['return_rate'],
'color': get_color(asset_info['return']),
}
],
show_money=show_money,
)
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '占比'},
{'item_cls': html.H4, 'type': 'percent', 'content': asset_info['position']},
],
show_money=show_money,
)
)
card = dbc.Card(
[
header,
dbc.CardBody(
dbc.Row(
[dbc.Col([card_component]) for card_component in body_content],
),
className='py-2',
)
],
className='my-auto'
)
return card
@APP.callback(
dash.dependencies.Output('return-curve-chart', 'figure'),
[
dash.dependencies.Input('accounts_history', 'data'),
dash.dependencies.Input('index_history', 'data'),
dash.dependencies.Input('start-date', 'data'),
dash.dependencies.Input('end-date', 'data'),
]
)
def draw_return_chart(accounts_history, index_history, start_date, end_date):
df = pd.DataFrame(accounts_history)[['amount', 'account', 'date', 'nav']]
df['date'] = | pd.to_datetime(df['date']) | pandas.to_datetime |
def getMetroStatus():
import http.client, urllib.request, urllib.parse, urllib.error, base64, time
headers = {
# Request headers
'api_key': '6b700f7ea9db408e9745c207da7ca827',}
params = urllib.parse.urlencode({})
try:
conn = http.client.HTTPSConnection('api.wmata.com')
conn.request("GET", "/StationPrediction.svc/json/GetPrediction/All?%s" % params, "{body}", headers)
response = conn.getresponse()
data = response.read()
conn.close()
return str(data) #returns the data as a string rather than raw bytes
except Exception as e:
print("Request failed: {}".format(e)) # a generic Exception need not carry errno/strerror
def JSONfromMetro(trainString): #converts the string into a dictionary file
import json, re
fixSlash=re.compile(r'\\') #this line and the next strip stray backslashes, which break the json module
fixedTrainString=fixSlash.sub('',trainString)
trainJSON=json.loads(fixedTrainString[2:-2]+"}") #slightly adjusts the string to put it in json form
if isinstance(trainJSON,dict) and 'Trains' in trainJSON.keys():
return trainJSON['Trains']
else:
return None
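# Each element of the returned list is one arrival prediction. Based on the fields used later
# (Car, LocationCode, Line, DestinationCode, Min, Group), a record is assumed to look roughly like
# {'Car': '8', 'LocationCode': 'A01', 'Line': 'RD', 'DestinationCode': 'A15', 'Min': '3', 'Group': '1', ...}
# (illustrative values; see the WMATA StationPrediction API documentation for the authoritative schema).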
def saveWMATASQL(trainData, engine): #saves the current WMATA data to open engine
import datetime, pandas as pd
#DTstring (built below) encodes the current date and time: the month, then two characters each for day, hour, minute and second; it is stored in the DT column of the WMATAFull table
if not isinstance(trainData, list):
return None
DTstring=str(datetime.datetime.now().month)+str(datetime.datetime.now().day).rjust(2,'0')+str(datetime.datetime.now().hour).rjust(2,'0')+str(datetime.datetime.now().minute).rjust(2,'0')+str(datetime.datetime.now().second).rjust(2,'0')
trainFrame=pd.DataFrame('-', index=range(len(trainData)), columns=['DT','Car','Loc','Lin','Des','Min','Gro']) #creates trainFrame, the DataFrame to send to the SQL server
for iter in range(len(trainData)): #for all the trains in trainData
trainFrame.loc[iter, 'DT'] = DTstring # use .loc[row, col] so the assignment is not lost to chained indexing
for colName in ['Car','LocationCode','Line','DestinationCode','Min','Group']: #select the six relevant fields
trainFrame.loc[iter, colName[:3]] = trainData[iter][colName] #and fill in the relevant data
trainFrame.to_sql('WMATAFull', engine, if_exists='append') #send trainFrame to the SQL server
return trainFrame
def lineNextDF(line, destList, arrData):
import pandas as pd
timeString=arrData.DT.iloc[0]
rowName=pd.to_datetime('2016-'+timeString[0]+'-'+timeString[1:3]+' '+timeString[3:5]+':'+timeString[5:7]+':'+timeString[7:])
# names the row as a timestamp with the month day hour minute second
lineStat=pd.DataFrame('-',index=[rowName],columns=line)
for station in line: #repeat the below process for every station on the line
trains2consider=arrData.loc[lambda df: df.Loc==station].loc[lambda df: df.Des.isin(destList)] #pull out the trains at that station heading toward the destinations
if len(trains2consider.index)>0: #If you found a train
if trains2consider.Des.iloc[0] in ['A11','B08','E01','K04']: #the next few lines set the station status to the color and ETA of the first arriving train
lineStat.loc[rowName,station]=trains2consider.Lin.iloc[0].lower()+':'+trains2consider.Min.iloc[0] #if the train is terminating early (at Grosvenor, Silver Spring or Mt Vernon), use lowercase
elif trains2consider.Des.iloc[0]=='E06':
lineStat.loc[rowName,station]='Yl:'+trains2consider.Min.iloc[0]
elif trains2consider.Des.iloc[0]=='A13':
lineStat.loc[rowName,station]='Rd:'+trains2consider.Min.iloc[0]
else:
lineStat.loc[rowName,station]=trains2consider.Lin.iloc[0]+':'+trains2consider.Min.iloc[0] #otherwise use upper
return lineStat
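# lineNextDF returns a one-row DataFrame indexed by the snapshot timestamp, with one column per
# station code. Each cell holds '<Line>:<Min>' for the first train toward destList, e.g. 'RD:5'
# (lowercase such as 'rd:5' marks an early-terminating train); stations with no matching train
# keep the '-' placeholder.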
def allLNtoNE(arrData, surgeNum): #all of the lines to the North and East for the given surge
import pandas as pd
LNlist=[]
for num in range(len(lineList[surgeNum])):
LNlist.append(lineNextDF(lineList[surgeNum][num], NEdestList[surgeNum][num], arrData)) #run for each line and destination
return pd.concat(LNlist, axis=1, join='outer') #then join them all together
def allLNtoSW(arrData, surgeNum): #all of the lines to the South and West for the given surge
import pandas as pd
LNlist=[]
for num in range(1,1+len(lineList[surgeNum])):
LNlist.append(lineNextDF(lineList[surgeNum][-num][::-1], SWdestList[surgeNum][-num][::-1], arrData)) #run for each line and destination
return pd.concat(LNlist, axis=1, join='outer') #then join them all together
def WMATAtableSQL(timeMin,intervalSec, surgeNum): #records for timeMin minutes, about ever intervalSec seconds
import time, pandas as pd
from sqlalchemy import create_engine
engine = create_engine('postgresql+psycopg2://Original:tolistbtGU!@team<EMAIL>:5432/WmataData') #opens the engine to WmataData
#creates a list of the table we're creating to add to the index
isStart=True
startTime=time.time()
while time.time()<(startTime+60*timeMin): #runs for timeMin minutes
stepStart=time.time()
WMATAdf=saveWMATASQL(JSONfromMetro(getMetroStatus()),engine) #fetch the current train data, save it to SQL and keep the returned DataFrame
if isinstance(WMATAdf,pd.DataFrame) and len(WMATAdf.index)>0: #if you got data back
if isStart: #and it's the first row
allLN2NE=allLNtoNE(WMATAdf,surgeNum) #set allLNtoNE equal to the all LineNext to NE data
allLN2SW=allLNtoSW(WMATAdf,surgeNum) #set allLNtoSW equal to the all LineNext to SW data
isStart=False #and the next row will not be the first row
else: #for other rows
allLN2NE=allLN2NE.append(allLNtoNE(WMATAdf,surgeNum)) #append the data
allLN2SW=allLN2SW.append(allLNtoSW(WMATAdf,surgeNum))
stepTime=time.time()-stepStart #calculates the time this step took
if stepTime<intervalSec: #if intervalSec seconds have not passed,
time.sleep(intervalSec-stepTime) #wait until a total of intervalSec have passed
engine.connect().close()
return [allLN2NE, allLN2SW]
def lineNextSQL(line, timeString,destList, engine): #reads the next train to arrive at the stations in line heading toward destList and returns it as a Data Frame
import pandas as pd
from sqlalchemy import create_engine
isEngineNone=(engine is None)
if isEngineNone: #if there's not an engine, make one
engine = create_engine('postgresql+psycopg2://Original:tolistbtGU!@teamoriginal.ccc95gjlnnnc.us-east-1.rds.amazonaws.com:5432/WmataData')
query='SELECT * FROM "WMATAFull" WHERE "DT"='+"'"+timeString+"';"
arrData=pd.read_sql(query,engine)
if isEngineNone:
engine.connect().close()
return lineNextDF(line, destList, arrData)
def lineNextTableSQL(line, firstTime, lastTime, destList): #saves the next train arrivals for a line and destList over time
import time, pandas as pd
from sqlalchemy import create_engine
print(time.strftime("%a, %d %b %Y %H:%M:%S"))
engine = create_engine('postgresql+psycopg2://Original:tolistbtGU!<EMAIL>:5432/WmataData')
query='SELECT * FROM "WMATAFull" WHERE "DT">='+"'"+firstTime+"' AND "+'"DT"<='+"'"+lastTime+"';"
arrData= | pd.read_sql(query,engine) | pandas.read_sql |
import pandas as pd
def full_describe(series: pd.Series, verbose=True):
"""
Calculates a pandas describe of series, plus a count of unique and NaN
:param verbose: printing some other info
:param series: Pandas Series
:return: df with stats as cols
"""
stats_df = pd.DataFrame()
stats_df['dtype_kind'] = pd.Series([series.dtype.kind])
stats_df['null_count'] = pd.Series([series.isnull().sum()])
pandas_des = series.describe()
if series.dtype.kind != 'O': # numpy reports the object dtype kind as uppercase 'O'
str_des = series.astype(str).describe()
pandas_des = pandas_des.append(str_des.drop('count'))
stats_df = pd.concat([pd.DataFrame(pandas_des).transpose(), stats_df], axis=1)
return stats_df
class TestFullDescribe:
@classmethod
def setup_class(cls):
float_series = pd.Series([2,3,4.0,4.0,5])
float_integer = pd.Series([2,3,4,4,5,5,5,5,5])
string_series = | pd.Series(['a','b','c','d','d']) | pandas.Series |
#Dataframe manipulation library
import pandas as pd
#Math functions, we'll only need the sqrt function so let's import only that
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
#Storing the movie information into a pandas dataframe
movies_df = pd.read_csv('movies.csv')
#Storing the user information into a pandas dataframe
ratings_df = pd.read_csv('ratings.csv')
#Using regular expressions to find a year stored between parentheses
#We specify the parantheses so we don't conflict with movies that have years in their titles
movies_df['year'] = movies_df.title.str.extract('(\(\d\d\d\d\))',expand=False)
#Removing the parentheses
movies_df['year'] = movies_df.year.str.extract('(\d\d\d\d)',expand=False)
#Removing the years from the 'title' column
movies_df['title'] = movies_df.title.str.replace('(\(\d\d\d\d\))', '')
#Applying the strip function to get rid of any ending whitespace characters that may have appeared
movies_df['title'] = movies_df['title'].apply(lambda x: x.strip())
#Dropping the genres column
movies_df = movies_df.drop('genres', 1)
#Drop removes a specified row or column from a dataframe
ratings_df = ratings_df.drop('timestamp', 1)
userInput = [
{'title':'Breakfast Club, The', 'rating':5},
{'title':'Toy Story', 'rating':3.5},
{'title':'Jumanji', 'rating':2},
{'title':"Pulp Fiction", 'rating':5},
{'title':'Akira', 'rating':4.5}
]
inputMovies = pd.DataFrame(userInput)
#Filtering out the movies by title
inputId = movies_df[movies_df['title'].isin(inputMovies['title'].tolist())]
#Then merging it so we can get the movieId. It's implicitly merging it by title.
inputMovies = pd.merge(inputId, inputMovies)
#Dropping information we won't use from the input dataframe
inputMovies = inputMovies.drop('year', 1)
#Final input dataframe
#If a movie you added in above isn't here, then it might not be in the original
#dataframe or it might spelled differently, please check capitalisation.
#Filtering out users that have watched movies that the input has watched and storing it
userSubset = ratings_df[ratings_df['movieId'].isin(inputMovies['movieId'].tolist())]
#Groupby creates several sub dataframes where they all have the same value in the column specified as the parameter
userSubsetGroup = userSubset.groupby(['userId'])
userSubsetGroup.get_group(1130)
#Sorting it so users with movie most in common with the input will have priority
userSubsetGroup = sorted(userSubsetGroup, key=lambda x: len(x[1]), reverse=True)
userSubsetGroup = userSubsetGroup[0:100]
# Store the Pearson Correlation in a dictionary, where the key is the user Id and the value is the coefficient
pearsonCorrelationDict = {}
# For every user group in our subset
for name, group in userSubsetGroup:
# Let's start by sorting the input and current user group so the values aren't mixed up later on
group = group.sort_values(by='movieId')
inputMovies = inputMovies.sort_values(by='movieId')
# Get the N for the formula
nRatings = len(group)
# Get the review scores for the movies that they both have in common
temp_df = inputMovies[inputMovies['movieId'].isin(group['movieId'].tolist())]
# And then store them in a temporary buffer variable in a list format to facilitate future calculations
tempRatingList = temp_df['rating'].tolist()
# Let's also put the current user group reviews in a list format
tempGroupList = group['rating'].tolist()
# Now let's calculate the pearson correlation between two users, so called, x and y
Sxx = sum([i ** 2 for i in tempRatingList]) - pow(sum(tempRatingList), 2) / float(nRatings)
Syy = sum([i ** 2 for i in tempGroupList]) - pow(sum(tempGroupList), 2) / float(nRatings)
Sxy = sum(i * j for i, j in zip(tempRatingList, tempGroupList)) - sum(tempRatingList) * sum(tempGroupList) / float(
nRatings)
# If the denominator is different than zero, then divide, else, 0 correlation.
if Sxx != 0 and Syy != 0:
pearsonCorrelationDict[name] = Sxy / sqrt(Sxx * Syy)
else:
pearsonCorrelationDict[name] = 0
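# The loop above uses the computational (single-pass) form of Pearson's r:
# Sxx = sum(x^2) - (sum(x))^2 / n, Syy likewise, Sxy = sum(x*y) - sum(x)*sum(y) / n,
# r = Sxy / sqrt(Sxx * Syy)
# which is algebraically the same as cov(x, y) / (std(x) * std(y)) over the co-rated movies.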
pearsonDF = pd.DataFrame.from_dict(pearsonCorrelationDict, orient='index')
pearsonDF.columns = ['similarityIndex']
pearsonDF['userId'] = pearsonDF.index
pearsonDF.index = range(len(pearsonDF))
topUsers=pearsonDF.sort_values(by='similarityIndex', ascending=False)[0:50]
topUsersRating=topUsers.merge(ratings_df, left_on='userId', right_on='userId', how='inner')
#Multiplies the similarity by the user's ratings
topUsersRating['weightedRating'] = topUsersRating['similarityIndex']*topUsersRating['rating']
#Applies a sum to the topUsers after grouping it up by userId
tempTopUsersRating = topUsersRating.groupby('movieId').sum()[['similarityIndex','weightedRating']]
tempTopUsersRating.columns = ['sum_similarityIndex','sum_weightedRating']
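# sum_weightedRating / sum_similarityIndex gives a similarity-weighted average rating per movie,
# so the opinions of users most similar to the input profile count the most; that division is the
# usual next step when building the recommendation scores.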
#Creates an empty dataframe
recommendation_df = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = | DataFrame(data) | pandas.DataFrame |
#
# @ 2021. TU Dortmund University,
# Institute of Energy Systems, Energy Efficiency and Energy Economics,
# Research group Distribution grid planning and operation
#
import pandas as pd
import os
import pickle
from pathlib import Path
ROOT_DIR = Path(__file__).parent
MID_DIR = r"C:\Users\user\Desktop\MiD"
# Load the MiD data
def get_mid_data(type_day, cs=None, var_power=False):
days = ["we", "sa", "so"]
    # Data set with simulated charging times
if cs:
charge_scen = ["CS1", "CS2", "CS3"]
path = MID_DIR + r"\Aufbereitete Daten\Ladezeiten"
if var_power:
filename = "\\Trips_{}_{}_Ladezeiten_vp.csv".format(days[type_day - 1], charge_scen[cs - 1])
else:
filename = "\\Trips_{}_{}_Ladezeiten.csv".format(days[type_day - 1], charge_scen[cs - 1])
data = | pd.read_csv(path + filename) | pandas.read_csv |
import os
from typing import TypeVar
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from scipy import stats
try:
from sentence_transformers import SentenceTransformer
import umap
import hdbscan
import torch
except ModuleNotFoundError as e:
print('Please install the dependencies for the visualization routines, using `pip install semanticlayertools[embeddml]`.')
raise e
smoothing = TypeVar('smoothing', bool, float)
def gaussian_smooth(x, y, grid, sd):
weights = np.transpose([stats.norm.pdf(grid, m, sd) for m in x])
weights = weights / weights.sum(0)
return (weights * y).sum(1)
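# A minimal, illustrative sketch of what gaussian_smooth() does: it resamples an
# irregular (x, y) series onto a finer `grid` with a Gaussian kernel of width `sd`.
# The numbers below are assumptions chosen only to demonstrate the call.
def _example_gaussian_smooth():
    x = np.array([2000, 2001, 2002, 2003])
    y = np.array([5.0, 9.0, 4.0, 7.0])
    grid = np.linspace(2000, 2003, num=50)
    return gaussian_smooth(x, y, grid, sd=0.75)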
def streamgraph(
filepath: str, smooth: smoothing = False,
minClusterSize: int = 1000, showNthGrid: int = 5
):
"""Plot streamgraph of cluster sizes vs years.
Based on https://www.python-graph-gallery.com/streamchart-basic-matplotlib
"""
basedf = pd.read_csv(filepath)
basedata = basedf.groupby(['year', 'cluster']).size().to_frame('counts').reset_index()
yearbase = [
x for x in range(
int(basedata.year.min()), int(basedata.year.max()) + 1
)
]
largeclu = list(basedata.groupby('cluster').sum().query(f'counts > {minClusterSize}').index)
cluDict = {}
for clu in basedata.cluster.unique():
if clu in largeclu:
cluvec = []
basedf = basedata.query('cluster == @clu')
baseyears = list(basedf.year.unique())
for year in yearbase:
if year in baseyears:
cluvec.append(basedf.query('year == @year').counts.iloc[0])
else:
cluvec.append(0)
cluDict[clu] = cluvec
fig, ax = plt.subplots(figsize=(16, 9))
if type(smooth) is float:
grid = np.linspace(yearbase[0], yearbase[-1], num=100)
y = [np.array(x) for x in cluDict.values()]
y_smoothed = [gaussian_smooth(yearbase, y_, grid, smooth) for y_ in y]
ax.stackplot(
grid,
y_smoothed,
labels=cluDict.keys(),
baseline="sym",
colors=plt.get_cmap('tab20').colors
)
pass
else:
ax.stackplot(
yearbase,
cluDict.values(),
labels=cluDict.keys(),
baseline='sym',
colors=plt.get_cmap('tab20').colors
)
ax.legend()
ax.set_title('Cluster sizes')
ax.set_xlabel('Year')
ax.set_ylabel('Number of publications')
ax.yaxis.set_ticklabels([])
ax.xaxis.grid(color='gray')
temp = ax.xaxis.get_ticklabels()
temp = list(set(temp) - set(temp[::showNthGrid]))
for label in temp:
label.set_visible(False)
ax.set_axisbelow(True)
return fig
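# Hedged usage sketch for streamgraph(); the CSV path and parameter values are
# illustrative assumptions. The input file only needs 'year' and 'cluster'
# columns, matching the groupby performed inside the function.
def _example_streamgraph():
    fig = streamgraph('clusters.csv', smooth=1.5, minClusterSize=500, showNthGrid=5)
    fig.savefig('streamgraph.png', dpi=150)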
def embeddedTextPlotting(
infolderpath: str, columnName: str, outpath: str,
umapNeighors: int = 200,
):
"""Create embedding for corpus text."""
print('Initializing embedder model.')
model = SentenceTransformer('all-MiniLM-L6-v2')
clusterfiles = os.listdir(infolderpath)
clusterdf = []
for x in clusterfiles:
try:
clusterdf.append(
pd.read_json(os.path.join(infolderpath, x), lines=True)
)
except ValueError:
raise
dataframe = pd.concat(clusterdf, ignore_index=True)
dataframe = dataframe.dropna(subset=[columnName], axis=0).reset_index(drop=True)
corpus = [x[0] for x in dataframe[columnName].values]
print('Start embedding.')
corpus_embeddings = model.encode(
corpus,
convert_to_tensor=True
)
torch.save(
corpus_embeddings,
f'{os.path.join(outpath, "embeddedCorpus.pt")}'
)
print('\tDone\nStarting mapping to 2D.')
corpus_embeddings_2D = umap.UMAP(
n_neighbors=umapNeighors,
n_components=2,
metric='cosine'
).fit_transform(corpus_embeddings)
np.savetxt(
os.path.join(outpath, "embeddedCorpus_2d.csv"),
corpus_embeddings_2D,
delimiter=',',
newline='\n'
)
print('\tDone.')
dataframe.insert(0, 'x', corpus_embeddings_2D[:, 0])
dataframe.insert(0, 'y', corpus_embeddings_2D[:, 1])
return dataframe
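# Hedged usage sketch for embeddedTextPlotting(). The folder, column name and
# output path below are illustrative assumptions; the routine expects a folder
# of JSON-lines cluster files whose `columnName` field holds list-like text.
def _example_text_embedding():
    plotdf = embeddedTextPlotting(
        infolderpath='clusters_json',
        columnName='title',
        outpath='embeddings',
        umapNeighors=100,
    )
    return plotdf[['x', 'y']].head()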
def embeddedTextClustering(
infolderpath: str, columnName: str, emdeddingspath: str, outpath: str,
umapNeighors: int = 200, umapComponents: int = 50,
hdbscanMinCluster: int = 500,
):
"""Create clustering based on embedding for corpus texts."""
print('Initializing embedder model.')
clusterfiles = os.listdir(infolderpath)
clusterdf = []
for x in clusterfiles:
try:
clusterdf.append(
pd.read_json(os.path.join(infolderpath, x), lines=True)
)
except ValueError:
raise
dataframe = | pd.concat(clusterdf, ignore_index=True) | pandas.concat |
import re
import pandas as pd
log = r"D:\workspace\DAT\benchmark-ts\log\dl_multi40_03091910.log"
file = open(log)
current_trial_no = ""
results = []
params = {}
data_name = ""
for line in file:
if re.match('Trial No:', line):
        current_trial_no = line.split(":")[1][:-1]
        params['trial_no'] = current_trial_no
if params['trial_no'] in ['34', '36']:
print('')
elif line[0:1] == '(' and line[2:3] == ')':
key = line.split(":")[0][4:]
value = line.split(":")[1].replace(' ', '')[:-1]
params[key] = value
elif re.match('========== data_name', line):
data_name = line.split(':')[1].replace(' ', '')[:-1]
elif re.match('.*best_trial_no:.*', line):
params['reward'] = line.split('reward:')[1].split(',')[0]
params['data_name'] = data_name
results.append(params)
params = {}
        current_trial_no = ""
for result in results:
print(result)
file.close()
df = | pd.DataFrame(results) | pandas.DataFrame |
import dill
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from surprise import SVD, Reader, Dataset
from surprise.model_selection import GridSearchCV
from tensorflow.keras import layers, activations, models, optimizers, losses
from titlecase import titlecase
TFIDF_MATRIX_FILE = 'trained_models/recommendation/tfidf_matrix.pkl'
MOVIE_INDICES_FILE = 'trained_models/recommendation/movie_indices.pkl'
PREDICTED_RATING_SVD_MODEL_FILE = 'trained_models/recommendation/predicted_rating_svd.pkl'
QUANTILES_THRESHOLD = 0.95
PREDICTED_RATING_NN_WITH_EMBEDDING_MODEL = 'trained_models/recommendation/predicted_rating_nn_model'
PREDICTED_RATING_NN_WITH_EMBEDDING_RATING_SCALER_FILE = 'trained_models/recommendation/predicted_rating_nn_rating_scaler.pkl'
PREDICTED_RATING_NN_WITH_EMBEDDING_USER_ENCODER_FILE = 'trained_models/recommendation/predicted_rating_nn_user_encoder.pkl'
PREDICTED_RATING_NN_WITH_EMBEDDING_MOVIE_ENCODER_FILE = 'trained_models/recommendation/predicted_rating_nn_movie_encoder.pkl'
N_FACTORS = 10
# Demographic: trending based on popularity
def get_n_popular_movies(data, n):
return data.nlargest(n, 'popularity')[['id', 'original_title', 'genres', 'popularity', 'imdb_id']]
# Demographic: trending now based on IMDB weighted rating score
def get_n_trending_movies(data, n):
m = data['vote_count'].quantile(QUANTILES_THRESHOLD)
c = data['vote_average'].mean()
rating_movies = data.copy().loc[data['vote_count'] >= m]
rating_movies['rating_score'] = rating_movies.apply(lambda movie: calc_weighted_rating(movie, m, c), axis=1)
# because dataset max year is 2015, recent 3 years is 2012
recent_three_year_movies = rating_movies.loc[rating_movies['release_year'] >= 2012]
older_than_three_year_movies = rating_movies.loc[rating_movies['release_year'] < 2012]
mid = int(n / 2)
recent_three_year_movies = recent_three_year_movies.nlargest(mid, 'rating_score')
older_than_three_year_movies = older_than_three_year_movies.nlargest(n - mid, 'rating_score')
return pd.concat([recent_three_year_movies, older_than_three_year_movies])[
['id', 'original_title', 'genres', 'vote_count', 'vote_average', 'rating_score', 'imdb_id', 'release_year']]
# Demographic: trending based on IMDB weighted rating score
def get_n_rating_movies(data, n):
m = data['vote_count'].quantile(QUANTILES_THRESHOLD)
c = data['vote_average'].mean()
rating_movies = data.copy().loc[data['vote_count'] >= m]
rating_movies['rating_score'] = rating_movies.apply(lambda movie: calc_weighted_rating(movie, m, c), axis=1)
return rating_movies.nlargest(n, 'rating_score')[
['id', 'original_title', 'genres', 'vote_count', 'vote_average', 'rating_score', 'imdb_id']]
def calc_weighted_rating(movie, m, c):
v = movie['vote_count']
r = movie['vote_average']
return (v * r + m * c) / (v + m)
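# Worked example of the weighted-rating formula above (illustrative numbers):
# with v=500 votes averaging r=8.0, threshold m=200 and global mean c=6.5,
# the score is (500*8.0 + 200*6.5) / (500 + 200) = 5300 / 700 ≈ 7.57, i.e. the
# raw average is pulled toward the overall mean for thinly voted titles.
def _example_weighted_rating():
    movie = {'vote_count': 500, 'vote_average': 8.0}
    return calc_weighted_rating(movie, m=200, c=6.5)  # ≈ 7.571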
# Content based filtering: propose list of the most similar movies based on cosine similarity calculation
# between the words or text in vector form (use TF-IDF)
def calc_tfidf_matrix(data):
data['original_title'] = data['original_title'].str.strip()
data['overview'] = data['overview'].fillna('')
data['tagline'] = data['tagline'].fillna('')
# Merging original title, overview and tagline together
data['description'] = data['original_title'] + data['overview'] + data['tagline']
tfidf = TfidfVectorizer(analyzer='word', stop_words='english')
tfidf_matrix = tfidf.fit_transform(data['description'])
# construct a reverse map of indices and movie original title
data['title'] = data['original_title'].str.lower()
movie_indices = | pd.Series(data.index, index=data['title']) | pandas.Series |
# Imports libraries
import pandas as pd
import numpy as np
import plotly.graph_objects as go
from plotly.graph_objs.scatter.marker import Line
import plotly.express as px
from scipy.integrate import odeint
##Imports regional data
df_city_current = pd.read_csv("https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto25/CasosActualesPorComuna_std.csv")
# regional daily active cases
df_region_current = df_city_current[df_city_current["Comuna"] == "Total"]
df_region_current_bypop = df_city_current[df_city_current["Comuna"] == "Total"]
df_region_current_bypop['bypop'] = df_region_current.loc[0:,"Casos actuales"]/(df_region_current.loc[0:,"Poblacion"]/1000)
df_region_current_bypop = df_region_current_bypop[["Region", "Fecha", "bypop", 'Casos actuales']].pivot(index='Fecha', columns='Region', values=['bypop', 'Casos actuales'])
df_region_current_bypop
df_region_current = df_region_current[["Region", "Fecha", "Casos actuales"]].pivot(index='Fecha', columns='Region', values='Casos actuales')
###Deaths
df_city_deaths = pd.read_csv("https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto38/CasosFallecidosPorComuna_std.csv")
df_deaths_region = df_city_deaths[df_city_deaths['Comuna']=='Total'].groupby(['Region','Fecha'])[['Casos fallecidos']].sum()
df_deaths_current = df_deaths_region.reset_index().pivot(index='Fecha', columns='Region', values='Casos fallecidos')
df_city_deaths['bypop']= df_city_deaths[df_city_deaths['Comuna']=='Total']['Casos fallecidos']/(df_city_deaths[df_city_deaths['Comuna']=='Total']['Poblacion']/1000)
df_deaths_region_bypop = df_city_deaths[df_city_deaths['Comuna']=='Total'].groupby(['Region','Fecha'])[['Casos fallecidos','bypop']].sum()
df_deaths_current_bypop = df_deaths_region_bypop.reset_index().pivot(index='Fecha', columns='Region', values=['bypop', 'Casos fallecidos'])
###Number of PCR exams
df_pcr_region = pd.read_csv("https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto7/PCR_std.csv")
df_pcr_current = df_pcr_region[['Region', 'fecha', 'numero']].pivot(index='fecha', columns='Region', values='numero')
#Critical patients
df_uci_region = pd.read_csv("https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto8/UCI_std.csv")
df_uci_current = df_uci_region[['Region', 'fecha', 'numero']].pivot(index='fecha', columns='Region', values='numero')
#Imports national data
df = pd.read_csv('https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto5/TotalesNacionales_T.csv',
error_bad_lines=False
)
df= df.set_index('Fecha')
#Checking for data correlations
#correlations = df[['Casos totales',
# 'Casos recuperados',
# 'Fallecidos',
# 'Casos activos',
# 'Casos nuevos totales',
# 'Casos nuevos con sintomas',
# 'Casos nuevos sin sintomas']].corr()
#px.imshow(correlations)
#Plotting Symptomatic cases
#px.line(df['Casos nuevos con sintomas'],
# y= 'Casos nuevos con sintomas',
# title= "Asymptomatic cases",
# labels= dict({'Casos nuevos con sintomas':'Number of Symptomatic cases',
# 'Fecha':'Date'})
# )
#Plotting Asymptomatic cases
#px.line(df['Casos nuevos sin sintomas'],
# y= 'Casos nuevos sin sintomas',
# title= "Asymptomatic cases",
# labels= dict({'Casos nuevos sin sintomas':'Number of Asymptomatic cases',
# 'Fecha':'Date'})
# )
#Plotting Total new cases
#px.line(df['Casos nuevos totales'],
# y= 'Casos nuevos totales',
# title= "Daily cases",
# labels= {'Casos nuevos totales':'Number of cases',
# 'Fecha':'Date'}
# )
#Plotting Total Chilean cases
#px.line(df['Casos totales'],
# y= 'Casos totales',
# title= "Total Chilean cases",
# labels= {'Fecha':'Date'}
# ).update_layout(
# yaxis_title='Number of cases')
# Total cases in logarithmic scales
#px.line(df['Casos totales'],
# y= 'Casos totales',
# title= "Total Chilean cases in logarithmic scale",
# labels= dict({'Casos totales':'Log10(Number of cases)',
# 'Fecha':'Date'}),
# log_y = True
# )
# Adds PCR test data
pcr_cases = | pd.read_csv('https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto7/PCR_T.csv') | pandas.read_csv |
"""
Power Flow Analysis: Support Functions
Created By:
<NAME>
<NAME>
"""
import numpy as np
from numpy.linalg import inv
import pandas as pd
"""
Imports Bus and line data from Excel sheets.
Takes in an array containing ['File Location', 'Sheet Name'].
Returns two pandas data frames for the bus and line data.
"""
def import_BusAndLineData(BusData_Location, LineData_Location):
BusData = | pd.read_excel(BusData_Location[0], sheet_name=BusData_Location[1]) | pandas.read_excel |
import pandas as pd
import numpy as np
def read_superratings(s):
return pd.read_excel(pd.ExcelFile(s))
def colour_green_dark(x):
return '\cellcolor{CT_green1}(' + str(int(x)) + ')'
def colour_green_light(x):
return '\cellcolor{CT_green2}(' + str(int(x)) + ')'
def colour_red_light(x):
return '(' + str(int(x)) + ')'
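# Small, hedged illustration of the colour helpers above: they emit the LaTeX
# cell strings used later when ranking funds (CT_green1/CT_green2 are assumed
# to be colour names defined in the report's LaTeX preamble).
def _example_rank_colouring():
    return [
        colour_green_dark(3),    # -> '\cellcolor{CT_green1}(3)'  (top band)
        colour_green_light(20),  # -> '\cellcolor{CT_green2}(20)' (middle band)
        colour_red_light(40),    # -> '(40)'                      (unshaded)
    ]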
if __name__ == '__main__':
file_path = 'C:/Users/mnguyen/LGSS/Investments Team - SandPits - SandPits/data/input/vendors/superratings/2021/06/SuperRatings FCRS June 2021.xlsx'
lgs_fund_list = [
'Local Government Super Accum - High Growth',
'Local Government Super Accum - Balanced Growth',
'Local Government Super Accum - Balanced',
'Local Government Super Accum - Conservative',
'Local Government Super Accum - Managed Cash'
]
as_fund_list = [
'Active Super - High Growth',
'Active Super - Balanced Growth',
'Active Super - Balanced',
'Active Super - Conservative',
'Active Super - Managed Cash'
]
sr_index_list = [
'SR50 Growth (77-90) Index',
'SR50 Balanced (60-76) Index',
'SR25 Conservative Balanced (41-59) Index',
'SR50 Capital Stable (20-40) Index',
'SR50 Cash Index'
]
sr_index_50 = [
'SR50 Growth (77-90) Index',
'SR50 Balanced (60-76) Index',
'SR50 Capital Stable (20-40) Index',
'SR50 Cash Index'
]
sr_index_25 = [
'SR25 Conservative Balanced (41-59) Index',
]
comparison_list = [
'Local Government Super',
'Aware Super',
'LGIAsuper',
'Vision SS',
'Not for Profit Fund Median'
]
comparison_list1 = [
'LGIAsuper Accum - Aggressive',
'Local Government Super Accum - High Growth',
'Active Super - High Growth',
'Vision SS - Growth',
'Aware Super (previously First State Super) - Growth',
'Aware Super - Growth',
'LGIAsuper Accum - Diversified Growth',
'Local Government Super Accum - Balanced Growth',
'Active Super - Balanced Growth',
'Vision SS - Balanced Growth',
'Aware Super (previously First State Super) - Balanced Growth',
'Aware Super - Balanced Growth',
'LGIAsuper Accum - Balanced',
'Local Government Super Accum - Balanced',
'Active Super - Balanced',
'Vision SS - Balanced',
'Aware Super (previously First State Super) - Conservative Growth',
'Aware Super - Conservative Growth',
'LGIAsuper Accum - Stable',
'Local Government Super Accum - Conservative',
'Active Super - Conservative',
'Vision SS - Conservative',
'Aware Super (previously First State Super) Tailored Super Plan - Cash Fund',
'Aware Super Tailored Super Plan - Cash Fund',
'LGIAsuper Accum - Cash',
'Local Government Super Accum - Managed Cash',
'Active Super - Managed Cash',
'Vision SS - Cash',
'Not for Profit Fund Median',
]
column_dict = {
'Fund': 'Fund',
'SR Index': 'SR Index',
'Size $Mill': '$Mill',
'Size Rank': 'Size Rank',
'Monthly Return %': '1 Month %',
'Monthly Return Rank': '1 Month Rank',
'Quarterly Return %': '3 Month %',
'Quarterly Return Rank': '3 Month Rank',
'FYTD %': 'FYTD %',
'FYTD Rank': 'FYTD Rank',
'Rolling 1 Year %': '1 Year %',
'Rolling 1 Year Rank': '1 Year Rank',
'Rolling 3 Year %': '3 Year %',
'Rolling 3 Year Rank': '3 Year Rank',
'Rolling 5 Year %': '5 Year %',
'Rolling 5 Year Rank': '5 Year Rank',
'Rolling 7 Year %': '7 Year %',
'Rolling 7 Year Rank': '7 Year Rank',
'Rolling 10 Year %': '10 Year %',
'Rolling 10 Year Rank': '10 Year Rank',
}
column_rank_list = [
'Size Rank',
'Monthly Return Rank',
'Quarterly Return Rank',
'FYTD Rank',
'Rolling 1 Year Rank',
'Rolling 3 Year Rank',
'Rolling 5 Year Rank',
'Rolling 7 Year Rank',
'Rolling 10 Year Rank',
]
short_name_dict = {
'Aware Super': 'Aware',
'Aware Super Tailored Super Plan': 'Aware',
'LGIAsuper': 'LGIA',
'Local Government Super': 'LGS',
'Active Super': 'Active Super',
'Vision SS': 'Vision',
'Not for Profit Fund Median': 'NFP Median'
}
df_0 = read_superratings(file_path)
print("Reporting date: ", max(df_0['Date']).date())
df_0['Fund'] = [(str(x).split(' - '))[0] for x in df_0['Option Name']]
df_0['Fund'] = [(str(x).split(' ('))[0] for x in df_0['Fund']]
df_0['Fund'] = [(str(x).split(' Accum'))[0] for x in df_0['Fund']]
# for column_rank in column_rank_list:
#
# df_0[column_rank] = ['(' + str(int(x)) + ')' if pd.notna(x) else np.nan for x in df_0[column_rank]]
df_1 = df_0[df_0['SR Index'].isin(sr_index_list)].reset_index(drop=True)
df_1_a = df_1[df_1['Option Name'].isin(as_fund_list)]
df_1_b = df_1[~df_1['Option Name'].isin(as_fund_list)]
df_1_a = df_1_a.reset_index(drop=False)
df_1_b = df_1_b.reset_index(drop=False)
df_1_a_25 = df_1_a[df_1_a['SR Index'].isin(sr_index_25)]
df_1_a_50 = df_1_a[df_1_a['SR Index'].isin(sr_index_50)]
for column_rank in column_rank_list:
df_1_a_25[column_rank] = [
colour_green_dark(x) if x != '-' and int(x) <= 6 else
colour_green_light(x) if x != '-' and int(x) <= 13 else
colour_red_light(x) if x != '-' else
np.nan
for x in df_1_a_25[column_rank]
]
df_1_a_50[column_rank] = [
colour_green_dark(x) if x != '-' and int(x) <= 13 else
colour_green_light(x) if x != '-' and int(x) <= 25 else
colour_red_light(x) if x != '-' else
np.nan
for x in df_1_a_50[column_rank]
]
df_1_b[column_rank] = ['(' + str(int(x)) + ')' if pd.notna(x) else np.nan for x in df_1_b[column_rank]]
df_1_a_colour = pd.concat([df_1_a_25, df_1_a_50]).sort_values(['index'])
df_1 = pd.concat([df_1_a_colour, df_1_b]).sort_values(['index']).drop(columns=['index'], axis=1)
# df_2 = df_1[df_1['Fund'].isin(comparison_list)].reset_index(drop=True)
df_2 = df_1[df_1['Option Name'].isin(comparison_list1)].reset_index(drop=True)
df_3 = df_2[column_dict]
df_4 = df_3.rename(columns=column_dict)
df_4['Fund'] = [short_name_dict[x] for x in df_4['Fund']]
sr_index_to_df = dict(list(df_4.groupby(['SR Index'])))
for sr_index, df_temp0 in sr_index_to_df.items():
#df_temp1 = df_temp0.drop(columns=['SR Index'], axis=1)
df_temp1 = df_temp0[['Fund']]
df_temp2 = df_temp0[['$Mill', 'Size Rank']]
df_temp3 = df_temp0[[
'FYTD %',
'FYTD Rank',
'1 Year %',
'1 Year Rank',
'3 Year %',
'3 Year Rank',
'5 Year %',
'5 Year Rank',
'7 Year %',
'7 Year Rank',
'10 Year %',
'10 Year Rank'
]]
columns_temp_multilevel1 = | pd.MultiIndex.from_product([[''], ['Fund']]) | pandas.MultiIndex.from_product |
from typing import Iterable, Dict, Union, Tuple, Set, Hashable, Optional, Any
from itertools import chain as iter_chain
from multiprocessing import cpu_count
from logging import Logger
from pandas import Series, DataFrame, Index, MultiIndex
import pandas as pd
from numpy import ndarray
import numpy as np
import numexpr as ne
import numba as nb
from .api import (AbstractSymbol, ExpressionGroup, ChoiceNode, NumberSymbol, VectorSymbol, TableSymbol, MatrixSymbol,
ExpressionSubGroup)
from .exceptions import ModelNotReadyError
from .core import (worker_nested_probabilities, worker_nested_sample, worker_multinomial_probabilities,
worker_multinomial_sample, fast_indexed_add, UtilityBoundsError)
from .parsing.constants import NAN_STR, NEG_INF_STR, NEG_INF_VAL, OUT_STR, RESERVED_WORDS
class ChoiceModel(object):
def __init__(self, *, precision: int = 8, debug_id: Tuple[int, int] = None):
# Tree data
self._max_level: int = 0
self._all_nodes: Dict[str, ChoiceNode] = {}
self._top_nodes: Set[ChoiceNode] = set()
# Scope and expressions
self._expressions: ExpressionGroup = ExpressionGroup()
self._scope: Dict[str, AbstractSymbol] = {}
# Index objects
self._decision_units: Optional[Index] = None
# Cached items
self._cached_cols: Optional[Index] = None
self._cached_utils: Optional[DataFrame] = None
# Other
self._precision: int = 0
self.precision: int = precision
self.debug_id: Optional[Tuple[int, int]] = debug_id # debug_id must be a valid label used to search an Index
self.debug_results: Optional[DataFrame] = None
@property
def precision(self) -> int:
"""The number of bytes used to store floating-point utilities. Can only be 4 or 8"""
return self._precision
@precision.setter
def precision(self, i: int):
assert i in {4, 8}, f"Only precision values of 4 or 8 are allowed (got {i})"
self._precision = i
@property
def _partial_utilities(self) -> DataFrame:
self.validate(expressions=False, assignment=False)
if self._cached_utils is None:
dtype = np.dtype(f"f{self._precision}")
matrix = np.zeros(shape=[len(self.decision_units), len(self.choices)], dtype=dtype)
table = DataFrame(matrix, index=self.decision_units, columns=self.choices)
self._cached_utils = table
else:
table = self._cached_utils
return table
# region Tree operations
def _create_node(self, name: str, logsum_scale: float, parent: ChoiceNode = None) -> ChoiceNode:
expected_namespace = name
if parent is None and name in self._all_nodes:
old_node = self._all_nodes.pop(name) # Remove from model dictionary
self._top_nodes.remove(old_node) # Remove from top-level choices
elif parent is not None:
expected_namespace = parent.full_name + '.' + name
if expected_namespace in self._all_nodes:
del self._all_nodes[expected_namespace] # Remove from model dictionary
level = 1 if parent is None else (parent.level + 1)
node = ChoiceNode(self, name, parent=parent, logsum_scale=logsum_scale, level=level)
self._all_nodes[expected_namespace] = node
return node
def add_choice(self, name: str, logsum_scale: float = 1.0) -> ChoiceNode:
"""Create and add a new discrete choice to the model, at the top level. Returns a node object which can also add
nested choices, and so on. Choice names must only be unique within a given nest, although for clarity it is
recommended that choice names are unique across all nests (especially when sampling afterwards).
The model preserves the order of insertion of choices.
Args:
name: The name of the choice to be added. The name will also appear in the returned Series or DataFrame when
the model is run in discrete mode.
logsum_scale: The "theta" parameter, commonly referred to as the logsum scale. Must be in the interval
(0, 1.0].
Returns:
ChoiceNode: The added choice node, which also has an "add_choice" method for constructing nested models.
"""
if self._cached_cols is not None:
self._cached_cols = None
if self._cached_utils is not None:
self._cached_utils = None
node = self._create_node(name, logsum_scale)
self._top_nodes.add(node)
return node
def add_choices(self, names: Iterable[str], logsum_scales: Iterable[float] = None) -> Dict[str, ChoiceNode]:
"""Convenience function for batch-adding several choices at once (for a multinomial logit model). See
``add_choice()`` for more details.
The model preserves the order of insertion of choices.
Args:
names: Iterable of string names of choices.
logsum_scales: Iterable of logsum scale parameters (see add_choice). Must be the same length as `names`, if
provided
Returns:
dict: Mapping of name: ChoiceNode for the added nodes
"""
if self._cached_cols is not None:
self._cached_cols = None
if self._cached_utils is not None:
self._cached_utils = None
if logsum_scales is None:
logsum_scales = [1.0 for _ in names]
retval = {}
for name, logsum_scale in zip(names, logsum_scales):
node = self._create_node(name, logsum_scale)
retval[name] = node
self._top_nodes.add(node)
return retval
@property
def choices(self) -> Index:
"""Pandas Index representing the choices in the model"""
if self._cached_cols is not None:
return self._cached_cols
self.validate(decision_units=False, expressions=False, assignment=False)
max_level = self.depth
if max_level == 1:
return Index(self._all_nodes.keys())
else:
nested_tuples = [node.nested_id(max_level) for node in self._all_nodes.values()]
level_names = ['root']
for i in range(1, max_level):
level_names.append(f'nest_{i + 1}')
return MultiIndex.from_tuples(nested_tuples, names=level_names)
@property
def elemental_choices(self) -> Index:
"""For a nested model, return the Index of 'elemental' choices without children that are available to be
chosen."""
max_level = self.depth
if max_level == 1:
return self.choices
elemental_tuples = []
for node in self._all_nodes.values():
if node.is_parent:
continue
elemental_tuples.append(node.nested_id(max_level))
return MultiIndex.from_tuples(elemental_tuples)
@property
def depth(self) -> int:
"""The maximum number of levels in a nested logit model. By definition, multinomial models have a depth of 1"""
return max(node.level for node in self._all_nodes.values())
def _flatten(self) -> Tuple[ndarray, ndarray, ndarray, ndarray]:
"""Converts nested structure to arrays for Numba-based processing"""
max_level = self.depth
assert max_level > 1
n_nodes = len(self._all_nodes)
hierarchy = np.full(n_nodes, -1, dtype='i8')
levels = np.zeros(n_nodes, dtype='i8')
logsum_scales = np.ones(n_nodes, dtype='f8')
bottom_flags = np.full(n_nodes, True, dtype='?')
node_positions = {node.full_name: i for i, node in enumerate(self._all_nodes.values())}
for node in self._all_nodes.values():
position = node_positions[node.full_name]
levels[position] = node.level - 1 # Internal levels start at 1.
if node.parent is not None:
parent_position = node_positions[node.parent.full_name]
hierarchy[position] = parent_position
if node.is_parent:
logsum_scales[position] = node.logsum_scale
bottom_flags[position] = False
return hierarchy, levels, logsum_scales, bottom_flags
# endregion
# region Expressions and scope operations
@property
def decision_units(self) -> Index:
"""The units or agents or OD pairs over which choices are to be evaluated. MUST BE SET before symbols can be
assigned, or utilities calculated; otherwise ModelNotReadyError will be raised"""
if self._decision_units is None:
raise ModelNotReadyError("No decision units defined")
return self._decision_units
@decision_units.setter
def decision_units(self, item):
"""The units or agents or OD pairs over which choices are to be evaluated. MUST BE SET before symbols can be
assigned, or utilities calculated; otherwise ModelNotReadyError will be raised."""
# If there are any assigned symbols, clear them so as not to conflict with the new decision units
for symbol in self._scope.values():
if isinstance(symbol, NumberSymbol):
continue # Don't empty symbols that don't depend on the DU.
symbol.empty()
if isinstance(item, Index):
self._decision_units = item
else:
self._decision_units = Index(item)
@staticmethod
def _check_symbol_name(name: str):
# TODO: Check function names from NumExpr
if name in RESERVED_WORDS:
raise SyntaxError(f"Symbol name `{name}` cannot be used as it is a reserved keyword.")
def declare_number(self, name: str):
"""Declares a simple scalar variable, of number, boolean, or text type"""
self._check_symbol_name(name)
symbol = NumberSymbol(self, name)
self._scope[name] = symbol
def declare_vector(self, name: str, orientation: int):
"""Declares a vector variable. Vectors can be aligned with the decision units (rows, orientation=0) or choices
(columns, orientation=1). Supports NumPy arrays or Pandas Series objects.
Args:
name: Name of the variable to declare
orientation: 0 if oriented to the decision units/rows, 1 if oriented to the choices/columns
"""
self._check_symbol_name(name)
self._scope[name] = VectorSymbol(self, name, orientation)
def declare_table(self, name: str, orientation: int, mandatory_attributes: Set[str] = None,
allow_links: bool = True):
"""Declares a table variable. Similar to vectors, tables can align with either the decision units (rows,
orientation=0) or choices (columns, orientation=1), but allow for more complex attribute lookups. For ideal
usage, all columns in the specified table should be valid Python variable names, as otherwise "dotted" access
will not work in utility computation. LinkedDataFrames are fully supported (and even encouraged).
Args:
name: Name of the variable to declare
orientation: 0 if oriented to the decision units/rows, 1 if oriented to the choices/columns
            mandatory_attributes: Optional set of attribute (column) names that the assigned table is
                required to provide.
            allow_links: If True (the default), linked attributes on a LinkedDataFrame may be referenced
                in utility expressions.
"""
self._check_symbol_name(name)
self._scope[name] = TableSymbol(self, name, orientation, mandatory_attributes, allow_links)
def declare_matrix(self, name: str, orientation: int = 0, reindex_cols: bool = True, reindex_rows: bool = True,
fill_value: Any = 0):
"""Declares a matrix that fully or partially aligns with the rows or columns. This is useful when manual control
is needed over both the decision units and the choices. Only DataFrames are supported.
Args:
name: Name of the variable to declare
orientation: 0 if the index/columns are oriented to the decision units/choices, 1 if oriented to the
choices/decision units.
reindex_cols: If True, allows the model to expand the assigned matrix over the decision units, filling any
missing values with 0
reindex_rows: If True, allows the model to expand the assigned matrix over the choices, filling any
missing values with 0
fill_value: The fill value to use for missing rows or columns when reindex_cols=True or reindex_rows=True.
The dtype of fill_value should be compatible with the dtype of the data being assigned, otherwise it
could lead to problems. Defaults to 0.
"""
self._check_symbol_name(name)
self._scope[name] = MatrixSymbol(self, name, orientation, reindex_cols, reindex_rows, fill_value=fill_value)
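    # Illustrative sketch of the orientation convention shared by the declare_*
    # methods above (an interpretation, not library documentation):
    #   model.declare_vector('income', orientation=0)  # one value per decision unit (rows)
    #   model.declare_vector('asc', orientation=1)     # one value per choice (columns)
    #   model.declare_matrix('skim', orientation=0)    # index -> decision units, columns -> choices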
def __getitem__(self, item) -> AbstractSymbol:
"""Gets a declared symbol to be assigned"""
return self._scope[item]
def clear_scope(self):
self._scope.clear()
@property
def expressions(self) -> ExpressionGroup:
return self._expressions
@expressions.setter
def expressions(self, item):
for expr in item:
self._expressions.append(expr)
# endregion
# region Run methods
def validate(self, *, tree: bool = True, decision_units: bool = True, expressions: bool = True,
assignment: bool = True, group: Hashable = None):
"""Checks that the model components are self-consistent and that the model is ready to run. Optionally, some
components can be skipped, in order to partially validate a model under construction.
Also gets called internally by the model at various stages
Args:
tree: Checks that all nested nodes have two or more children.
decision_units: Checks that the decision units have been assigned.
expressions: Checks that expressions use declared symbols.
assignment: Checks that used and declared symbols have been assigned
group: If not ``None``, checks the expressions for only the specified group. This also applies to the
assignment check. Otherwise, all expressions and symbols will be checked.
Raises:
ModelNotReadyError: if any check fails.
"""
def assert_valid(condition, message):
if not condition:
raise ModelNotReadyError(message)
if tree:
assert_valid(len(self._top_nodes) >= 2, "At least two or more choices must be defined")
for c in self._all_nodes.values():
n_children = c.n_children
assert_valid(n_children != 1, f"Nested choice '{c.full_name}' cannot have exactly one child node")
if decision_units:
assert_valid(self.decision_units is not None, "Decision units must be defined.")
if not expressions and not assignment:
return
expr_container = self._expressions if group is None else self._expressions.get_group(group)
symbols_to_check = list(expr_container.itersimple()) + list(expr_container.iterchained())
for name in symbols_to_check:
if name in RESERVED_WORDS:
continue # These gets added in manually later.
assert_valid(name in self._scope, f"Symbol '{name}' used in expressions but has not been declared")
if assignment:
assert_valid(self._scope[name].filled, f"Symbol '{name}' is declared but never assigned")
def run_discrete(self, *, random_seed: int = None, n_draws: int = 1, astype: Union[str, np.dtype] = 'category',
squeeze: bool = True, n_threads: int = 1, clear_scope: bool = True, result_name: str = None,
logger: Logger = None, scale_utilities: bool = True) -> Tuple[Union[DataFrame, Series], Series]:
"""For each decision unit, discretely sample one or more times (with replacement) from the probability
distribution.
Args:
random_seed: The random seed for drawing uniform samples from the Monte Carlo.
n_draws: The number of times to draw (with replacement) for each record. Must be >= 1. Run time is
proportional to the number of draws.
astype: The dtype of the return array; the result will be cast to the
given dtype. The special value 'category' returns a Categorical Series (or a DataFrame for n_draws > 1).
The special value 'index' returns the positional index in the sorted array of node names.
squeeze: Only used when n_draws == 1. If True, then a Series will be returned, otherwise a DataFrame
with one column will be returned.
n_threads: The number of threads to uses in the computation. Must be >= 1
clear_scope: If True and override_utilities not provided, data stored in the scope for
utility computation will be released, freeing up memory. Turning this off is of limited use.
result_name: Name for the result Series or name of the columns of the result DataFrame. Purely aesthetic.
logger: Optional Logger instance which reports expressions being evaluated
scale_utilities: For a nested model, if True then lower-level utilities will be divided by the logsum scale
of the parent nest. If False, no scaling is performed. This is entirely dependant on the reported form
of estimated model parameters.
Returns:
Tuple[DataFrame or Series, Series]: The first item returned is always the results of the model evaluation,
representing the choice(s) made by each decision unit. If n_draws > 1, the result is a DataFrame, with
n_draws columns, otherwise a Series. The second item is the top-level logsum term from the logit model,
for each decision unit. This is always a Series, as its value doesn't change with the number of draws.
"""
self.validate()
if random_seed is None:
random_seed = np.random.randint(1, 1000)
assert n_draws >= 1
# Utility computations
utility_table = self._evaluate_utilities(self._expressions, n_threads=n_threads, logger=logger).values
if clear_scope:
self.clear_scope()
# Compute probabilities and sample
nb.set_num_threads(n_threads) # Set the number of threads for parallel execution
nested = self.depth > 1
if nested:
hierarchy, levels, logsum_scales, bottom_flags = self._flatten()
raw_result, logsum = worker_nested_sample(utility_table, hierarchy, levels, logsum_scales, bottom_flags,
n_draws, random_seed, scale_utilities=scale_utilities)
else:
raw_result, logsum = worker_multinomial_sample(utility_table, n_draws, random_seed)
# Finalize results
logsum = Series(logsum, index=self.decision_units)
result = self._convert_result(raw_result, astype, squeeze, result_name)
return result, logsum
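    # A hedged end-to-end sketch of how run_discrete() is typically driven. The
    # choice names, symbol names and expression source are illustrative
    # assumptions, and the exact call used to assign data to a declared symbol
    # (obtained via model[...]) is intentionally left out:
    #   model = ChoiceModel()
    #   model.add_choices(['auto', 'transit', 'walk'])
    #   model.decision_units = households.index
    #   model.declare_number('b_cost')
    #   model.declare_table('hh', orientation=0)
    #   ... assign data through model['b_cost'] and model['hh'] ...
    #   model.expressions = expressions_defined_elsewhere
    #   choices, logsums = model.run_discrete(n_draws=1, random_seed=42)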
def _make_column_mask(self, filter_: str) -> Union[int, None]:
if filter_ is None:
return None
col_index = self.choices
column_depth = col_index.nlevels
filter_parts = filter_.split('.')
if column_depth == 1:
assert len(filter_parts) == 1
index_item = filter_parts[0]
else:
assert len(filter_parts) <= column_depth
index_item = tuple(filter_parts + ['.'] * (column_depth - len(filter_parts)))
return col_index.get_loc(index_item) # Get the column number for the selected choice
def _evaluate_utilities(self, expressions: Union[ExpressionGroup, ExpressionSubGroup],
n_threads: int = None, logger: Logger = None, allow_casting=True) -> DataFrame:
if self._decision_units is None:
raise ModelNotReadyError("Decision units must be set before evaluating utility expressions")
if n_threads is None:
n_threads = cpu_count()
row_index = self._decision_units
col_index = self.choices
# if debug, get index location of corresponding id
debug_label = None
debug_expr = []
debug_results = []
if self.debug_id:
debug_label = row_index.get_loc(self.debug_id)
utilities = self._partial_utilities.values
# Prepare locals, including scalar, vector, and matrix variables that don't need any further processing.
shared_locals = {NAN_STR: np.nan, OUT_STR: utilities, NEG_INF_STR: NEG_INF_VAL}
for name in expressions.itersimple():
if name in shared_locals:
continue
symbol = self._scope[name]
shared_locals[name] = symbol._get()
ne.set_num_threads(n_threads)
ne.set_vml_num_threads(n_threads)
casting_rule = 'same_kind' if allow_casting else 'safe'
for expr in expressions:
if logger is not None:
logger.debug(f"Evaluating expression `{expr.raw}`")
# TODO: Add error handling
choice_mask = self._make_column_mask(expr.filter_)
local_dict = shared_locals.copy() # Make a shallow copy of the shared symbols
# Add in any dict literals, expanding them to cover all choices
expr._prepare_dict_literals(col_index, local_dict)
# Evaluate any chains on-the-fly
for symbol_name, usages in expr.chains.items():
symbol = self._scope[symbol_name]
for substitution, chain_info in usages.items():
data = symbol._get(chain_info=chain_info)
local_dict[substitution] = data
self._kernel_eval(expr.transformed, local_dict, utilities, choice_mask, casting_rule=casting_rule)
# save each expression and values for a specific od pair
if self.debug_id:
debug_expr.append(expr.raw)
debug_results.append(utilities[debug_label].copy())
nans = np.isnan(utilities)
n_nans = nans.sum()
if n_nans > 0:
raise UtilityBoundsError(f"Found {n_nans} cells in utility table with NaN")
if self.debug_id: # expressions.tolist() doesn't work...
self.debug_results = DataFrame(debug_results, index=debug_expr, columns=col_index)
return DataFrame(utilities, index=row_index, columns=col_index)
@staticmethod
def _kernel_eval(transformed_expr: str, local_dict: Dict[str, np.ndarray], out: np.ndarray, column_index,
casting_rule='same_kind'):
if column_index is not None:
for key, val in local_dict.items():
if hasattr(val, 'shape'):
if val.shape[1] > 1:
local_dict[key] = val[:, column_index]
elif val.shape[1] == 1:
local_dict[key] = val[:, 0]
out = out[:, column_index]
expr_to_run = f"{OUT_STR} + ({transformed_expr})"
ne.evaluate(expr_to_run, local_dict=local_dict, out=out, casting=casting_rule)
def _convert_result(self, raw_result: ndarray, astype, squeeze: bool, result_name: str) -> Union[Series, DataFrame]:
n_draws = raw_result.shape[1]
column_index = pd.RangeIndex(n_draws, name=result_name)
record_index = self.decision_units
if astype == 'index':
if squeeze and n_draws == 1:
return | pd.Series(raw_result[:, 0], index=record_index, name=result_name) | pandas.Series |
"""
Purpose: To simulate expected educational attainment gains from embryo selection between families.
Date: 10/09/2019
"""
import numpy as np
import pandas as pd
from scipy.stats import norm
from between_family_ea_simulation import (
get_random_index,
get_max_pgs_index,
select_embryos_by_index,
calc_phenotype_diffs
)
import argparse
def calc_within_family_values(n, num_embryos, heritability, correlation_mz, rsquared):
"""
Purpose: To get the ghat_i and y_i for each family pair, where i={1,...,num_embryos}.
Arguments:
n: integer number of parent pairs
num_embryos: integer number of embryos for each parent pair
heritability: heritability of clinical trait
correlation_mz: twin correlation of clinical trait
rsquared: Ancestry-specific R^2 value for PGS prediction of trait
Returns:
{'pgs':df_pgs, 'liability':df_liability}.
Each dataframe has size (n x num_embryos) and holds polygenic scores and phenotype liability values, respectively.
"""
df_a = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
"""
Functions for generating and processing bed files
"""
import pandas as pd
import subprocess
pd.set_option('mode.chained_assignment', None)
def combineBeds(beds):
'''
Concatenate bed files into a single output and sort by chromosome then
start position
'''
rows = []
for bed in beds:
with open(bed) as inf:
for line in inf:
rows.append(line.strip().split("\t"))
if len(rows) != 0:
# sort the combined bed file
df = | pd.DataFrame(rows) | pandas.DataFrame |
#! /usr/bin/python3
print('this is cell-analyzer v0.1.0' + '\n')
print('preparing image segmentation run...' + '\n')
import os
import glob
import numpy as np
import pandas as pd
import skimage as sk
import seaborn as sns
import matplotlib.pyplot as plt
import scipy.cluster.hierarchy as shc
from datetime import datetime as dt
from matplotlib.colors import ListedColormap, LogNorm
from matplotlib import cm
from skimage import exposure, feature, filters, measure, morphology, segmentation
from scipy import ndimage as ndi
from sklearn.cluster import AgglomerativeClustering
from sklearn.preprocessing import StandardScaler
from sklearn.manifold import TSNE
import umap
import warnings
warnings.filterwarnings("ignore")
def process_image(img, norm_window, min_hole_size, min_cell_size, extrema_blur, peak_sep, name='temp.TIF', save_path = '.'):
img_dims = np.shape(img)
print('image dimensions: ', img_dims)
if len(img_dims) < 3:
n_chan = 1
content = img
v_min, v_max = np.percentile(content, (1,99))
content_scaled = exposure.rescale_intensity(content, in_range=(v_min, v_max))
else:
# handle if first channel is blank
if np.mean(img[:,:,0]) < 1:
img = img[:,:,1:]
img_dims = np.shape(img)
# handle other blank channels
n_chan = img_dims[2]
base = img[:,:,0]
# restack image, excluding blank channels
for channel in range(1, n_chan):
if np.sum(img[:,:,channel]) > (img_dims[0] * img_dims[1] * 0.2):
base = np.stack((base, img[:,:,channel]), axis=2)
img = base
img_dims = np.shape(img)
n_chan = img_dims[2]
### custom colormaps
N = 256
blank = np.zeros(N)
gray = np.linspace(0, 1, N)
# blue
blues = np.ones((N,4))
blues[:,0] = blank
blues[:,1] = blank
blues[:,2] = gray
blue_cmap = ListedColormap(blues)
# green
greens = np.ones((N,4))
greens[:,0] = blank
greens[:,1] = gray
greens[:,2] = blank
green_cmap = ListedColormap(greens)
# red
reds = np.ones((N,4))
reds[:,0] = gray
reds[:,1] = blank
reds[:,2] = blank
red_cmap = ListedColormap(reds)
# separate and scale channels for vis
content = np.sum(img, axis=2)
v_min, v_max = np.percentile(content, (1,99))
content_scaled = exposure.rescale_intensity(content, in_range=(v_min, v_max))
if n_chan >= 1:
dapi = img[:,:,0]
v_min, v_max = np.percentile(dapi, (1,99))
dapi_scaled = exposure.rescale_intensity(dapi, in_range=(v_min, v_max))
if n_chan >= 2:
gfp = img[:,:,1]
v_min, v_max = np.percentile(gfp, (1,99))
gfp_scaled = exposure.rescale_intensity(gfp, in_range=(v_min, v_max))
if n_chan >= 3:
txred = img[:,:,2]
v_min, v_max = np.percentile(txred, (1,99))
txred_scaled = exposure.rescale_intensity(txred, in_range=(v_min, v_max))
if n_chan == 4:
cy5 = img[:,:,3]
v_min, v_max = np.percentile(cy5, (1,99))
cy5_scaled = exposure.rescale_intensity(cy5, in_range=(v_min, v_max))
if n_chan > 4:
print('handling of more than 4 image channels not supported')
### handle single high-res or stitched low-res images (large dimensions)
if np.logical_and(np.shape(img)[0] < 2500, np.shape(img)[1] < 2500):
# correct image and create content mask
bg = filters.threshold_local(content, norm_window)
norm = content / bg
blur = filters.gaussian(norm, sigma=2)
# blur = filters.gaussian(content, sigma=2)
otsu = filters.threshold_otsu(blur)
mask = blur > otsu
mask_filled = morphology.remove_small_holes(mask, min_hole_size)
selem = morphology.disk(3)
mask_opened = morphology.binary_opening(mask_filled, selem)
mask_filtered = morphology.remove_small_objects(mask_opened, min_cell_size)
heavy_blur = filters.gaussian(content, extrema_blur)
blur_masked = heavy_blur * mask_filtered
else:
blur = filters.gaussian(content, sigma=2)
otsu = filters.threshold_otsu(blur)
mask = blur > otsu
mask_filtered = mask
blur_masked = mask * blur
# find local maxima
coords = feature.peak_local_max(blur_masked, min_distance=peak_sep)
coords_T = []
coords_T += coords_T + [[point[1], point[0]] for point in coords]
coords_T = np.array(coords_T)
markers = np.zeros(np.shape(content))
for i, point in enumerate(coords):
markers[point[0], point[1]] = i
# generate labeled cells
rough_labels = measure.label(mask_filtered)
distance = ndi.distance_transform_edt(mask_filtered)
ws = segmentation.watershed(-distance, markers, connectivity=2, watershed_line=True)
labeled_cells = ws * mask_filtered
# measure and store image channel props from content mask
print('# of content channels (n_chan): ', n_chan)
cell_props = {}
if n_chan > 1:
# store and gate dapi
dapi_props = measure.regionprops(labeled_cells, dapi)
cell_props['dapi_props'] = dapi_props
dapi_blur = filters.gaussian(dapi)
dapi_otsu = filters.threshold_otsu(dapi_blur)
dapi_mask = dapi_blur > dapi_otsu
gated_dapi = dapi_mask * labeled_cells
if n_chan >= 2:
# store and gate gfp
gfp_props = measure.regionprops(labeled_cells, gfp)
cell_props['gfp_props'] = gfp_props
gfp_blur = filters.gaussian(gfp)
gfp_otsu = filters.threshold_otsu(gfp_blur)
gfp_mask = gfp_blur > gfp_otsu
gated_gfp = gfp_mask * labeled_cells
if n_chan >= 3:
# store and gate txred
txred_props = measure.regionprops(labeled_cells, txred)
cell_props['txred_props'] = txred_props
txred_blur = filters.gaussian(txred)
txred_otsu = filters.threshold_otsu(txred_blur)
txred_mask = txred_blur > txred_otsu
gated_txred = txred_mask * labeled_cells
if n_chan == 4:
# store and gate cy5
cy5_props = measure.regionprops(labeled_cells, cy5)
cell_props['cy5_props'] = cy5_props
cy5_blur = filters.gaussian(cy5)
cy5_otsu = filters.threshold_otsu(cy5_blur)
cy5_mask = cy5_blur > cy5_otsu
gated_cy5 = cy5_mask * labeled_cells
content_props = measure.regionprops(labeled_cells, content)
cell_props['image_content'] = content_props
# define custom label mask colormap
plasma = cm.get_cmap('plasma', 256)
newcolors = plasma(np.linspace(0, 1, 256))
newcolors[0, :] = [0, 0, 0, 1]
custom_cmap = ListedColormap(newcolors)
# plot & return results
if n_chan == 1:
# plt.imshow(content_scaled, cmap='gray')
# plt.title('original content')
# plt.show()
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(9,5))
ax[0].imshow(content_scaled, cmap='viridis')
ax[0].set_title('scaled image')
ax[1].imshow(mask_filtered, cmap='gray')
ax[1].set_title('mask')
ax[2].imshow(labeled_cells, cmap=custom_cmap)
ax[2].plot(coords[:,1], coords[:,0], c='yellow', marker = '*', linestyle='', markersize=2)
ax[2].set_title('labels')
plt.tight_layout()
# plt.show()
plt.savefig(save_path + '/' + name[:-4] + '_cell_labels.png')
plt.close()
elif n_chan == 2:
# plt.imshow(content_scaled, cmap='gray')
# plt.title('original content')
# plt.show()
fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(12,7))
ax[0, 0].imshow(dapi_scaled, cmap=blue_cmap)
ax[0, 0].set_title('scaled dapi')
ax[0, 1].imshow(mask_filtered, cmap='gray')
ax[0, 1].set_title('image mask')
ax[0, 2].imshow(labeled_cells, cmap=custom_cmap)
ax[0, 2].plot(coords[:,1], coords[:,0], c='yellow', marker = '*', linestyle='', markersize=2)
ax[0, 2].set_title('labels')
ax[1, 0].imshow(gfp_scaled, cmap=green_cmap)
ax[1, 0].set_title('scaled gfp')
ax[1, 1].imshow(gfp_mask, cmap='gray')
ax[1, 1].set_title('gfp mask')
ax[1, 2].imshow(gated_gfp, cmap=custom_cmap)
ax[1, 2].set_title('gated gfp')
plt.tight_layout()
# plt.show()
plt.savefig(save_path + '/' + name[:-4] + '_cell_labels.png')
plt.close()
elif n_chan == 3:
# plt.imshow(content_scaled, cmap='gray')
# plt.title('original content')
# plt.show()
fig, ax = plt.subplots(nrows=3, ncols=3, figsize=(15,9))
ax[0, 0].imshow(dapi_scaled, cmap=blue_cmap)
ax[0, 0].set_title('scaled dapi')
ax[0, 1].imshow(mask_filtered, cmap='gray')
ax[0, 1].set_title('image mask')
ax[0, 2].imshow(labeled_cells, cmap=custom_cmap)
ax[0, 2].plot(coords[:,1], coords[:,0], c='yellow', marker = '*', linestyle='', markersize=2)
ax[0, 2].set_title('labels')
ax[1, 0].imshow(gfp_scaled, cmap=green_cmap)
ax[1, 0].set_title('scaled gfp')
ax[1, 1].imshow(gfp_mask, cmap='gray')
ax[1, 1].set_title('gfp mask')
ax[1, 2].imshow(gated_gfp, cmap=custom_cmap)
ax[1, 2].set_title('gated gfp')
ax[2, 0].imshow(txred_scaled, cmap=red_cmap)
ax[2, 0].set_title('scaled txred')
ax[2, 1].imshow(txred_mask, cmap='gray')
ax[2, 1].set_title('txred mask')
ax[2, 2].imshow(gated_txred, cmap=custom_cmap)
ax[2, 2].set_title('gated txred')
plt.tight_layout()
# plt.show()
plt.savefig(save_path + '/' + name[:-4] + '_cell_labels.png')
plt.close()
else:
# plt.imshow(content_scaled, cmap='gray')
# plt.title('original content')
# plt.show()
fig, ax = plt.subplots(nrows=4, ncols=3, figsize=(16,10))
ax[0, 0].imshow(dapi_scaled, cmap=blue_cmap)
ax[0, 0].set_title('scaled dapi')
ax[0, 1].imshow(mask_filtered, cmap='gray')
ax[0, 1].set_title('image mask')
ax[0, 2].imshow(labeled_cells, cmap=custom_cmap)
ax[0, 2].plot(coords[:,1], coords[:,0], c='yellow', marker = '*', linestyle='', markersize=2)
ax[0, 2].set_title('labels')
ax[1, 0].imshow(gfp_scaled, cmap=green_cmap)
ax[1, 0].set_title('scaled gfp')
ax[1, 1].imshow(gfp_mask, cmap='gray')
ax[1, 1].set_title('gfp mask')
ax[1, 2].imshow(gated_gfp, cmap=custom_cmap)
ax[1, 2].set_title('gated gfp')
ax[2, 0].imshow(txred_scaled, cmap=red_cmap)
ax[2, 0].set_title('scaled txred')
ax[2, 1].imshow(txred_mask, cmap='gray')
ax[2, 1].set_title('txred mask')
ax[2, 2].imshow(gated_txred, cmap=custom_cmap)
ax[2, 2].set_title('gated txred')
ax[3, 0].imshow(cy5_scaled, cmap='gray')
ax[3, 0].set_title('scaled cy5')
ax[3, 1].imshow(cy5_mask, cmap='gray')
ax[3, 1].set_title('cy5 mask')
ax[3, 2].imshow(gated_cy5, cmap=custom_cmap)
ax[3, 2].set_title('gated cy5')
plt.tight_layout()
# plt.show()
plt.savefig(save_path + '/' + name[:-4] + '_cell_labels.png')
plt.close()
return img, labeled_cells, cell_props, n_chan
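# Hedged single-image usage sketch for process_image(); the file name and the
# segmentation parameters are illustrative assumptions. The batch driver
# read_and_process_directory() below is the intended entry point for whole folders.
def _example_process_one_image(img_path='example.TIF'):
    img = plt.imread(img_path)
    return process_image(
        img,
        norm_window=51,      # odd window size for local background normalization
        min_hole_size=64,    # holes smaller than this are filled in the mask
        min_cell_size=100,   # objects smaller than this are discarded
        extrema_blur=8,      # heavy blur applied before peak finding
        peak_sep=10,         # minimum pixel distance between detected cell maxima
        name=os.path.basename(img_path),
        save_path='.',
    )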
def read_and_process_directory(base_directory, norm_window, min_hole_size, min_cell_size, extrema_blur, peak_sep, formatted_titles, channel_list):
# process all .tif files in a passed directory and returns results dataframe (.csv)
# set up paths
time_stamp = dt.now().strftime('%Y_%m_%d_%H_%M_%S')
save_path = '_extracted_data_%s' % time_stamp
save_path = os.path.join(base_directory, save_path)
print('base: ' + base_directory)
print('save: ' + save_path)
print('channel_list: ', channel_list)
os.mkdir(save_path)
# get paths for images in base directory
image_list = glob.glob(os.path.join(base_directory, '*.TIF')) # '.TIF' or '.tif'
image_list = image_list + glob.glob(os.path.join(base_directory, '*.tif'))
# initialize results dataframe
results_df = pd.DataFrame()
# iteratively read in images by filenames
for i, img_path in enumerate(image_list):
img = plt.imread(img_path)
name = os.path.basename(img_path)
print('\n')
print(name)
print('img ' + str(i + 1) + ' of ' + str(len(image_list)))
img, labeled_cells, cell_props, n_chan = process_image(img, norm_window, min_hole_size, min_cell_size, extrema_blur, peak_sep, name, save_path)
# save all cell quant in results dataframe
img_df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 25 09:06:22 2019
@author: jeremy_lehner
"""
import pandas as pd
from os import path
import glob
def load_champ_names():
"""
Loads the champion names from a csv file,
returns them as a pandas series
Parameters
----------
None
Returns
-------
names : pandas series
Contains champion names as strings
"""
if path.exists('./data/champion_names.csv'):
names = pd.read_csv('./data/champion_names.csv',
header=None,
squeeze=True)
else:
print('champion_names.csv cannot be found (._.)')
names = []
return names
def load_release_dates():
"""
Loads the champion release dates from a csv file,
returns them as a pandas series
Parameters
----------
None
Returns
-------
dates : pandas series
Contains champion release dates as strings 'YYYY-MM-DD'
"""
if path.exists('./data/champion_release_dates.csv'):
dates = pd.read_csv('./data/champion_release_dates.csv',
header=None,
squeeze=True)
else:
print('champion_release_dates.csv file cannot be found (._.)')
dates = []
return dates
def load_number_of_skins():
"""
Loads the number of skins for each champion from a csv file,
returns them as a pandas series
Parameters
----------
None
Returns
-------
num_skins : pandas series
Contains number of champion skins as integers
"""
if path.exists('./data/num_skins.csv'):
num_skins = pd.read_csv('./data/num_skins.csv',
header=None,
squeeze=True)
else:
print('num_skins.csv file cannot be found (._.)')
num_skins = []
return num_skins
def load_win_rates():
"""
    Loads the champion win rates and corresponding dates from csv files,
returns them in a pandas data frame
Parameters
----------
None
Returns
-------
winrates_all : pandas data frame
Contains champion win rates as floats and dates as strings
"""
path = './data/win/'
files = glob.glob(path + '*.csv')
winrates = []
for file in files:
winrates.append(pd.read_csv(file))
winrates_all = pd.concat(winrates, ignore_index=True)
return winrates_all
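# Hedged usage sketch combining the loaders above; it assumes the ./data layout
# described in the docstrings (champion_names.csv plus ./data/win/*.csv).
def _example_inspect_win_rates():
    names = load_champ_names()
    winrates = load_win_rates()
    return len(names), winrates.head()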
def load_ban_rates():
"""
    Loads the champion ban rates and corresponding dates from csv files,
returns them in a pandas data frame
Parameters
----------
None
Returns
-------
banrates_all : pandas data frame
Contains champion ban rates as floats and dates as strings
"""
path = './data/ban/'
files = glob.glob(path + '*.csv')
banrates = []
for file in files:
banrates.append( | pd.read_csv(file) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Summarize MSOE EECS SO XLSX files recursively"""
# TODO: order by outcome number, course number, section
# TODO: Add columns (N>=pro,N) to summarize as required by process.
# TODO: Format row bands per outcome
# TODO: Year should default to current AY
# TODO: Select year range
import os
import re
import argparse
from datetime import datetime
import pandas as pd
import openpyxl
TIME_TAG = datetime.now().strftime('%G%m%dT%H%M%S') # used to tag artifacts created by this run
METADATA = {'Program': 'C3', 'Course Number': 'C5', 'Quarter/Year': 'C7',
'Section': 'G5', 'Instructor': 'G7', 'Outcome': 'A10', 'Percent Proficient': 'B17'}
LEVEL = ['Exemplary', 'Accomplished', 'Proficient', 'Developing', 'Beginning']
PROGRAM = {'BME', 'CE', 'CS', 'EE', 'SE'}
# extract just outcome number from longer string
OUTCOME_NUMBER = [re.compile(r"^\((\w+)\)"), # < AY20
re.compile(r"^\[\w+\s(\d)\]")] # >= AY20
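# Illustrative (hypothetical) outcome strings matched by the patterns above:
#   "(1) Graduates will ..."   -> captures "1"   (pre-AY20 style)
#   "[SO 3] Ability to ..."    -> captures "3"   (AY20+ style)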
def get_so_data(full_path):
"""Read summary SO assessment data from the given XLSX file"""
level_rows = [24, 28, 32, 36, 40]
level_cols = {'Level_str': 'D', 'Level_int': 'F', 'Count': 'G', 'Percentage': 'F'}
workbook = openpyxl.load_workbook(full_path, data_only=True) # values, not formulas
sheet = workbook['Form']
assert sheet[level_cols['Level_str']+f'{level_rows[0]}'].value == LEVEL[0], \
'Unexpected format: did not find highest level description where expected'
data_values = []
for field, cell in METADATA.items():
value = sheet[cell].value
if field == 'Outcome':
for pattern in OUTCOME_NUMBER:
if result := pattern.match(value):
value = result[1] # discard extra text; [0] is entire match
break
data_values.append(value)
for row in level_rows:
data_values.append(sheet[level_cols['Count']+f'{row}'].value)
return data_values
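# Each row returned by get_so_data is, in order: the METADATA fields
# (Program, Course Number, Quarter/Year, Section, Instructor, Outcome,
# Percent Proficient) followed by the per-LEVEL counts
# (Exemplary, Accomplished, Proficient, Developing, Beginning).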
def main(args):
"""Summarize MSOE EECS SO XLSX files recursively"""
all_data = []
assert args.program in PROGRAM, f'Program code {args.program} is not recognized'
assert 1980 < args.year < 2999, f'Academic year ({args.year}) must be in 4-digit format'
for dirpath, _, filenames in os.walk(os.path.join(args.directory, args.program,
str(args.year))):
for filename in filenames:
if filename.endswith(".xlsx"):
full_path = os.path.join(dirpath, filename)
print(full_path)
all_data.append(get_so_data(full_path))
col_names = list(METADATA.keys())
col_names.extend(LEVEL)
    dataframe = pd.DataFrame(all_data, columns=col_names)
#! /usr/bin/env python
# make plot of copyrighter.py output table
# by gjr
"""
Get the taxon relative abundance change ratio after copy correction
% python taxa-change-ratio-copyrighter.py \
level <outfile> \
<file.before.cc.taxonomy> \
<file.after.cc.taxonomy>
"""
import sys, os, itertools, collections
from operator import itemgetter, attrgetter
import numpy
import pandas
EXCLUDE = ['Archaea', 'Eukaryota', 'unknown']
#EXCLUDE = []
TOP=20
#ORDER=True #reverse the order
ORDER=False #normal order
def readData(f):
taxa_lis = []
num_lis = []
for n, line in enumerate(open(f)):
if line.startswith('#'):
continue
line = line.rstrip()
if line == '':
continue
taxa, num = line.split('\t')
skip = False
for word in EXCLUDE:
if word in taxa:
skip = True
break
if skip:
continue
taxa = taxa.rstrip(';')
lis = taxa.split(';')
lis2 = []
for item in lis:
item = item.strip()
if item.endswith(')'):
item = item.split('(')[0].strip()
# remove taxon level prefix, e.g. 'p__Firmicutes'
if '__' in item:
item = item.split('__', 1)[1]
#item = item.strip('"')
item = item.lower()
            if 'unclassified' in item:
                item = 'Unclassified'
            elif 'unknown' in item:
                item = 'Unclassified'
            elif 'other' in item:
                item = 'Unclassified'
            elif 'unassigned' in item:
                item = 'Unclassified'
item = item.capitalize()
lis2.append(item)
taxa_lis.append(lis2)
num_lis.append(float(num))
return taxa_lis, num_lis
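# readData expects lines of the form "<taxonomy string>\t<count>", e.g. (illustrative only)
#   "Bacteria; p__Firmicutes; c__Bacilli\t123"
# and returns a list of cleaned taxon-name lists plus a parallel list of float counts.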
def main():
#Usage: python <thisFile> level <outfile> <file.taxonomy> ..
if len(sys.argv) < 3:
mes = 'Usage: python {} level <outfile> <file.taxonomy>..'
print >> sys.stderr, mes.format(os.path.basename(sys.argv[0]))
print >> sys.stderr, "*** filename.split('.')[0] will "\
"be the sample label"
sys.exit(1)
level = int(sys.argv[1])
level = level - 1
outfile = sys.argv[2]
d = {}
dCombined = {}
lisSampOrder = []
for f in sys.argv[3:]:
samp = os.path.basename(f).split('.')[0] # sample name
container, num_lis = readData(f)
tranLis = itertools.izip_longest(*container, fillvalue='Unclassified')
levelLis = list(tranLis)[level]
countD = {}
for tax, num in zip(levelLis, num_lis):
countD[tax] = countD.get(tax, 0) + num
total = sum(countD.values())
d[samp] = dict((taxa, countD[taxa]*1.0/total) for taxa in countD)
for key in d[samp]:
dCombined[key] = dCombined.get(key, 0) + d[samp][key]
lisSampOrder.append(samp)
    df = pandas.DataFrame(d)
'''
Copyright <NAME> and <NAME>
2015, 2016, 2017, 2018
'''
from __future__ import print_function # Python 2.7 and 3 compatibility
import os
import sys
import time
import shutil
#import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Standard imports
from numpy import pi
from numpy.linalg import inv
from stat import S_ISREG, ST_CTIME, ST_MODE
from pandas import HDFStore, Series, DataFrame
from collections import OrderedDict
from pathlib import Path
# pyEPR custom imports
from . import hfss
from . import logger
from . import config
from . import AttrDict
from .hfss import ureg, CalcObject, ConstantVecCalcObject, set_property
from .toolbox import print_NoNewLine, print_color, deprecated, fact, epsilon_0, hbar, Planck, fluxQ, nck, \
divide_diagonal_by_2, print_matrix, DataFrame_col_diff, get_instance_vars,\
sort_df_col, sort_Series_idx
from .toolbox_circuits import Calcs_basic
from .toolbox_plotting import cmap_discrete, legend_translucent
from .numeric_diag import bbq_hmt, make_dispersive
import matplotlib as mpl
from .toolbox_report import plot_convergence_f_vspass, plot_convergence_max_df, plot_convergence_solved_elem, plot_convergence_maxdf_vs_sol
class Project_Info(object):
"""
Class containing options and information about the manipulation and analysis in HFSS.
Junction info:
-----------------------
self.junctions : OrderedDict()
A Josephson tunnel junction has to have its parameters specified here for the analysis.
Each junction is given a name and is specified by a dictionary.
It has the following properties:
1. `Lj_variable` : Name of HFSS variable that specifies junction inductance Lj defined on the boundary condition in HFSS. DO NOT USE Global names that start with $.
2. `rect` : Name of HFSS rectangle on which lumped boundary condition is specified.
3. `line` : Name of HFSS polyline which spans the length of the recntalge. Used to define the voltage across the junction. Used to define the current orientation for each junction. Used to define sign of ZPF.
4. `length` : Length in HFSS of the junction rectangle and line (specified in meters).
Example definition:
..code-block python
# Define a single junction
pinfo = Project_Info('')
pinfo.junctions['j1'] = {'Lj_variable' : 'Lj1',
'rect' : 'JJrect1',
'line' : 'JJline1',
'length' : parse_units('50um')} # Length is in meters
# Specify multiple junctions in HFSS model
n_junctions = 5
for i in range(1, 1+n_junctions):
pinfo.junctions[f'j{i}'] = {'Lj_variable' : f'Lj{i}',
'rect' : f'JJrect{i}',
'line' : f'JJline{i}',
'length' : parse_units('50um')}
HFSS app connection settings
-----------------------
project_path : str
Directory path to the hfss project file. Should be the directory, not the file.
default = None: Assumes the project is open, and thus gets the project based on `project_name`
project_name : str, None
Name of the project within the project_path. "None" will get the current active one.
design_name : str, None
Name of the design within the project. "None" will get the current active one.
setup_name : str, None
Name of the setup within the design. "None" will get the current active one.
Additional init setting:
-----------------------
do_connect : True by default. Connect to HFSS
    HFSS design settings
-----------------------
describe junction parameters
junc_rects = None
Name of junction rectangles in HFSS
junc_lines = None
Name of lines in HFSS used to define the current orientation for each junction
junc_LJ_names = None
Name of junction inductance variables in HFSS.
Note, DO NOT USE Global names that start with $.
junc_lens = None
        Junction rect. length, measured in meters.
"""
class _Dissipative:
#TODO: remove and turn to dict
def __init__(self):
self.dielectrics_bulk = None
self.dielectric_surfaces = None
self.resistive_surfaces = None
self.seams = None
def __init__(self, project_path=None, project_name=None, design_name=None,
do_connect = True):
self.project_path = str(Path(project_path)) if not (project_path is None) else None # Path: format path correctly to system convention
self.project_name = project_name
self.design_name = design_name
self.setup_name = None
        ## HFSS design: describe junction parameters
# TODO: introduce modal labels
self.junctions = OrderedDict() # See above for help
self.ports = OrderedDict()
## Dissipative HFSS volumes and surfaces
self.dissipative = self._Dissipative()
self.options = config.options_hfss
        # Connected-to-HFSS variables
self.app = None
self.desktop = None
self.project = None
self.design = None
self.setup = None
if do_connect:
self.connect()
_Forbidden = ['app', 'design', 'desktop', 'project',
'dissipative', 'setup', '_Forbidden', 'junctions']
def save(self, hdf):
'''
hdf : pd.HDFStore
'''
hdf['project_info'] = pd.Series(get_instance_vars(self, self._Forbidden))
hdf['project_info_dissip'] = pd.Series(get_instance_vars(self.dissipative))
hdf['project_info_options'] = pd.Series(get_instance_vars(self.options))
hdf['project_info_junctions'] = pd.DataFrame(self.junctions)
hdf['project_info_ports'] = pd.DataFrame(self.ports)
@deprecated
def connect_to_project(self):
return self.connect()
def connect(self):
'''
Connect to HFSS design.
'''
#logger.info('Connecting to HFSS ...')
self.app, self.desktop, self.project = hfss.load_ansys_project(
self.project_name, self.project_path)
self.project_name = self.project.name
self.project_path = self.project.get_path()
# Design
if self.design_name is None:
self.design = self.project.get_active_design()
self.design_name = self.design.name
logger.info(f'\tOpened active design\n\tDesign: {self.design_name} [Solution type: {self.design.solution_type}]')
else:
try:
self.design = self.project.get_design(self.design_name)
except Exception as e:
tb = sys.exc_info()[2]
logger.error(f"Original error: {e}\n")
raise(Exception(' Did you provide the correct design name? Failed to pull up design.').with_traceback(tb))
#if not ('Eigenmode' == self.design.solution_type):
# logger.warning('\tWarning: The design tpye is not Eigenmode. Are you sure you dont want eigenmode?')
# Setup
try:
n_setups = len(self.design.get_setup_names())
if n_setups == 0:
logger.warning('\tNo design setup detected.')
if self.design.solution_type == 'Eigenmode':
logger.warning('\tCreating eigenmode default setup one.')
self.design.create_em_setup()
self.setup_name = 'Setup'
self.setup = self.design.get_setup(name=self.setup_name)
self.setup_name = self.setup.name
logger.info(f'\tOpened setup: {self.setup_name} [{type(self.setup)}]')
except Exception as e:
tb = sys.exc_info()[2]
logger.error(f"Original error: {e}\n")
raise(Exception(' Did you provide the correct setup name? Failed to pull up setup.').with_traceback(tb))
# Finalize
self.project_name = self.project.name
self.design_name = self.design.name
logger.info('\tConnected successfully.\t :)\t :)\t :)\t\n')
return self
def check_connected(self):
"""Checks if fully connected including setup
"""
return\
(self.setup is not None) and\
(self.design is not None) and\
(self.project is not None) and\
(self.desktop is not None) and\
(self.app is not None)
def disconnect(self):
'''
Disconnect from existing HFSS design.
'''
assert self.check_connected(
) is True, "it does not appear that you have connected to HFSS yet. use connect()"
self.project.release()
self.desktop.release()
self.app.release()
hfss.release()
### UTILITY FUNCTIONS
def get_dm(self):
'''
Get the design and modeler
.. code-block:: python
oDesign, oModeler = projec.get_dm()
'''
oDesign = self.design
oModeler = oDesign.modeler
return oDesign, oModeler
def get_all_variables_names(self):
"""Returns array of all project and local design names."""
return self.project.get_variable_names() + self.design.get_variable_names()
def get_all_object_names(self):
"""Returns array of strings"""
oObjects = []
for s in ["Non Model", "Solids", "Unclassified", "Sheets", "Lines"]:
oObjects += self.design.modeler.get_objects_in_group(s)
return oObjects
def validate_junction_info(self):
""" Validate that the user has put in the junction info correctly.
Do no also forget to check the length of the rectangles/line of
the junction if you change it.
"""
all_variables_names = self.get_all_variables_names()
all_object_names = self.get_all_object_names()
for jjnm, jj in self.junctions.items():
assert jj['Lj_variable'] in all_variables_names, "pyEPR project_info user error found: Seems like for junction `%s` you specified a design or project variable for `Lj_variable` that does not exist in HFSS by the name: `%s` " % (
jjnm, jj['Lj_variable'])
for name in ['rect', 'line']:
assert jj[name] in all_object_names, "pyEPR project_info user error found: Seems like for junction `%s` you specified a %s that does not exist in HFSS by the name: `%s` " % (
jjnm, name, jj[name])
#TODO: Check the length of the rectnagle
#==============================================================================
#%% Main compuation class & interface with HFSS
#==============================================================================
class pyEPR_HFSS(object):
"""
This class defines a pyEPR_HFSS object which calculates and saves
Hamiltonian parameters from an HFSS simulation.
    Further, it allows one to calculate dissipation, etc.
"""
def __init__(self, *args, **kwargs):
'''
Parameters:
-------------------
project_info : Project_Info
            Supply the project info or the parameters to create pinfo
Example use:
-------------------
'''
if (len(args) == 1) and (args[0].__class__.__name__ == 'Project_Info'): #isinstance(args[0], Project_Info): # fails on module repload with changes
project_info = args[0]
else:
            assert len(args) == 0, 'Since you did not pass a Project_Info object as an argument, we now assume you are trying to create a project info object here by passing its arguments. See Project_Info. It does not take any positional arguments, only kwargs.'
project_info = Project_Info(*args, **kwargs)
# Input
self.pinfo = project_info
if self.pinfo.check_connected() is False:
self.pinfo.connect()
self.verbose = True #TODO: change verbose to logger. remove verbose flags
self.append_analysis = False #TODO
# hfss connect module
self.fields = self.setup.get_fields()
self.solutions = self.setup.get_solutions()
# Variations - the following get updated in update_variation_information
self.nmodes = int(1)
self.listvariations = ("",)
self.nominalvariation = '0'
self.nvariations = 0
self.update_variation_information()
        self.hfss_variables = OrderedDict()  # container for eBBQ list of variables
if self.verbose:
print('Design \"%s\" info:'%self.design.name)
print('\t%-15s %d\n\t%-15s %d' %('# eigenmodes', self.nmodes, '# variations', self.nvariations))
# Setup data saving
self.setup_data()
self.latest_h5_path = None # #self.get_latest_h5()
''' #TODO: to be implemented to use old files
if self.latest_h5_path is not None and self.append_analysis:
latest_bbq_analysis = pyEPR_Analysis(self.latest_h5_path)
if self.verbose:
print( 'Varied variables and values : ', latest_bbq_analysis.get_swept_variables(), \
'Variations : ', latest_bbq_analysis.variations)
'''
@property
def setup(self):
return self.pinfo.setup
@property
def design(self):
return self.pinfo.design
@property
def project(self):
return self.pinfo.project
@property
def desktop(self):
return self.pinfo.desktop
@property
def app(self):
return self.pinfo.app
@property
def junctions(self):
return self.pinfo.junctions
@property
def ports(self):
return self.pinfo.ports
@property
def options(self):
return self.pinfo.options
def get_latest_h5(self):
'''
No longer used. Could be added back in.
'''
dirpath = self.data_dir
entries1 = (os.path.join(dirpath, fn) for fn in os.listdir(dirpath)) # get all entries in the directory w/ stats
entries2 = ((os.stat(path), path) for path in entries1)
entries3 = ((stat[ST_CTIME], path) # leave only regular files, insert creation date
for stat, path in entries2 if S_ISREG(stat[ST_MODE]) and path[-4:]=='hdf5')
#NOTE: on Windows `ST_CTIME` is a creation date but on Unix it could be something else
#NOTE: use `ST_MTIME` to sort by a modification date
paths_sorted = []
for cdate, path in sorted(entries3):
paths_sorted.append(path)
#print time.ctime(cdate), os.path.basename(path)
if len(paths_sorted) > 0:
self.latest_h5_path = paths_sorted[-1]
if self.verbose:
print('This simulations has been analyzed, latest data in ' + self.latest_h5_path)
else:
self.latest_h5_path = None
if self.verbose:
print('This simulation has never been analyzed')
def setup_data(self):
'''
Set up folder paths for saving data to.
'''
data_dir = Path(config.root_dir) / \
Path(self.project.name)/Path(self.design.name)
#if self.verbose:
# print("\nResults will be saved to:\n" +'- '*20+'\n\t'+ str(data_dir)+'\n'+'- '*20+'\n')
if len(self.design.name) > 50:
            print_color('WARNING! DESIGN FILENAME MAY BE TOO LONG! ')
if not data_dir.is_dir():
data_dir.mkdir(parents=True, exist_ok=True)
self.data_dir = str(data_dir)
self.data_filename = str(
data_dir / (time.strftime('%Y-%m-%d %H-%M-%S', time.localtime()) + '.hdf5'))
"""
@deprecated
def calc_p_j(self, modes=None, variation=None):
'''
Calculates the p_j for all the modes.
Requires a calculator expression called P_J.
'''
lv = self.get_lv(variation)
if modes is None:
modes = range(self.nmodes)
pjs = OrderedDict()
for ii, m in enumerate(modes):
print('Calculating p_j for mode ' + str(m) + ' (' + str(ii) + '/' + str(np.size(modes)-1) + ')')
self.solutions.set_mode(m+1, 0)
self.fields = self.setup.get_fields()
P_J = self.fields.P_J
pjs['pj_'+str(m)] = P_J.evaluate(lv=lv)
self.pjs = pjs
if self.verbose:
print(pjs)
return pjs
"""
def calc_p_junction_single(self, mode):
'''
This function is used in the case of a single junction only.
For multiple junctions, see `calc_p_junction`.
Assumes no lumped capacitive elements.
'''
pj = OrderedDict()
pj_val = (self.U_E-self.U_H)/self.U_E
pj['pj_'+str(mode)] = np.abs(pj_val)
print(' p_j_' + str(mode) + ' = ' + str(pj_val))
return pj
    #TODO: replace this method with the one below, here because some funcs use it still
def get_freqs_bare(self, variation):
#str(self.get_lv(variation))
freqs_bare_vals = []
freqs_bare_dict = OrderedDict()
freqs, kappa_over_2pis = self.solutions.eigenmodes(
self.get_lv_EM(variation))
for m in range(self.nmodes):
freqs_bare_dict['freq_bare_'+str(m)] = 1e9*freqs[m]
freqs_bare_vals.append(1e9*freqs[m])
if kappa_over_2pis is not None:
freqs_bare_dict['Q_'+str(m)] = freqs[m]/kappa_over_2pis[m]
else:
freqs_bare_dict['Q_'+str(m)] = 0
self.freqs_bare = freqs_bare_dict
self.freqs_bare_vals = freqs_bare_vals
return freqs_bare_dict, freqs_bare_vals
def get_freqs_bare_pd(self, variation):
'''
        Return a pd.Series of modal frequencies and Qs for the given variation
'''
freqs, kappa_over_2pis = self.solutions.eigenmodes(
self.get_lv_EM(variation))
if kappa_over_2pis is None:
kappa_over_2pis = np.zeros(len(freqs))
freqs = pd.Series(freqs, index=range(len(freqs))) # GHz
Qs = freqs / pd.Series(kappa_over_2pis, index=range(len(freqs)))
return freqs, Qs
def get_lv(self, variation=None):
'''
List of variation variables.
Returns list of var names and var values.
Such as ['Lj1:=','13nH', 'QubitGap:=','100um']
Parameters
-----------
variation : string number such as '0' or '1' or ...
'''
if variation is None:
lv = self.nominalvariation
lv = self.parse_listvariations(lv)
else:
lv = self.listvariations[ureg(variation)]
lv = self.parse_listvariations(lv)
return lv
def get_lv_EM(self, variation):
if variation is None:
lv = self.nominalvariation
#lv = self.parse_listvariations_EM(lv)
else:
lv = self.listvariations[ureg(variation)]
#lv = self.parse_listvariations_EM(lv)
return str(lv)
def parse_listvariations_EM(self, lv):
lv = str(lv)
lv = lv.replace("=", ":=,")
lv = lv.replace(' ', ',')
lv = lv.replace("'", "")
lv = lv.split(",")
return lv
def parse_listvariations(self, lv):
lv = str(lv)
lv = lv.replace("=", ":=,")
lv = lv.replace(' ', ',')
lv = lv.replace("'", "")
lv = lv.split(",")
return lv
def get_variables(self, variation=None):
lv = self.get_lv(variation)
variables = OrderedDict()
for ii in range(int(len(lv)/2)):
variables['_'+lv[2*ii][:-2]] = lv[2*ii+1]
self.variables = variables
return variables
def calc_energy_electric(self,
variation=None,
volume='AllObjects',
smooth=False):
r'''
Calculates two times the peak electric energy, or 4 times the RMS, :math:`4*\mathcal{E}_{\mathrm{elec}}`
(since we do not divide by 2 and use the peak phasors).
.. math::
\mathcal{E}_{\mathrm{elec}}=\frac{1}{4}\mathrm{Re}\int_{V}\mathrm{d}v\vec{E}_{\text{max}}^{*}\overleftrightarrow{\epsilon}\vec{E}_{\text{max}}
volume : string | 'AllObjects'
smooth : bool | False
Smooth the electric field or not when performing calculation
        Example use to calculate the energy participation of a substrate
.. code-block python
ℰ_total = epr_hfss.calc_energy_electric(volume='AllObjects')
ℰ_substr = epr_hfss.calc_energy_electric(volume='Box1')
print(f'Energy in substrate = {100*ℰ_substr/ℰ_total:.1f}%')
'''
calcobject = CalcObject([], self.setup)
vecE = calcobject.getQty("E")
if smooth:
vecE = vecE.smooth()
A = vecE.times_eps()
B = vecE.conj()
A = A.dot(B)
A = A.real()
A = A.integrate_vol(name=volume)
lv = self.get_lv(variation)
return A.evaluate(lv=lv)
def calc_energy_magnetic(self,
variation=None,
volume='AllObjects',
smooth=True):
'''
See calc_energy_electric
'''
calcobject = CalcObject([], self.setup)
vecH = calcobject.getQty("H")
if smooth:
vecH = vecH.smooth()
A = vecH.times_mu()
B = vecH.conj()
A = A.dot(B)
A = A.real()
A = A.integrate_vol(name=volume)
lv = self.get_lv(variation)
return A.evaluate(lv=lv)
def calc_p_electric_volume(self,
name_dielectric3D,
relative_to='AllObjects',
E_total=None
):
r'''
        Calculate the dielectric energy-participation ratio
        of a 3D object (one that has volume) relative to the dielectric energy of
        a list of objects.
This is as a function relative to another object or all objects.
When all objects are specified, this does not include any energy
that might be stored in any lumped elements or lumped capacitors.
Returns:
---------
ℰ_object/ℰ_total, (ℰ_object, _total)
'''
if E_total is None:
logger.debug('Calculating ℰ_total')
ℰ_total = self.calc_energy_electric(volume=relative_to)
else:
ℰ_total = E_total
logger.debug('Calculating ℰ_object')
ℰ_object = self.calc_energy_electric(volume=name_dielectric3D)
return ℰ_object/ℰ_total, (ℰ_object, ℰ_total)
def calc_current(self, fields, line):
'''
Function to calculate Current based on line. Not in use
line : integration line between plates - name
'''
self.design.Clear_Field_Clac_Stack()
comp = fields.Vector_H
exp = comp.integrate_line_tangent(line)
I = exp.evaluate(phase = 90)
self.design.Clear_Field_Clac_Stack()
return I
def calc_avg_current_J_surf_mag(self, variation, junc_rect, junc_line):
        ''' Peak current I_max for mode J in junction J
The avg. is over the surface of the junction. I.e., spatial. '''
lv = self.get_lv(variation)
jl, uj = self.get_junc_len_dir(variation, junc_line)
uj = ConstantVecCalcObject(uj, self.setup)
calc = CalcObject([], self.setup)
#calc = calc.getQty("Jsurf").mag().integrate_surf(name = junc_rect)
calc = (((calc.getQty("Jsurf")).dot(uj)).imag()
).integrate_surf(name=junc_rect)
I = calc.evaluate(lv=lv) / jl # phase = 90
#self.design.Clear_Field_Clac_Stack()
return I
def calc_current_line_voltage(self, variation, junc_line_name, junc_L_Henries):
'''
        Peak current I_max for the pre-specified mode, calculated from the line voltage across the junction.
Parameters:
------------------------------------------------
variation: variation number
junc_line_name: name of the HFSS line spanning the junction
junc_L_Henries: junction inductance in henries
TODO: Smooth?
'''
lv = self.get_lv(variation)
v_calc_real = CalcObject([], self.setup).getQty(
"E").real().integrate_line_tangent(name=junc_line_name)
v_calc_imag = CalcObject([], self.setup).getQty(
"E").imag().integrate_line_tangent(name=junc_line_name)
V = np.sqrt(v_calc_real.evaluate(lv=lv)**2 +
v_calc_imag.evaluate(lv=lv)**2)
freq = CalcObject(
[('EnterOutputVar', ('Freq', "Complex"))], self.setup).real().evaluate()
return V/(2*np.pi*freq*junc_L_Henries) # I=V/(wL)s
def calc_line_current(self, variation, junc_line_name):
lv = self.get_lv(variation)
calc = CalcObject([], self.setup)
calc = calc.getQty("H").imag().integrate_line_tangent(
name=junc_line_name)
#self.design.Clear_Field_Clac_Stack()
return calc.evaluate(lv=lv)
def get_junc_len_dir(self, variation, junc_line):
'''
Return the length and direction of a junction defined by a line
Inputs: variation: simulation variation
junc_line: polyline object
Outputs: jl (float) junction length
uj (list of 3 floats) x,y,z coordinates of the unit vector
tangent to the junction line
'''
#
lv = self.get_lv(variation)
u = []
for coor in ['X', 'Y', 'Z']:
calc = CalcObject([], self.setup)
calc = calc.line_tangent_coor(junc_line, coor)
u.append(calc.evaluate(lv=lv))
jl = float(np.sqrt(u[0]**2+u[1]**2+u[2]**2))
uj = [float(u[0]/jl), float(u[1]/jl), float(u[2]/jl)]
return jl, uj
def get_Qseam(self, seam, mode, variation):
r'''
        Calculate the contribution to Q of a seam, by integrating the current in
the seam with finite conductance: set in the config file
ref: http://arxiv.org/pdf/1509.01119.pdf
'''
lv = self.get_lv(variation)
Qseam = OrderedDict()
print('Calculating Qseam_' + seam + ' for mode ' + str(mode) +
' (' + str(mode) + '/' + str(self.nmodes-1) + ')')
# overestimating the loss by taking norm2 of j, rather than jperp**2
j_2_norm = self.fields.Vector_Jsurf.norm_2()
int_j_2 = j_2_norm.integrate_line(seam)
int_j_2_val = int_j_2.evaluate(lv=lv, phase=90)
yseam = int_j_2_val/self.U_H/self.omega
Qseam['Qseam_'+seam+'_' +
str(mode)] = config.Dissipation_params.gseam/yseam
print('Qseam_' + seam + '_' + str(mode) + str(' = ') +
              str(config.Dissipation_params.gseam/yseam))
return Series(Qseam)
def get_Qseam_sweep(self, seam, mode, variation, variable, values, unit, pltresult=True):
# values = ['5mm','6mm','7mm']
# ref: http://arxiv.org/pdf/1509.01119.pdf
self.solutions.set_mode(mode+1, 0)
self.fields = self.setup.get_fields()
freqs_bare_dict, freqs_bare_vals = self.get_freqs_bare(variation)
self.omega = 2*np.pi*freqs_bare_vals[mode]
print(variation)
print(type(variation))
print(ureg(variation))
self.U_H = self.calc_energy_magnetic(variation)
lv = self.get_lv(variation)
Qseamsweep = []
print('Calculating Qseam_' + seam + ' for mode ' + str(mode) +
' (' + str(mode) + '/' + str(self.nmodes-1) + ')')
for value in values:
self.design.set_variable(variable, str(value)+unit)
# overestimating the loss by taking norm2 of j, rather than jperp**2
j_2_norm = self.fields.Vector_Jsurf.norm_2()
int_j_2 = j_2_norm.integrate_line(seam)
int_j_2_val = int_j_2.evaluate(lv=lv, phase=90)
yseam = int_j_2_val/self.U_H/self.omega
Qseamsweep.append(config.Dissipation_params.gseam/yseam)
# Qseamsweep['Qseam_sweep_'+seam+'_'+str(mode)] = gseam/yseam
#Cprint 'Qseam_' + seam + '_' + str(mode) + str(' = ') + str(gseam/yseam)
if pltresult:
_, ax = plt.subplots()
ax.plot(values, Qseamsweep)
ax.set_yscale('log')
ax.set_xlabel(variable+' ('+unit+')')
ax.set_ylabel('Q'+'_'+seam)
return Qseamsweep
def get_Qdielectric(self, dielectric, mode, variation):
Qdielectric = OrderedDict()
print('Calculating Qdielectric_' + dielectric + ' for mode ' +
str(mode) + ' (' + str(mode) + '/' + str(self.nmodes-1) + ')')
U_dielectric = self.calc_energy_electric(variation, volume=dielectric)
p_dielectric = U_dielectric/self.U_E
#TODO: Update make p saved sep. and get Q for diff materials, indep. specify in pinfo
Qdielectric['Qdielectric_'+dielectric+'_' +
str(mode)] = 1/(p_dielectric*config.Dissipation_params.tan_delta_sapp)
print('p_dielectric'+'_'+dielectric+'_' +
str(mode)+' = ' + str(p_dielectric))
return Series(Qdielectric)
def get_Qsurface_all(self, mode, variation):
'''
        Calculate the contribution to Q of a dielectric layer of dirt on all surfaces
set the dirt thickness and loss tangent in the config file
ref: http://arxiv.org/pdf/1509.01854.pdf
'''
lv = self.get_lv(variation)
Qsurf = OrderedDict()
print('Calculating Qsurface for mode ' + str(mode) +
' (' + str(mode) + '/' + str(self.nmodes-1) + ')')
# A = self.fields.Mag_E**2
# A = A.integrate_vol(name='AllObjects')
# U_surf = A.evaluate(lv=lv)
calcobject = CalcObject([], self.setup)
vecE = calcobject.getQty("E")
A = vecE
B = vecE.conj()
A = A.dot(B)
A = A.real()
A = A.integrate_surf(name='AllObjects')
U_surf = A.evaluate(lv=lv)
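        # Scale the surface integral of |E|^2 by the assumed thin dirt-layer thickness
        # (config.Dissipation_params.th) and its permittivity (epsilon_0 * eps_r) to
        # estimate the electric energy stored in the lossy surface layer.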
U_surf *= config.Dissipation_params.th*epsilon_0*config.Dissipation_params.eps_r
p_surf = U_surf/self.U_E
Qsurf['Qsurf_'+str(mode)] = 1 / \
(p_surf*config.Dissipation_params.tan_delta_surf)
print('p_surf'+'_'+str(mode)+' = ' + str(p_surf))
return Series(Qsurf)
def calc_Q_external(self, variation, freq_GHz, U_E):
'''
Calculate the coupling Q of mode m with each port p
Expected that you have specified the mode before calling this
'''
Qp = pd.Series({})
freq = freq_GHz * 1e9 # freq in Hz
for port_nm, port in self.pinfo.ports.items():
I_peak = self.calc_avg_current_J_surf_mag(variation, port['rect'],
port['line'])
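            # Energy dissipated in the port resistor over one oscillation period T = 1/freq:
            # average power P = (1/2) * R * I_peak**2, so U_dissip = P * T.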
U_dissip = 0.5 * port['R'] * I_peak**2 * 1 / freq
p = U_dissip / (U_E/2) # U_E is 2x the peak electrical energy
kappa = p * freq
Q = 2 * np.pi * freq / kappa
Qp['Q_' + port_nm] = Q
return Qp
def calc_p_junction(self, variation, U_H, U_E, Ljs):
'''
Expected that you have specified the mode before calling this, `self.set_mode(num)`
        Expected to precalc U_H and U_E for the mode; will return pandas Series objects
junc_rect = ['junc_rect1', 'junc_rect2'] name of junc rectangles to integrate H over
junc_len = [0.0001] specify in SI units; i.e., meters
LJs = [8e-09, 8e-09] SI units
calc_sign = ['junc_line1', 'junc_line2']
This function assumes there are no lumped capacitors in model.
        Potential errors: If you don't have a line or rect by the right name, you will probably get an error of the type:
com_error: (-2147352567, 'Exception occurred.', (0, None, None, None, 0, -2147024365), None)
'''
Pj = pd.Series({})
Sj = pd.Series({})
for junc_nm, junc in self.pinfo.junctions.items():
logger.debug(f'Calculating participation for {(junc_nm, junc)}')
# Get peak current though junction I_peak
            if self.pinfo.options.method_calc_P_mj == 'J_surf_mag':
I_peak = self.calc_avg_current_J_surf_mag(
variation, junc['rect'], junc['line'])
            elif self.pinfo.options.method_calc_P_mj == 'line_voltage':
I_peak = self.calc_current_line_voltage(
variation, junc['line'], Ljs[junc_nm])
else:
raise NotImplementedError(
'Other calculation methods (self.pinfo.options.method_calc_P_mj) are possible but not implemented here. ')
Pj['p_' + junc_nm] = Ljs[junc_nm] * \
I_peak**2 / U_E
            # divide by U_E: participation normed to be between 0 and 1 by the total capacitive energy
# which should be the total inductive energy
# Sign bit
Sj['s_' + junc_nm] = + \
1 if (self.calc_line_current(
variation, junc['line'])) > 0 else -1
if self.verbose:
print('\t{:<15} {:>8.6g} {:>5s}'.format(
junc_nm,
Pj['p_' + junc_nm],
'+' if Sj['s_' + junc_nm] > 0 else '-'))
return Pj, Sj
def do_EPR_analysis(self,
variations=None,
modes=None):
"""
Main analysis routine
Load results with pyEPR_Analysis
..code-block python
pyEPR_Analysis(self.data_filename, variations=variations) ```
Optional Parameters:
------------------------
variations : list | None
Example list of variations is ['0', '1']
            A variation is a combination of project/design variables in an Optimetrics sweep
modes : list | None
Modes to analyze
for example modes = [0, 2, 3]
HFSS Notes:
------------------------
Assumptions:
Low dissipation (high-Q).
            Right now, we assume that there are no lumped capacitors to simplify calculations. Not required.
We assume that there are only lumped inductors, so that U_tot = U_E+U_H+U_L and U_C =0, so that U_tot = 2*U_E;
"""
self._run_time = time.strftime('%Y%m%d_%H%M%S', time.localtime())
self.update_variation_information()
if modes is None:
modes = range(self.nmodes)
if variations is None:
variations = self.variations
# Setup save and save pinfo
        #TODO: The pd.HDFStore is used to store the pandas series and dataframes, but is otherwise cumbersome.
# We should move to a better saving paradigm
if self.latest_h5_path is not None and self.append_analysis:
shutil.copyfile(self.latest_h5_path, self.data_filename)
hdf = pd.HDFStore(self.data_filename)
self.pinfo.save(hdf) # This will save only 1 globalinstance
### Main loop - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
for ii, variation in enumerate(variations):
            #TODO: Move this into a function called self.analyze_variation
print(f'\nVariation {variation} [{ii+1}/{len(variations)}]')
# Previously analyzed?
if (variation+'/hfss_variables') in hdf.keys() and self.append_analysis:
print_NoNewLine(' previously analyzed ...\n')
continue
self.lv = self.get_lv(variation)
time.sleep(0.4)
if self.has_fields() == False:
logger.error(f" Error: HFSS does not have field solution for mode={mode}.\
Skipping this mode in the analysis")
continue
freqs_bare_GHz, Qs_bare = self.get_freqs_bare_pd(variation)
self.hfss_variables[variation] = pd.Series(
self.get_variables(variation=variation))
Ljs = pd.Series({})
for junc_name, val in self.junctions.items(): # junction nickname
Ljs[junc_name] = ureg.Quantity(
self.hfss_variables[variation]['_'+val['Lj_variable']]).to_base_units().magnitude
# TODO: add this as pass and then set an attribute that specifies which pass is the last pass.
# so we can save vs pass
hdf['v'+variation+'/freqs_bare_GHz'] = freqs_bare_GHz
hdf['v'+variation+'/Qs_bare'] = Qs_bare
hdf['v'+variation+'/hfss_variables'] = self.hfss_variables[variation]
hdf['v'+variation+'/Ljs'] = Ljs
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# This is crummy now. Maybe use xarray.
Om = OrderedDict() # Matrix of angular frequency (of analyzed modes)
Pm = OrderedDict() # Participation P matrix
Sm = OrderedDict() # Sign S matrix
Qm_coupling = OrderedDict() # Quality factor matrix
SOL = OrderedDict() # other results
for mode in modes: # integer of mode number [0,1,2,3,..]
# Mode setup & load fields
print(f' Mode {mode} [{mode+1}/{self.nmodes}]')
self.set_mode(mode)
                # Get hfss solved frequencies
_Om = pd.Series({})
_Om['freq_GHz'] = freqs_bare_GHz[mode] # freq
Om[mode] = _Om
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# EPR Hamiltonian calculations
# Calculation global energies and report
print(' Calculating ℰ_electric', end=',')
try:
self.U_E = self.calc_energy_electric(variation)
except Exception as e:
tb = sys.exc_info()[2]
print("\n\nError:\n", e)
raise(Exception(' Did you save the field solutions?\n Failed during calculation of the total magnetic energy. This is the first calculation step, and is indicative that there are no field solutions saved. ').with_traceback(tb))
print(' ℰ_magnetic')
self.U_H = self.calc_energy_magnetic(variation)
sol = Series({'U_H': self.U_H, 'U_E': self.U_E})
print(f""" {'(ℰ_E-ℰ_H)/ℰ_E':>15s} {'ℰ_E':>9s} {'ℰ_H':>9s}
{100*(self.U_E - self.U_H)/self.U_E:>15.1f}% {self.U_E:>9.4g} {self.U_H:>9.4g}\n""")
# Calcualte EPR for each of the junctions
print(f' Calculating junction EPR [method={self.pinfo.options.method_calc_P_mj}]')
print(f"\t{'junction':<15s} EPR p_{mode}j sign s_{mode}j")
Pm[mode], Sm[mode] = self.calc_p_junction(
variation, self.U_H, self.U_E, Ljs)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# EPR Dissipative calculations
                # TODO: this should really be passed as an argument to the functions rather than a property of the class I would say
self.omega = 2*np.pi*freqs_bare_GHz[mode]
Qm_coupling[mode] = self.calc_Q_external(variation,
freqs_bare_GHz[mode],
self.U_E)
if self.pinfo.dissipative.seams is not None: # get seam Q
for seam in self.pinfo.dissipative.seams:
sol = sol.append(self.get_Qseam(seam, mode, variation))
if self.pinfo.dissipative.dielectrics_bulk is not None: # get Q dielectric
for dielectric in self.pinfo.dissipative.dielectrics_bulk:
sol = sol.append(self.get_Qdielectric(
dielectric, mode, variation))
if self.pinfo.dissipative.resistive_surfaces is not None:
                    if self.pinfo.dissipative.resistive_surfaces == 'all':  # get Q surface
sol = sol.append(
self.get_Qsurface_all(mode, variation))
else:
raise NotImplementedError(
"Join the team, by helping contribute this piece of code.")
if self.pinfo.dissipative.resistive_surfaces is not None:
raise NotImplementedError(
"Join the team, by helping contribute this piece of code.")
SOL[mode] = sol
# Save
self._save_variation(hdf, variation, Om, Pm, Sm, Qm_coupling, SOL)
hdf.close()
print('\nANALYSIS DONE. Data saved to:\n\n' + self.data_filename+'\n\n')
return self.data_filename, variations
def _save_variation(self, hdf, variation, Om, Pm, Sm, Qm_coupling, SOL):
'''
Save variation
'''
hdf['v'+variation+'/O_matrix'] = pd.DataFrame(Om)
# raw, not normalized
hdf['v'+variation+'/P_matrix'] = pd.DataFrame(Pm).transpose()
hdf['v'+variation+'/S_matrix'] = pd.DataFrame(Sm).transpose()
hdf['v'+variation +
'/Q_coupling_matrix'] = pd.DataFrame(Qm_coupling).transpose()
hdf['v'+variation+'/pyEPR_sols'] = pd.DataFrame(SOL).transpose()
if self.options.save_mesh_stats:
mesh = self.get_mesh_statistics(variation)
if mesh is not None:
hdf['v'+variation+'/mesh_stats'] = mesh # dataframe
conv = self.get_convergence(variation)
if conv is not None:
hdf['v'+variation+'/convergence'] = conv # dataframe
convergence_f = self.hfss_report_f_convergence(variation) # dataframe
            if (conv is not None) and (len(conv) != 0):
hdf['v'+variation+'/convergence_f_pass'] = convergence_f # dataframe
def get_mesh_statistics(self, variation='0'):
'''
Input:
variation='0' ,'1', ...
Returns dataframe:
```
Name Num Tets Min edge length Max edge length RMS edge length Min tet vol Max tet vol Mean tet vol Std Devn (vol)
0 Region 909451 0.000243 0.860488 0.037048 6.006260e-13 0.037352 0.000029 6.268190e-04
1 substrate 1490356 0.000270 0.893770 0.023639 1.160090e-12 0.031253 0.000007 2.309920e-04
```
'''
variation = self.listvariations[ureg(variation)]
return self.setup.get_mesh_stats(variation)
def get_convergence(self, variation='0'):
'''
Input:
variation='0' ,'1', ...
Returns dataframe:
```
Solved Elements Max Delta Freq. % Pass Number
1 128955 NaN
2 167607 11.745000
3 192746 3.208600
4 199244 1.524000
```
'''
variation = self.listvariations[ureg(variation)]
df, _ = self.setup.get_convergence(variation)
return df
def get_convergence_vs_pass(self, variation='0'):
'''
        Returns the convergence vs pass number of the eigenmode freqs.
        Makes a plot in HFSS and returns a pandas dataframe:
```
re(Mode(1)) [g] re(Mode(2)) [g] re(Mode(3)) [g]
Pass []
1 4.643101 4.944204 5.586289
2 5.114490 5.505828 6.242423
3 5.278594 5.604426 6.296777
```
'''
return self.hfss_report_f_convergence(variation)
def set_mode(self, mode_num, phase=0):
'''
Set source excitations should be used for fields post processing.
Counting modes from 0 onward
'''
if mode_num < 0:
logger.error('Too small a mode number')
self.solutions.set_mode(mode_num + 1, phase)
if self.has_fields() == False:
logger.warning(f" Error: HFSS does not have field solution for mode={mode_num}.\
Skipping this mode in the analysis")
self.fields = self.setup.get_fields()
def get_variation_nominal(self):
return self.design.get_nominal_variation()
def get_variations_all(self):
self.update_variation_information()
return self.listvariations
def update_variation_information(self):
        '''
Updates all information about the variations.
nmodes, listvariations, nominalvariation, nvariations
variations = ['0','1','2'] or [] for empty
'''
self.nmodes = int(self.setup.n_modes)
self.listvariations = self.design._solutions.ListVariations(str(self.setup.solution_name))
self.nominalvariation = self.design.get_nominal_variation()
self.nvariations = np.size(self.listvariations)
self.variations = [str(i) for i in range(self.nvariations)]
def has_fields(self, variation=None):
'''
Determine if fields exist for a particular solution.
variation : str | None
If None, gets the nominal variation
'''
return self.solutions.has_fields(variation)
def hfss_report_f_convergence(self, variation= '0'):
'''
        Create a report in HFSS to plot the convergence of freq and style it.
        Returns the convergence vs pass number of the eigenmode freqs.
Returns a pandas dataframe:
```
re(Mode(1)) [g] re(Mode(2)) [g] re(Mode(3)) [g]
Pass []
1 4.643101 4.944204 5.586289
2 5.114490 5.505828 6.242423
3 5.278594 5.604426 6.296777
```
'''
#TODO: Move to class for reporter ?
if not (self.design.solution_type == 'Eigenmode'):
return None
oDesign = self.design
variation = self.get_lv(variation)
report = oDesign._reporter
# Create report
ycomp = [f"re(Mode({i}))" for i in range(1,1+self.nmodes)]
params = ["Pass:=", ["All"]]+variation
report_name = "Freq. vs. pass"
if report_name in report.GetAllReportNames():
report.DeleteReports([report_name])
self.solutions.create_report(report_name, "Pass", ycomp, params, pass_name='AdaptivePass')
# Properties of lines
curves = [f"{report_name}:re(Mode({i})):Curve1" for i in range(1,1+self.nmodes)]
set_property(report, 'Attributes', curves, 'Line Width', 3)
set_property(report, 'Scaling', f"{report_name}:AxisY1", 'Auto Units', False)
set_property(report, 'Scaling', f"{report_name}:AxisY1", 'Units', 'g')
set_property(report, 'Legend', f"{report_name}:Legend", 'Show Solution Name', False)
if 1: # Save
try:
path = Path(self.data_dir)/'hfss_eig_f_convergence.csv'
report.ExportToFile(report_name,path)
return pd.read_csv(path, index_col= 0)
except Exception as e:
logger.error(f"Error could not save and export hfss plot to {path}. Is the plot made in HFSS with the correct name. Check the HFSS error window. \t Error = {e}")
return None
def hfss_report_full_convergence(self, fig=None,_display=True):
if fig is None:
fig = plt.figure(figsize=(11,3.))
fig.clf()
gs = mpl.gridspec.GridSpec(1, 3, width_ratios=[1.2, 1.5, 1])#, height_ratios=[4, 1], wspace=0.5
axs = [fig.add_subplot(gs[i]) for i in range(3)]
for variation in self.variations:
print(variation)
convergence_t = self.get_convergence()
convergence_f = self.hfss_report_f_convergence()
ax0t = axs[1].twinx()
plot_convergence_f_vspass(axs[0], convergence_f)
plot_convergence_max_df(axs[1], convergence_t.iloc[:,1])
plot_convergence_solved_elem(ax0t, convergence_t.iloc[:,0])
plot_convergence_maxdf_vs_sol(axs[2], convergence_t.iloc[:,1],
convergence_t.iloc[:,0])
fig.tight_layout(w_pad=0.1)#pad=0.0, w_pad=0.1, h_pad=1.0)
if _display:
from IPython.display import display
display(fig)
return fig
#%%==============================================================================
### ANALYSIS FUNCTIONS
#==============================================================================
def pyEPR_ND(freqs, Ljs, ϕzpf,
cos_trunc=8,
fock_trunc=9,
use_1st_order=False,
return_H=False):
'''
Numerical diagonalizaiton for pyEPR.
    :param fs: (GHz, not radians) Linearized model, H_lin, normal mode frequencies in GHz, length M
    :param ljs: (Henries) junction linearized inductances in Henries, length J
    :param fzpfs: (reduced) Reduced zero-point fluctuation of the junction fluxes for each mode across each junction, shape MxJ
:return: Hamiltonian mode freq and dispersive shifts. Shifts are in MHz. Shifts have flipped sign so that down shift is positive.
'''
freqs, Ljs, ϕzpf = map(np.array, (freqs, Ljs, ϕzpf))
assert(all(freqs < 1E6)), "Please input the frequencies in GHz"
assert(all(Ljs < 1E-3)), "Please input the inductances in Henries"
    Hs = bbq_hmt(freqs * 1E9, Ljs.astype(float), fluxQ*ϕzpf,
cos_trunc, fock_trunc, individual=use_1st_order)
f_ND, χ_ND, _, _ = make_dispersive(
Hs, fock_trunc, ϕzpf, freqs, use_1st_order=use_1st_order)
χ_ND = -1*χ_ND * 1E-6 # convert to MHz, and flip sign so that down shift is positive
return (f_ND, χ_ND, Hs) if return_H else (f_ND, χ_ND)
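# Illustrative call (made-up numbers): two modes, one junction; freqs in GHz,
# Lj in Henries, reduced zero-point fluxes of shape M x J.
#   f_ND, chi_ND = pyEPR_ND([4.5, 6.2], [10e-9], [[0.05], [0.40]])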
#==============================================================================
# ANALYSIS BBQ
#==============================================================================
class Results_Hamiltonian(OrderedDict):
'''
Class to store and process results from the analysis of H_nl.
'''
#TODO: make this savable and loadable
def get_vs_variation(self, quantity):
res = OrderedDict()
for k, r in self.items():
res[k] = r[quantity]
return res
def get_frequencies_HFSS(self):
z = sort_df_col(pd.DataFrame(self.get_vs_variation('f_0')))
z.index.name = 'eigenmode'
z.columns.name = 'variation'
return z
def get_frequencies_O1(self):
z = sort_df_col(pd.DataFrame(self.get_vs_variation('f_1')))
z.index.name = 'eigenmode'
z.columns.name = 'variation'
return z
def get_frequencies_ND(self):
z = sort_df_col(pd.DataFrame(self.get_vs_variation('f_ND')))
z.index.name = 'eigenmode'
z.columns.name = 'variation'
return z
def get_chi_O1(self):
z = self.get_vs_variation('chi_O1')
return z
def get_chi_ND(self):
z = self.get_vs_variation('chi_ND')
return z
class pyEPR_Analysis(object):
'''
Defines an analysis object which loads and plots data from a h5 file
This data is obtained using pyEPR_HFSS
'''
def __init__(self, data_filename, variations=None, do_print_info = True):
self.data_filename = data_filename
self.results = Results_Hamiltonian()
with HDFStore(data_filename, mode = 'r') as hdf: # = h5py.File(data_filename, 'r')
self.junctions = hdf['/project_info_junctions']
self.project_info = Series(hdf['project_info'])
self.project_info_dissip = Series(hdf['/project_info_dissip'])
            self.project_info_options = Series(hdf['/project_info_options'])
# coding: utf-8
# In[2]:
# FIXME: the following function depends on the format of the file in which it is defined, so it cannot be moved to utils.
def is_env_notebook():
"""Determine wheather is the environment Jupyter Notebook"""
if 'get_ipython' not in globals():
# Python shell
return False
env_name = get_ipython().__class__.__name__
if env_name == 'TerminalInteractiveShell':
# IPython shell
return False
# Jupyter Notebook
return True
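# Illustrative use (not part of the original script): pick a progress-bar flavour, e.g.
#   bar = tqdm.tqdm_notebook if is_env_notebook() else tqdm.tqdm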
# In[3]:
#import sys
#sys.path.append('.')
import argparse
from collections import defaultdict, Counter
import random
import os
import pandas as pd
import tqdm
from IPython.core.debugger import Pdb
ON_KAGGLE: bool = 'KAGGLE_WORKING_DIR' in os.environ
if ON_KAGGLE:
from .dataset import DATA_ROOT,EXTERNAL_ROOT
else:
from dataset import DATA_ROOT,EXTERNAL_ROOT
# In[11]:
# make_folds is set up for multi-label data.
def make_folds_for_multilabel(n_folds: int) -> pd.DataFrame:
df = pd.read_csv(DATA_ROOT / 'train.csv')
cls_counts = Counter(cls for classes in df['attribute_ids'].str.split()
for cls in classes)
fold_cls_counts = defaultdict(int)
folds = [-1] * len(df)
for item in tqdm.tqdm(df.sample(frac=1, random_state=42).itertuples(),
total=len(df)):
cls = min(item.attribute_ids.split(), key=lambda cls: cls_counts[cls])
fold_counts = [(f, fold_cls_counts[f, cls]) for f in range(n_folds)]
min_count = min([count for _, count in fold_counts])
random.seed(item.Index)
fold = random.choice([f for f, count in fold_counts
if count == min_count])
folds[item.Index] = fold
for cls in item.attribute_ids.split():
fold_cls_counts[fold, cls] += 1
df['fold'] = folds
return df
def make_folds(n_folds:int,seed:int=42,rmdup:bool=True) -> pd.DataFrame:
if rmdup:
        # remove duplicates
        strmd5 = pd.read_csv("../input/strmd5/strMd5.csv")
"""
Unlike stock codes, index codes must be prefixed with "I"; e.g. 000300 (CSI 300) becomes the symbol I000300.
"""
import pandas as pd
from cswd.utils import ensure_list, sanitize_dates
#from cswd.sqldata.stock_daily import StockDailyData
from cswd.sqldata.query import query_adjusted_pricing
from cswd.sqldata.stock_index_daily import StockIndexDailyData
from zipline.assets._assets import Equity
index_sid_to_code = lambda x:str(int(x) - 100000).zfill(6) # 指数sid -> 指数代码
def _pricing_factory(code, fields, start, end, normalize):
is_index = False
if isinstance(code, str):
symbol = code
elif isinstance(code, Equity):
symbol = code.symbol
else:
raise TypeError('code只能是str或Equity类型')
if len(symbol) == 7:
is_index = True
symbol = symbol[1:]
if symbol[0] in ('1','4'):
is_index = True
symbol = index_sid_to_code(symbol)
if is_index:
df = StockIndexDailyData.query_by_code(symbol).loc[start:end, :]
df = df.reindex(columns=fields, fill_value=0)
else:
#df = StockDailyData.query_by_code(symbol).loc[start:end, fields]
df = query_adjusted_pricing(symbol, start, end, fields, normalize)
if isinstance(df, pd.Series):
df = pd.DataFrame(df)
return df
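# Note: _pricing_factory treats a symbol as an index either when it is 7 characters
# long and prefixed with "I", or when its sid starts with '1' or '4' (converted back
# to an index code via index_sid_to_code); otherwise it is queried as a stock.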
def get_pricing(codes, fields = 'close', start = None, end = None,
assets_as_columns = False,
normalize = False):
"""
    Load daily pricing data for stocks or indexes over a period.
    Parameters
    ----------
    codes : [str or Equity]
        List of stock/index codes, or Equity objects
    fields : [str]
        Column names to read
    start : datetime-like
        Start date, defaults to today - 365
    end : datetime-like
        End date, defaults to today
    assets_as_columns : bool
        Whether to use the symbols as column names. Default False.
    normalize : bool
        Whether to rescale prices so the first day equals 1.0. Default False.
Examples
--------
>>> get_pricing(['I000001','000002','000333']).tail()
close
date code
2017-12-07 000333 49.1300
I000001 3272.0542
2017-12-08 000002 29.8200
000333 50.4300
I000001 3289.9924
    >>> # Fields missing from index data are filled with 0
>>> get_pricing(['I000001','000002','000333'], ['high','low', 'cmv']).tail()
high low cmv
date code
2017-12-07 000333 51.0800 49.0500 3.165395e+11
I000001 3291.2817 3259.1637 0.000000e+00
2017-12-08 000002 29.8800 29.0500 2.899755e+11
000333 51.2000 49.4000 3.249153e+11
I000001 3297.1304 3258.7593 0.000000e+00
    >>> # Use the asset codes as column names
>>> get_pricing(['I000001','000002','000333'], 'close', assets_as_columns=True).tail()
code 000002 000333 I000001
date
2017-12-04 30.85 51.50 3309.6183
2017-12-05 31.03 52.18 3303.6751
2017-12-06 30.77 50.95 3293.9648
2017-12-07 29.95 49.13 3272.0542
2017-12-08 29.82 50.43 3289.9924
>>> get_pricing(['I000001','000002','000333'], 'close', assets_as_columns=True, normalize=True).tail()
code 000002 000333 I000001
date
2017-12-11 1.466743 1.912764 3322.1956
2017-12-12 1.449101 1.901349 3280.8136
2017-12-13 1.467234 1.958425 3303.0373
2017-12-14 1.475564 1.944156 3292.4385
2017-12-15 1.426558 1.915618 3266.1371
"""
if start is None:
start = pd.Timestamp('today') - pd.Timedelta(days=365)
if end is None:
end = pd.Timestamp('today')
start, end = sanitize_dates(start, end)
codes = ensure_list(codes)
fields = ensure_list(fields)
dfs = []
for code in codes:
try:
            # data may not exist for this period
df = _pricing_factory(code, fields, start, end, normalize)
except:
            df = pd.DataFrame()
# -*- coding: utf-8 -*-
"""Test suite for lower bounding techniques."""
import numpy as np
import pandas as pd
import pytest
from sktime.distances.lower_bounding import LowerBounding
from sktime.distances.tests._utils import create_test_distance_numpy
def _validate_bounding_result(
matrix: np.ndarray,
x: np.ndarray,
y: np.ndarray,
all_finite: bool = False,
all_infinite: bool = False,
is_gradient_bounding: bool = False,
):
"""Validate the bounding matrix is what is expected.
Parameters
----------
matrix: np.ndarray (2d array)
Bounding matrix.
x: np.ndarray (1d, 2d or 3d array)
First timeseries.
y: np.ndarray (1d, 2d or 3d array)
Second timeseries.
all_finite: bool, default = False
Boolean that when true will check all the values are finite.
all_infinite: bool, default = False
Boolean that when true will check all the values (aside the middle diagonal)
are infinite.
is_gradient_bounding: bool, default = False
Boolean that when true marks the bounding matrix as generated by an algorithm
that uses a gradient and therefore the first a second column are allowed to
be finite (aside the first and last element in the matrix).
"""
assert isinstance(matrix, np.ndarray), (
f"A bounding matrix must be of type np.ndarray. Instead one was provided with "
f"{type(matrix)} type."
)
assert matrix.ndim == 2, (
f"A bounding matrix must have two dimensions. Instead one was provided with "
f"{matrix.ndim} dimensions."
)
assert matrix.shape == (len(x), len(y)), (
f"A bounding matrix with shape len(x) by len(y) is expected ({len(x), len(y)}. "
f"Instead one was given with shape {matrix.shape}"
)
unique, counts = np.unique(matrix, return_counts=True)
count_dict = dict(zip(unique, counts))
for key in count_dict:
if np.isfinite(key):
assert count_dict[key] >= len(y) or all_infinite is False, (
"All the values in the bounding matrix should be finite. A infinite "
"value was found (aside from the diagonal)."
)
else:
if is_gradient_bounding:
max_infinite = len(y) + len(x) - 2 # -2 as 0,0 and n,m should be finite
assert count_dict[key] >= max_infinite or all_finite is False, (
"All values in the bounding matrix should be infinite. Aside"
"from the first column and last column."
)
else:
assert all_finite is False, (
"All values in the bounding matrix should be"
"infinite. A finite value was found"
)
def _validate_bounding(
x: np.ndarray,
y: np.ndarray,
) -> None:
"""Test each lower bounding with different parameters.
The amount of finite vs infinite values are estimated and are checked that many
is around the amount in the matrix.
Parameters
----------
x: np.ndarray (1d, 2d or 3d)
First timeseries
y: np.ndarray (1d, 2d, or 3d)
Second timeseries
"""
x_y_max = max(len(x), len(y))
no_bounding = LowerBounding.NO_BOUNDING
no_bounding_result = no_bounding.create_bounding_matrix(x, y)
_validate_bounding_result(no_bounding_result, x, y, all_finite=True)
sakoe_chiba = LowerBounding.SAKOE_CHIBA
_validate_bounding_result(
sakoe_chiba.create_bounding_matrix(x, y),
x,
y,
)
_validate_bounding_result(
sakoe_chiba.create_bounding_matrix(x, y, sakoe_chiba_window_radius=3),
x,
y,
)
_validate_bounding_result(
sakoe_chiba.create_bounding_matrix(x, y, sakoe_chiba_window_radius=x_y_max),
x,
y,
all_finite=True,
)
_validate_bounding_result(
sakoe_chiba.create_bounding_matrix(x, y, sakoe_chiba_window_radius=0),
x,
y,
all_infinite=True,
)
itakura_parallelogram = LowerBounding.ITAKURA_PARALLELOGRAM
_validate_bounding_result(
itakura_parallelogram.create_bounding_matrix(x, y),
x,
y,
is_gradient_bounding=True,
)
_validate_bounding_result(
itakura_parallelogram.create_bounding_matrix(x, y, itakura_max_slope=3),
x,
y,
is_gradient_bounding=True,
)
_validate_bounding_result(
itakura_parallelogram.create_bounding_matrix(x, y, itakura_max_slope=x_y_max),
x,
y,
all_finite=True,
is_gradient_bounding=True,
)
_validate_bounding_result(
itakura_parallelogram.create_bounding_matrix(x, y, itakura_max_slope=0),
x,
y,
all_infinite=True,
is_gradient_bounding=True,
)
def test_lower_bounding() -> None:
"""Test for various lower bounding methods."""
no_bounding = LowerBounding.NO_BOUNDING
no_bounding_int = LowerBounding(1)
assert (
no_bounding_int is no_bounding
), "No bounding must be able to be constructed using the enum and a int value."
sakoe_chiba = LowerBounding.SAKOE_CHIBA
sakoe_chiba_int = LowerBounding(2)
assert (
sakoe_chiba_int is sakoe_chiba
), "Sakoe chiba must be able to be constructed using the enum and a int value."
itakura_parallelogram = LowerBounding.ITAKURA_PARALLELOGRAM
itakura_parallelogram_int = LowerBounding(3)
assert itakura_parallelogram_int is itakura_parallelogram, (
"Itakura parallelogram must be able to be constructed using the enum and a int "
"value"
)
_validate_bounding(
x=np.array([10.0]),
y=np.array([15.0]),
)
_validate_bounding(
x=create_test_distance_numpy(10),
y=create_test_distance_numpy(10, random_state=2),
)
_validate_bounding(
x=create_test_distance_numpy(10, 1),
y=create_test_distance_numpy(10, 1, random_state=2),
)
_validate_bounding(
x=create_test_distance_numpy(10, 10),
y=create_test_distance_numpy(10, 10, random_state=2),
)
_validate_bounding(
x=create_test_distance_numpy(10, 10, 1),
y=create_test_distance_numpy(10, 10, 1, random_state=2),
)
_validate_bounding(
x=create_test_distance_numpy(10, 10, 10),
y=create_test_distance_numpy(10, 10, 10, random_state=2),
)
def test_incorrect_parameters() -> None:
"""Test to check correct errors raised."""
numpy_x = create_test_distance_numpy(10, 10)
numpy_y = create_test_distance_numpy(10, 10, random_state=2)
    df_x = pd.DataFrame(numpy_x)
import os
from datetime import datetime
import nose
import pandas as pd
from pandas import compat
from pandas.util.testing import network, assert_frame_equal, with_connectivity_check
from numpy.testing.decorators import slow
import pandas.util.testing as tm
if compat.PY3:
raise nose.SkipTest("python-gflags does not support Python 3 yet")
try:
import httplib2
import pandas.io.ga as ga
from pandas.io.ga import GAnalytics, read_ga
from pandas.io.auth import AuthenticationConfigError, reset_default_token_store
from pandas.io import auth
except ImportError:
raise nose.SkipTest("need httplib2 and auth libs")
class TestGoogle(tm.TestCase):
_multiprocess_can_split_ = True
def test_remove_token_store(self):
auth.DEFAULT_TOKEN_FILE = 'test.dat'
with open(auth.DEFAULT_TOKEN_FILE, 'w') as fh:
fh.write('test')
reset_default_token_store()
self.assertFalse(os.path.exists(auth.DEFAULT_TOKEN_FILE))
@with_connectivity_check("http://www.google.com")
def test_getdata(self):
try:
end_date = datetime.now()
start_date = end_date - pd.offsets.Day() * 5
end_date = end_date.strftime('%Y-%m-%d')
start_date = start_date.strftime('%Y-%m-%d')
reader = GAnalytics()
df = reader.get_data(
metrics=['avgTimeOnSite', 'visitors', 'newVisits',
'pageviewsPerVisit'],
start_date=start_date,
end_date=end_date,
dimensions=['date', 'hour'],
parse_dates={'ts': ['date', 'hour']},
index_col=0)
self.assertIsInstance(df, pd.DataFrame)
self.assertIsInstance(df.index, pd.DatetimeIndex)
self.assertGreater(len(df), 1)
self.assertTrue('date' not in df)
self.assertTrue('hour' not in df)
self.assertEqual(df.index.name, 'ts')
self.assertTrue('avgTimeOnSite' in df)
self.assertTrue('visitors' in df)
self.assertTrue('newVisits' in df)
self.assertTrue('pageviewsPerVisit' in df)
df2 = read_ga(
metrics=['avgTimeOnSite', 'visitors', 'newVisits',
'pageviewsPerVisit'],
start_date=start_date,
end_date=end_date,
dimensions=['date', 'hour'],
parse_dates={'ts': ['date', 'hour']},
index_col=0)
            assert_frame_equal(df, df2)
import re
import pandas as pd
def correct_gutenberg_meta(input_path: str, output_path: str):
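    # Repairs a Gutenberg metadata CSV whose records were split across physical lines:
    # runs of blank lines are collapsed first, then lines are accumulated until the next
    # line whose first comma-separated field is a numeric gutenberg_id, which marks the
    # start of a new record.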
with open(input_path, "r", encoding="utf-8") as file:
content = file.read()
content = re.sub(r'\n\n+', ' ', content)
# content = content.replace('\n\n', '')
# print(content[:1000])
new_lines = []
last_line = ""
for line_count, line in enumerate(content.split('\n')):
line_id_cand = line.split(',')[0]
if line_id_cand.isdigit():
new_lines.append(last_line)
last_line = ""
else:
pass
last_line += line
if last_line:
new_lines.append(last_line)
new_content = '\n'.join(new_lines)
with open(output_path, "w", encoding="utf-8") as file:
file.write(new_content)
def load_gutenberg_meta(path: str):
df = pd.read_csv(path)
d = {}
# gutenberg_id,title,author,gutenberg_author_id,language,gutenberg_bookshelf,rights,has_text
for i, row in df.iterrows():
author = row['author']
        if not pd.isna(author):
# -*- coding: utf-8 -*-
"""
Combined GAC PSDM Model
Recoded from FORTRAN - AdDesignS - PSDM Model
Developed by:
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>
Python Version
@author: <NAME> @UCChEJBB
<NAME>
EPA Disclaimer
==============
The United States Environmental Protection Agency (EPA) GitHub project code is
provided on an "as is" basis and the user assumes responsibility for its use.
EPA has relinquished control of the information and no longer has
responsibility to protect the integrity, confidentiality, or availability of
the information. Any reference to specific commercial products, processes, or
services by service mark, trademark, manufacturer, or otherwise, does not
constitute or imply their endorsement, recommendation or favoring by EPA. The
EPA seal and logo shall not be used in any manner to imply endorsement of any
commercial product or activity by EPA or the United States Government.
By submitting a pull request, you make an agreement with EPA that you will not
submit a claim of compensation for services rendered to EPA or any other
federal agency. Further, you agree not to charge the time you spend developing
software code related to this project to any federal grant or cooperative
agreement.
"""
import mkl
mkl.set_num_threads(1)
import warnings
warnings.simplefilter("ignore")
import pylab as plt
import numpy as np
import pandas as pd
import scipy as sp
# from scipy import special
from scipy.integrate import quad, solve_ivp
from scipy.interpolate import interp1d
# from scipy.optimize import minimize
import multiprocessing as mp
import time as ti #time as a variable is used in code, so ti is used
# import stopit # used to kill runs that take too long in analyze_all #consider getting rid of stopit....
#Read in all associated PSDM functions
from PSDM_functions import min_per_day, lpg, spar_Jac, foul_params, kf_calc
from PSDM_functions import find_minimum_df, tortuosity, calc_solver_matrix
from PSDM_functions import density, viscosity, recalc_k, generate_grid
from PSDM_functions import interp, process_input_data, process_input_file
# from PSDM_functions import *
# from PSDM_functions import find_minimum_df
from PSDM_tools import *
class PSDM():
def __init__(self, column_data, comp_data, rawdata_df, **kw):
'''
column_data: contains specific characteristics of the column (pandas_df)
'L' = length (cm)
'diam' = diameter (cm)
'wt' = mass of carbon (g)
'flrt' = flow rate (units/min) see flow_type for units
'rhop' = apparent density (gm/ml)
'rad' = particle radius (cm)
'tortu' = tortuosity
'psdfr' = pore to surface diffusion ratio
'influentID' = influent identifier (text)
'effluentID' = effluent identifier (text)
'units' = may have units also specified
--provided/processed by 'process_input_file' function
comp_data: dataframe with compound information
MW, MolarVol, BP, Density, Solubility (unused), VaporPress (unused)
data_df: contains raw data associated with the column
should contain influent and effluent data for the column
keywords:
project_name: (default = PSDM)
nr: number of radial collocation points (int, default = 14)
nz: number of axial collocation points (int, default = 19)
1/n: Freundlich 1/n value (default = 0.45)
temp: temperature in Celsius (default = 20)
time_type: hours, days, mins (default = 'days')
flow_type: gal, ml, L (default = 'ml')
conc_type: ug, ng (per liter) (default = 'ng')
break_type: calc, force (default = 'calc')
brk_df: breakthrough dataframe (default = None)
k_data: preprocessed k_data DataFrame (default = None)
duration: length of simulation (default = max of data provided)
water_type:
default = 'Organic Free'
chem_type:
default = 'halogenated alkenes'
test_range: default = np.linspace(1, 5, 41)
xn_range: default = np.arange(0.20, 0.95, 0.05)
'''
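        # Example usage (a sketch; the input frames are assumed to be prepared as
        # described above, e.g. by process_input_file, and 'chloroform' is only an
        # illustrative compound name):
        #   column = PSDM(column_data, comp_data, rawdata_df, nr=14, nz=19, temp=20,
        #                 water_type='Organic Free', chem_type='halogenated alkenes')
        #   _, best_k, best_xn, ssqs, model_fit = column.run_psdm_kfit('chloroform')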
self.project_name = kw.get('project_name','PSDM')
#collocation initilazation
self.nc = kw.get('nr', 14) #set number of radial points, or 8
self.mc = kw.get('nz', 19) #set number of axial points, or 11
self.nz = self.mc * 1
self.ne = kw.get('ne', 1) # number of finite elements
solver_data = calc_solver_matrix(self.nc, self.mc, self.ne)
self.wr = solver_data['wr']
self.az = solver_data['az']
self.br = solver_data['br']
if self.mc != solver_data['mc']:
''' corrects for OCFE change to nz'''
self.mc = solver_data['mc']
self.nd = self.nc - 1
#set up temperature dependant values
self.temp = kw.get('temp',20)
self.vw = viscosity(self.temp)
self.dw = density(self.temp)
#define unit conversions (time)
if 'time' not in column_data.index:
self.time_type = kw.get('time_type', 'days')
if self.time_type == 'days': #base units in minutes
self.t_mult = min_per_day
elif self.time_type == 'hours':
self.t_mult = 60.
else:
self.t_mult = 1.
else:
self.time_type = column_data.loc['time']
self.t_mult = column_data.loc['t_mult']
# flow units
if 'flow_type' not in column_data.index:
self.flow_type = kw.get('flow_type', 'ml')
#deal with flow, conv. to liters
if self.flow_type == 'gal':
self.flow_mult = lpg
elif self.flow_type == 'ml':
self.flow_mult = 1e-3
else:
self.flow_mult = 1.
else:
self.flow_type = column_data.loc['flow_type']
self.flow_mult = column_data.loc['flow_mult']
# concentration units
if 'units' not in column_data.index:
self.conc_type = kw.get('conc_type', 'ng')
#deal with mass, conv. to ug
if self.conc_type == 'ug':
self.mass_mul = 1.
elif self.conc_type == 'ng':
self.mass_mul = 1e-3
else:
print('conc_type is invalid, ug/ng are valid options')
self.mass_mul = 1.
else:
self.conc_type = column_data.loc['units']
self.mass_mul = column_data.loc['mass_mul']
#initialize column characteristics #process dataframe
self.L = column_data['L']
self.diam = column_data['diam']
self.wt = column_data['wt']
self.flrt = column_data['flrt']# * self.flow_mult
self.rhop = column_data['rhop']
self.rhof = column_data['rhof']
self.rad = column_data['rad']
self.tortu = column_data['tortu']
self.psdfr = column_data['psdfr']
self.epor = column_data['epor']
self.influent = column_data['influentID']
self.carbon = column_data.name
self.duration = kw.get('duration',\
np.max(rawdata_df.index.values))
#calculate other fixed values
self.area = np.pi*(self.diam**2)/4.
self.bedvol = self.area * self.L
self.ebed = 1. - self.wt/(self.bedvol*self.rhop)
self.tau = self.bedvol * self.ebed * 60./self.flrt
self.sf = 0.245423867471 * self.flrt/self.area # gpm/ft**2
self.vs = self.flrt/(60.*self.area)
self.re = (2.*self.rad*self.vs*self.dw)/(self.ebed*self.vw)
#calculate Empty Bed Contact Time (EBCT)
self.ebct = self.area*self.L/self.flrt
#data
self.data_df = rawdata_df
self.comp_df = comp_data #information about the compounds
self.xn = kw.get('xn', 0.45)
self.xdata = rawdata_df.index.values
#breakthrough type, data
self.brk_type = kw.get('break_type','calc')
self.brk_df = kw.get('brk_df', None)
if self.brk_type == 'force': # self.brk_df != None and
self.brk_df['breakday'][self.brk_df['breakday']=='assume']=3000
self.brk_df['breakday'][self.brk_df['breakday']=='est']=2000
self.brk_df['breakday'][self.brk_df['breakday']=='no']=1000
for i in self.brk_df.index:
self.brk_df.iloc[i]['breakday'] = \
                    int(self.brk_df.iloc[i]['breakday']) #convert everything to integer
#filter
self.compounds = kw.get('compounds',
list(rawdata_df.columns.levels[1]))
self.num_comps = len(self.compounds) # used for multi_comp
self.__y0shape = (self.num_comps, self.nc+1, self.mc)
self.__altshape = (self.num_comps, self.nc, self.mc)
# precalculate jacobian sparsity matrix
self.jac_sparse = spar_Jac(self.num_comps, self.nc, self.nz, self.ne)
self.jac_sparse_single = spar_Jac(1, self.nc, self.nz, self.ne)
#calculate initial values
#might need to add more here
k_data = kw.get('k_data', [])
if len(k_data) == 0:
self.k_data = pd.DataFrame(index=['K','1/n', 'q', 'brk','AveC'], \
columns=self.compounds)
for comp in self.compounds:
k, q, classifier, brk, aveC = self.__calculate_capacity(comp)
self.k_data[comp]=np.array([k, self.xn, q, brk, aveC])
else:
self.k_data = k_data
if self.brk_type=='force':
tmp_assume = self.brk_df[(self.brk_df['carbon']==self.carbon)&\
(self.brk_df['breakday']!=3000)]
impacted = tmp_assume['compound'].values
for comp in self.compounds:
if comp in impacted:
k, q, classifier, brk, aveC = self.__calculate_capacity(comp)
self.k_data[comp]=np.array([k, self.xn, q, brk, aveC])
self.k_data_orig = self.k_data
self.water_type = kw.get('water_type','Organic Free')
self.chem_type = kw.get('chem_type', 'halogenated alkenes')
self.test_range = kw.get('test_range', np.linspace(1, 5, 41))
self.xn_range = kw.get('xn_range', np.arange(0.20, 0.95, 0.05))
#handling for multiprocessing
self.processes = kw.get('mp', mp.cpu_count())
self.optimize_flag = kw.get('optimize',True)
if len(self.test_range)==1 and len(self.xn_range)==1:
self.optimize_flag = False
#set the fouling parameters
#expected that rk1-rk4 are pandas.Series
self.rk1_store, self.rk2_store, self.rk3_store, self.rk4_store = self.__get_fouling_params()
#set maximum cycles to consider for full scale
self.max_days = kw.get('max_days', 2000) #number of days, 2000 default
        self.timeout = kw.get('timeout', 300) # 300 second default timeout
self.plot_output = kw.get('plot', False)
self.file_output = kw.get('file_out', True) #need to turn on
self.solver = kw.get('solver', 'BDF') #defaults to BDF
self.fouling_dict = {} #set up empty dictionary for fouling functions
max_time = np.max([self.max_days, self.duration]) #doesn't do anything at the moment for k_fit
self.time_vals = np.linspace(0, max_time*self.t_mult, 500)
self.fouling_dict= self.__fouled_k_new(self.k_data.loc['K'], self.time_vals)
if False:
plt.figure()
for key in self.fouling_dict.keys():
plt.plot(self.time_vals/self.t_mult/7, \
self.fouling_dict[key](self.time_vals),\
label=key)
plt.legend()
# =============================================================================
# end __init__
# =============================================================================
def __get_fouling_params(self):
'''
water= [Rhine, Portage, Karlsruhe, Wausau, Haughton]
chemical= [halogenated alkanes, halogenated alkenes, trihalo-methanes
aromatics, nitro compounds, chlorinated compounds, phenols
PNAs, pesticides]
'''
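        # The returned rk1..rk4 series define the per-compound capacity-fouling
        # multiplier k_mult(t) = rk1 + rk2*t + rk3*exp(rk4*t), which is floored at 1e-3
        # and interpolated over time in __fouled_k_new() below.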
a1, a2, a3, a4 = foul_params['water'][self.water_type]
if self.chem_type != 'PFAS':
b1, b2 = foul_params['chemical'][self.chem_type]
dummy_array = np.ones(len(self.compounds))
b1 = pd.Series(b1 * dummy_array, index=self.compounds)
b2 = pd.Series(b2 * dummy_array, index=self.compounds)
else:
pfas_dict = foul_params['chemical']['PFAS']
dict_keys = pfas_dict.keys() #available [a, b] pairs
b1 = pd.Series(index=self.compounds) #start empty storage pd.Series
b2 = pd.Series(index=self.compounds)
for comp in self.compounds:
if comp in dict_keys:
b1[comp] = pfas_dict[comp][0]
b2[comp] = pfas_dict[comp][1]
# b1[comp] = 1 - pfas_dict[comp][1]
# b2[comp] = 1 - pfas_dict[comp][0]
#forces the intercept of molar K reduction to 100% at t=0
else: #if no specific value provided, return 'Average'
b1[comp] = pfas_dict['Ave'][0]
b2[comp] = pfas_dict['Ave'][1]
rk1 = b1 * a1 + b2
rk2 = b1 * a2
rk3 = b1 * a3
rk4 = b1 * a4 #no factor of 100, in exponent (should there be b1?)
return rk1, rk2, rk3, rk4
def __fouled_k_new(self, k_data, t):
#works on unconverted time
#only works on multiplier
if type(t) == np.ndarray:
data_store = {}
for comp in self.compounds:
k_mult_pd = self.rk1_store[comp] + \
self.rk2_store[comp] * t + \
self.rk3_store[comp] * np.exp(self.rk4_store[comp] * t)
k_mult_pd[k_mult_pd < 1e-3] = 1e-3
data_store[comp] = interp1d(t,\
k_mult_pd, \
fill_value='extrapolate')
return data_store
def __calculate_capacity(self, compound):
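        # Estimates single-compound capacity from the column data: locate (or estimate)
        # the breakthrough time, integrate the influent-effluent difference up to that
        # time to get the loading q (ug/g), then back out the Freundlich
        # K = q / (aveC ** (1/n)) with concentrations converted to ug/L.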
flow = self.flrt * self.flow_mult
breakthrough_code = 'none'
carbon_mass = self.wt
infl = self.data_df[self.influent][compound]
effl = self.data_df[self.carbon][compound]
breakthrough_time = self.duration
k = 0.
q_meas = 0.
xdata = self.xdata
aveC = infl.mean() #calculates average influent concentration
if self.brk_type == 'calc':
donothing = False #set trigger for later process to False
if infl.sum() == 0:
print('No mass in influent for '+compound)
donothing = True
if effl.sum() == 0:
print('Insufficient data for breakthrough for '+compound)
donothing = False
diff = (infl-effl)#/infl
perc_diff = (infl-effl)/infl
done = False
if not donothing:
yte = effl.values
yti = infl.values
#check if there are already zeros (breakthrough point exact)
tmp_time = perc_diff[perc_diff==0.].index
if len(tmp_time) == 1:
breakthrough_time = tmp_time.values[0]
breakthrough_code = 'breakthrough'
done = True
elif len(tmp_time) > 1:
breakthrough_time = tmp_time.min()
breakthrough_code = 'breakthrough'
done = True
#check if there are transitions to negative (breakthrough in data, calculatable)
if not done:
tmp_time = diff[diff<0.].index.values
if len(tmp_time) > 0:
test = np.min(tmp_time)
test_f = interp1d(xdata, diff)
tmp = sp.optimize.root(test_f, test-1)
breakthrough_time = tmp.x
breakthrough_time = breakthrough_time[0]
breakthrough_code = 'breakthrough'
done = True
#check to see if there is possible breakthrough within 15% of influent
#and minimal change in derivative
if not done:
tmp_time = perc_diff[perc_diff < 0.1].index.values #
if len(tmp_time) > 0:
test = np.min(tmp_time)
if test != np.max(xdata):
tmp_diff = perc_diff[perc_diff.index>=test]
if np.max(tmp_diff.values) < 0.15 and np.min(tmp_diff-tmp_diff.values[0])>=0.:
breakthrough_code = 'implied'
breakthrough_time = test
done = True
#check for correlational agreement with values, to imply breakthrough
if not done:
length = len(yte)
corrv = np.array([np.corrcoef(yte[i:i+3],yti[i:i+3])[0,1] \
for i in range(length-2)])
corrv[np.isnan(corrv)] = 0.
if corrv[-1] >= 0.95 and yte[-1] > 0.:
for i in range(len(corrv)-1,0,-1):
if corrv[i] >= 0.95:
breakthrough_time = xdata[i]
breakthrough_code = 'implied'
done = True
elif corrv[i] < 0.95:
break
nZc = np.count_nonzero(yte) #used to determine linear regression
#try to estimate the breakthrough
if not done:
x = xdata[-(nZc+1):]
y = yte[-(nZc+1):]
A = np.vstack([x,np.ones(len(x))]).T
m,c = np.linalg.lstsq(A,y)[0] #fits line
if m > 0:
intersection = (infl.mean() - c)/m
else:
intersection = self.duration#np.max(xt) #aveC
breakthrough_time = intersection
breakthrough_code = 'estimated'
yti = np.append(yti, aveC)
yte = np.append(yte, aveC)
xdata = np.append(xdata, breakthrough_time)
f_infl = interp1d(xdata, yti)
f_effl = interp1d(xdata, yte)
int_infl = quad(f_infl, 0, breakthrough_time, points=xdata)[0]
int_effl = quad(f_effl, 0, breakthrough_time, points=xdata)[0]
qtmp = flow * self.t_mult * (int_infl - int_effl) * self.mass_mul
q_meas = qtmp/carbon_mass #ug/g
k = q_meas / ((aveC*self.mass_mul) ** self.xn)
aveC = int_infl/breakthrough_time # recalculate only what is inside breakthrough
elif self.brk_type == 'force':# and self.brk_df != None:
brk_df = self.brk_df
maxx = self.duration
brkdy = brk_df[(brk_df['carbon']==self.carbon) & \
(brk_df['compound'] == compound)]['breakday'].values[0]
if brkdy < 2000:
f_infl = interp1d(xdata, infl, fill_value='extrapolate')
f_effl = interp1d(xdata, effl, fill_value='extrapolate')
xdata = xdata[xdata <= brkdy]
if brkdy == 1000: #report min, but no breakthrough
qtmp = quad(f_infl, 0, maxx, points = xdata)[0] - \
quad(f_effl, 0, maxx, points = xdata)[0]
aveC = np.mean(infl)
else:
if brkdy in xdata:
aveC = quad(f_infl, 0, brkdy)[0]/brkdy
else:
tmpC = f_infl(brkdy)
tmpxdata = xdata[xdata<=brkdy]
tmpxdata = np.append(tmpxdata, tmpC)
aveC = quad(f_infl,0, brkdy)[0]/brkdy
xdata = np.append(xdata, brkdy)
qtmp = quad(f_infl, 0, brkdy, points = xdata)[0] - \
quad(f_effl, 0, brkdy, points = xdata)[0]
breakthrough_time = brkdy
int_infl = quad(f_infl, 0, breakthrough_time, points = xdata)[0]
int_effl = quad(f_effl, 0, breakthrough_time, points = xdata)[0]
qtmp = flow * self.t_mult * (int_infl - int_effl) * self.mass_mul
q_meas = qtmp/carbon_mass #ug/g
k = q_meas / ((aveC*self.mass_mul) ** self.xn)
elif brkdy == 2000: #estimate
nZc = np.count_nonzero(effl)
x = xdata[-(nZc+1):]
y = effl[-(nZc+1):]
A = np.vstack([x,np.ones(len(x))]).T
m,c = np.linalg.lstsq(A,y)[0] #fits line
if m > 0:
intersection = (infl.mean() - c)/m
else:
intersection = maxx #aveC
breakthrough_time = intersection
infl = np.append(infl, aveC)
effl = np.append(effl, aveC)
xdata = np.append(xdata, intersection)
f_infl = interp1d(xdata, infl, fill_value='extrapolate')
f_effl = interp1d(xdata, effl, fill_value='extrapolate')
aveC = quad(f_infl, 0, breakthrough_time)[0]/\
breakthrough_time
qtmp = quad(f_infl, 0, intersection)[0] - \
quad(f_effl, 0, intersection)[0]
q_meas = flow * self.t_mult * qtmp * self.mass_mul /\
(carbon_mass) #* self.mass_mul# ug/g
k = q_meas / ((aveC * self.mass_mul) ** self.xn)
breakthrough_code = 'supplied'
# returns capacity in (ug/g)(L/ug)**(1/n)
return k, q_meas, breakthrough_code, breakthrough_time, aveC
# =============================================================================
# End INIT and helper functions
# =============================================================================
def run_psdm_kfit(self, compound):
'''
time: must be specified in minutes
best_vals: [Dp, Ds, kf]
flow rate assumed to be in 'ml/min' in column properties
'''
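        # Returns (compound, best K, best 1/n, ssq grid, modeled effluent DataFrame).
        # When optimization is enabled, the K-multiplier/1/n grid defined by test_range
        # and xn_range is searched and the resulting ssq surface is written to Excel.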
#pull information out of solver_data/self
wr = self.wr
nc = self.nc
mc = self.mc
az = self.az
br = self.br
t_mult = self.t_mult
vw = self.vw #viscosity
dw = self.dw #density
epor = self.epor
rhop = self.rhop
ebed = self.ebed
tau = self.tau
rad = self.rad
molar_k_t = self.fouling_dict[compound] #multiplier for fouling
water_type = self.water_type
k_v = self.k_data[compound]['K']
q_v = self.k_data[compound]['q']
mw = self.comp_df[compound]['MW'] #molecular weight
mol_vol = self.comp_df[compound]['MolarVol'] # Molar volume
if self.time_type == 'days':
dstep = 0.25 * min_per_day
else:
dstep = 15.
        #set up bindings for nonlocal variables
cinf = 1.
cout_f = 1.
tconv = 1.
time_dim = 1.
time_dim2 = 1.
ttol = 1.
tstep = 1.
ds_v = 1.
inf = self.data_df[self.influent][compound]
eff = self.data_df[self.carbon][compound]
#convert cbo to molar values
cbo = inf * self.mass_mul / mw #/ 1000. #(* self.mass_mult ????)
time = (inf.index * t_mult).values
if cbo.iloc[0] == 0.:
cb0 = 1.
else:
cb0 = cbo[0]
cin = cbo/cb0
try:
brk = self.k_data[compound]['brk']
except:
brk = np.max(inf.index.values)
tortu = 1.0 # tortuosity
psdfr = 5.0 # pore to surface diffusion ratio
nd = nc - 1
difl = 13.26e-5/(((vw * 100.)**1.14)*(mol_vol**0.589)) #vb
sc = vw / (dw * difl) #schmidt number
#set film and pore diffusion
multi_p = difl/(2*rad) # multiplier used for kf calculation
kf_v = kf_calc(multi_p, self.re, sc, ebed, corr='Chern and Chien')
if compound == 'Test':
kf_v = self.k_data['Test']['kf'] #will break
cout = eff * self.mass_mul / mw / cb0 #/ 1000.
dp_v = (difl/(tortu)) #*column_prop.loc['epor'] #porosity not used in AdDesignS appendix, removed to match
# @stopit.threading_timeoutable()
def run(k_val, xn):
nonlocal cinf
nonlocal cout_f
nonlocal tconv
nonlocal time_dim
nonlocal time_dim2
nonlocal ttol
nonlocal tstep
nonlocal ds_v # for output
aau = np.zeros(mc)
#==============================================================================
# #converts K to (umole/g)(L/umole)**(1/n), assumes units of (ug/g)(L/ug)**(1/n)
#==============================================================================
molar_k = k_val / mw / ((1. / mw) ** xn)
xni = 1./xn
ds_v = epor*difl*cb0*psdfr/(1e3*rhop*molar_k*cb0**xn)
if water_type != 'Organic Free':
ds_v /= 1e10 #1e6
d = ds_v/dp_v
qe = molar_k * cb0**xn
qte = 1. * qe
dgs = (rhop * qe * (1.-ebed) * 1000.)/(ebed * cb0)
dgp = epor * (1. - ebed)/(ebed)
dg = dgs + dgp
dgt = dg
dg1 = 1. + dgt
dgI = 1.0/dg
edd = dgt/dg #dgt changed from dg1
ym = qe/qte
eds = ds_v*dgs*tau/(rad**2)
if eds < 1e-130:
eds = 1e-130
edp = dp_v*dgp*tau/(rad**2)
# from orthog(n)
beds = (eds + d*edp) * edd * br[:-1]
bedp = edp * (1. - d) * edd * br[:-1]
#depends on kf
st = kf_v * (1. -ebed) * tau/(ebed*rad)
stdv = st * dgt # dgt changed from dg1
#convert to dimensionless
tconv = 60./(tau*dg1)
self.tconv = tconv
tstep = dstep * tconv
ttol = time[-1] * tconv
time_dim = time * tconv
numb = int(brk*3 + 1)
time_dim2 = np.linspace(0., brk * tconv * t_mult,\
num=numb, endpoint=True) #increase the number of sites to check for ssq
#set up time based influent data
cinf = interp1d(time, cin.values, fill_value='extrapolate')
# cinf = interp1d(time_dim, cin.values, fill_value = 'extrapolate')
cout_f = interp1d(time_dim, cout.values, fill_value='extrapolate')
#initialize storage arrays/matrices
n = (nc+1)*mc
y0 = np.zeros(n)
#new array methods, create all arrays needed by diffun
time_temp = np.arange(0, time[-1] + self.t_mult * 10, 1)
cinfA = cinf(time_temp)
if water_type != 'Organic Free':
tortu = tortuosity(time_temp)
else:
tortu = np.ones(len(time_temp))
facA = (1./tortu - d)/(1. - d)
foulFA = (1./molar_k_t(time_temp))**xni
ydot_tmp = np.zeros((nc+1, mc))
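            # State layout: the solver vector reshapes to (nc+1, mc); rows 0..nc-1 hold
            # the adsorbed-phase profile at the radial collocation points, and the last
            # row is the dimensionless bulk liquid concentration at the axial points
            # (its final entry is the modeled effluent interpolated as cp below).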
def diffun(t, y0):
nonlocal aau
nonlocal ydot_tmp
y0tmp = y0.reshape(ydot_tmp.shape)
ydot = ydot_tmp.copy()
idx = int(np.floor(t/tconv)) # assumes daily index provided
extra = t/tconv - idx
# #defines the influent concentration at time t
cinfl = interp(cinfA[idx: idx+2], extra) #
z = ym * y0tmp[:nc, :mc] #* ym #updated ym should always be 1 for single comp.
qte = z
yt0 = xni * z
z_c = z/qte
z_c[qte<=0.] = 0.
# z[qte>0.] = z_c[qte>0.] # should be 1 for single component.
q0 = yt0 * xn/ym
cpore = z_c * q0**xni * interp(foulFA[idx:idx+2], extra)
cpore[np.logical_or.reduce((qte<=0.,\
yt0<=0,\
xni*np.log10(q0)<-20,\
))] = 0.
cpore_tmp = cpore[nc-1]
cpore_tmp[cpore_tmp < 0.] = 0.
cbs = stdv*(y0tmp[nc]-cpore_tmp)
cbs[0] = 0.
bb = interp(facA[idx:idx+2],extra)*np.dot(bedp, cpore) +\
np.dot(beds, y0tmp[:nc, :])
ww = wr[:nd]@bb
# ydot[:nd,:] = bb
ydot[:nd,1:] = bb[:, 1:]
ydot[nc-1][0] = (stdv*dgI*(cinfl - cpore[nc-1][0]) - ww[0])/\
wr[nc-1] #iii
ydot[nc-1][1:] = (cbs[1:]*dgI - ww[1:])/wr[nc-1]
aau[1:] = (np.dot(az[1:,1:], y0tmp[-1, 1:]))
ydot[-1,1:] = (-dgt*(az[:,0]*cinfl + aau) - 3.* cbs)[1:] #dgt was changed from dg1
ydot = ydot.reshape((nc+1)*(mc))
return ydot
try:
y = solve_ivp(diffun,\
(0, ttol),\
y0, \
method=self.solver,\
jac_sparsity=self.jac_sparse_single,\
max_step=tstep/3,\
)
# defines interpolating function of predicted effluent
cp_tmp = y.y[-1]
cp_tmp[cp_tmp < 0.] = 0.#sets negative values to 0.
# cp_tmp[cp_tmp > np.max(cin)*3.] = np.max(cin)*3. #sets the max to 5x cb0
cp = interp1d(y.t, cp_tmp, fill_value='extrapolate')
self.ydot = y.y * cb0 * mw / self.mass_mul
self.yt = y.t / tconv / t_mult
except Exception as e:
print('Error produced: ', e,'\n', compound)
t_temp = np.linspace(0, ttol, 20)
cp_tmp = np.ones(20) # need a better error position
cp = interp1d(t_temp, cp_tmp, fill_value='extrapolate')
ssq = ((cout_f(time_dim2)-cp(time_dim2))**2).sum()
return cp, ssq
def run_fit(k_val, xn):
cp, ssq = run(k_val, xn)
# cp, ssq = run(k_val, xn, timeout=self.timeout)
return ssq
if self.optimize_flag:
k_mult = {}
for xns in self.xn_range:
k_mult[xns] = recalc_k(k_v, q_v, self.xn, xns)
ssqs = pd.DataFrame([[run_fit(i*k_mult[j],j)\
for j in self.xn_range] \
for i in self.test_range], \
index=self.test_range, \
columns=self.xn_range)
min_val = find_minimum_df(ssqs)
best_val_xn = min_val.columns[0]
best_val_k = min_val.index[0] * k_mult[best_val_xn]
best_fit, _ = run(best_val_k, best_val_xn)
min_val = min_val.values[0][0]
writer = pd.ExcelWriter('ssq_'+self.carbon+'-'+compound+'.xlsx')
ssqs.to_excel(writer, 'Sheet1')
writer.save()
else: #assume test_range and xn_range are single values
best_val_xn = self.xn_range[0]
best_val_k = self.test_range[0]
best_fit, min_val = run(best_val_k, best_val_xn)
# best_fit, min_val = run(best_val_k, best_val_xn, timeout=self.timeout)
ssqs = pd.DataFrame(min_val, columns=[best_val_xn],\
index=[best_val_k])
itp = np.arange(0., time[-1]/t_mult, dstep/t_mult)
output_fit = interp1d(itp, \
best_fit(itp*tconv*t_mult) * cb0 *\
mw / self.mass_mul, \
fill_value='extrapolate')
model_data = pd.DataFrame(output_fit(itp), \
columns = ['data'], \
index = itp)
writer = pd.ExcelWriter(self.project_name+'_'+compound+'-'+self.carbon+'.xlsx') #'-'+repr(round(best_val_xn,2))
model_data.to_excel(writer, 'model_fit')
inf.to_excel(writer, 'influent')
eff.to_excel(writer, 'effluent')
data_tmp = pd.Series([sc, self.re, difl, kf_v, best_val_k, best_val_xn,\
dp_v, ds_v, min_val, self.ebct, self.sf], \
index = ['Sc','Re','difl','kf','K','1/n','dp','ds','ssq','ebct','sf'])
data_tmp.to_excel(writer, 'parameters')
if self.optimize_flag:
ti.sleep(1)
writer.save()
return compound, best_val_k, best_val_xn, ssqs, model_data
#end kfit
#begin dsfit
def run_psdm_dsfit(self, compound):
'''
time: must be specified in minutes
flow rate assumed to be in 'ml/min' in column properties
'''
#pull information out of solver_data/self
wr = self.wr
nc = self.nc
mc = self.mc
az = self.az
br = self.br
t_mult = self.t_mult
vw = self.vw #viscosity
dw = self.dw #density
epor = self.epor
rhop = self.rhop
ebed = self.ebed
tau = self.tau
rad = self.rad
k_val = self.k_data[compound]['K']
xn = self.k_data[compound]['1/n']
mw = self.comp_df[compound]['MW'] #molecular weight
mol_vol = self.comp_df[compound]['MolarVol'] # Molar volume
if self.time_type == 'days':
dstep = 0.25 * min_per_day
else:
dstep = 15.
        #set up bindings for nonlocal variables
cinf = 1.
cout_f = 1.
tconv = 1.
time_dim = 1.
time_dim2 = 1.
ttol = 1.
tstep = 1.
ds_v = 1.
inf = self.data_df[self.influent][compound]
eff = self.data_df[self.carbon][compound]
#convert cbo to molar values
cbo = inf * self.mass_mul / mw #/ 1000. #(* self.mass_mult ????)
time = (inf.index * t_mult).values
if cbo.iloc[0] == 0.:
cb0 = 1.
else:
cb0 = cbo[0]
cin = cbo/cb0
try:
brk = self.k_data[compound]['brk']
except:
brk = np.max(inf.index.values)
tortu = 1.0 # tortuosity
psdfr = 5.0 # pore to surface diffusion ratio
nd = nc - 1
difl = 13.26e-5/(((vw * 100.)**1.14)*(mol_vol**0.589)) #vb
sc = vw / (dw * difl) #schmidt number
#set film and pore diffusion
multi_p = difl/(2*rad) # multiplier used for kf calculation
kf_v = kf_calc(multi_p, self.re, sc, ebed, corr='Chern and Chien')
if compound == 'Test':
kf_v = self.k_data['Test']['kf'] #will break
cout = eff * self.mass_mul / mw / cb0 #/ 1000.
dp_v = (difl/(tortu)) #*column_prop.loc['epor'] #porosity not used in AdDesignS appendix, removed to match
ds_base = 1. #set up for nonlocal
# @stopit.threading_timeoutable()
def run(ds_mult):
nonlocal cinf
nonlocal cout_f
nonlocal tconv
nonlocal time_dim
nonlocal time_dim2
nonlocal ttol
nonlocal tstep
nonlocal ds_v # for output
nonlocal ds_base
aau = np.zeros(mc)
#==============================================================================
# #converts K to (umole/g)(L/umole)**(1/n), assumes units of (ug/g)(L/ug)**(1/n)
#==============================================================================
molar_k = k_val / mw / ((1. / mw) ** xn)
xni = 1./xn
ds_base = epor*difl*cb0*psdfr/(1e3*rhop*molar_k*cb0**xn)
if self.optimize_flag:
ds_v = ds_base * ds_mult
else:
ds_v = ds_mult
#multiplies ds by ds_mult, passed as argument
d = ds_v/dp_v
qe = molar_k * cb0**xn
qte = 1. * qe
dgs = (rhop * qe * (1.-ebed) * 1000.)/(ebed * cb0)
dgp = epor * (1. - ebed)/(ebed)
dg = dgs + dgp
dgt = dg
dg1 = 1. + dgt
dgI = 1.0/dg
edd = dg1/dg #dgt changed from dg1
ym = qe/qte
eds = ds_v*dgs*tau/(rad**2)
if eds < 1e-130:
eds = 1e-130
edp = dp_v*dgp*tau/(rad**2)
# from orthog(n)
beds = (eds + d*edp) * edd * br[:-1]
bedp = edp * (1. - d) * edd * br[:-1]
#depends on kf
st = kf_v * (1. -ebed) * tau/(ebed*rad)
stdv = st * dgt # dgt changed from dg1
#convert to dimensionless
tconv = 60./(tau*dg1)
tstep = dstep * tconv
ttol = time[-1] * tconv
time_dim = time * tconv
numb = int(brk*2 + 1)
time_dim2 = np.linspace(0., brk * tconv * t_mult,\
num=numb, endpoint=True) #increase the number of sites to check for ssq
#set up time based influent data
cinf = interp1d(time_dim, cin.values, fill_value='extrapolate')
cout_f = interp1d(time_dim, cout.values, fill_value='extrapolate')
#initialize storage arrays/matrices
n = (nc+1)*mc
y0 = np.zeros(n)
def diffun(t, y0):
nonlocal aau
y0tmp = y0.reshape((nc+1,mc))
ydot = np.zeros(y0tmp.shape)
#defines the influent concentration at time t
cinfl = cinf(t)
fac = 1.
z = ym * y0tmp[:nc,:mc] #* ym #updated ym should always be 1 for single comp.
qte = z
yt0 = xni * z
z_c = z/qte
z[qte>0.] = z_c[qte>0.] # should be 1 for single component.
q0 = yt0 * xn/ym
q0[np.logical_not(np.isfinite(q0))] = 0.
z[np.logical_not(np.isfinite(z))] = 0.
cpore = z * q0**xni # * (molar_k / molar_k_t)**xni
cpore[np.logical_or.reduce((qte<=0.,\
yt0<=0,\
xni*np.log10(q0)<-20,\
                                        np.isnan(cpore)  # NaN guard; 'cpore == np.nan' is always False
))] = 0.
cpore[np.isinf(cpore)] = 1.
cpore_tmp = cpore[nc-1]
cpore_tmp[cpore_tmp < 0.] = 0.
cbs = stdv*(y0tmp[nc]-cpore_tmp)
cbs[0] = 0.
bb = fac * np.dot(bedp, cpore) + np.dot(beds,y0tmp[:nc,:])
ww = np.dot(wr[:nd], bb)
ydot[:nd,:] = bb
ydot[nc-1][0] = (stdv*dgI*(cinfl - cpore[nc-1][0]) - ww[0]) / wr[nc-1] #iii
ydot[nc-1][1:] = (cbs[1:]*dgI - ww[1:])/wr[nc-1]
aau[1:] = (np.dot(az[1:,1:],y0tmp[-1,1:]))
ydot[-1,1:] = (-dgt*(az[:,0]*cinfl + aau) - 3.* cbs)[1:] #dgt was changed from dg1
ydot = ydot.reshape((nc+1)*(mc))
return ydot
try:
y = solve_ivp(diffun,\
(0, ttol),\
y0, \
method=self.solver,\
max_step=tstep/3,\
)
# defines interpolating function of predicted effluent
cp_tmp = y.y[-1]
cp_tmp[cp_tmp < 0.] = 0.#sets negative values to 0.
                cp_tmp[cp_tmp > np.max(cin)*3.] = np.max(cin)*3. #caps predictions at 3x the max influent
cp = interp1d(y.t, cp_tmp, fill_value='extrapolate')
except Exception:# as e:
t_temp = np.linspace(0, ttol, 20)
cp_tmp = np.zeros(20)
cp = interp1d(t_temp, cp_tmp, fill_value='extrapolate')
return cp
def run_fit(ds_mult):
#passes a ds_multiplier, rather than ds directly
cp = run(ds_mult)
# cp = run(ds_mult, timeout=self.timeout)
ssq = ((cout_f(time_dim2)-cp(time_dim2))**2).sum()
return ssq
if self.optimize_flag:
#reuse test_range
test_range = 10**(1-self.test_range)
ssqs = pd.Series([run_fit(i) for i in test_range], \
index=test_range)
min_val = ssqs[ssqs==ssqs.min()]
best_val_ds = min_val.index[0] * ds_base
best_fit = run(min_val.index[0])
min_val = min_val.values[0]
writer = pd.ExcelWriter('ssq_'+self.carbon+'-'+compound+'.xlsx')
ssqs.to_excel(writer, 'Sheet1')
writer.save()
else: #assume test_range and xn_range are single values
best_val_ds = self.test_range[0] * ds_base
best_fit = run(best_val_ds)
# best_fit = run(best_val_ds, timeout=self.timeout)
min_val = 1e2 #run_fit(best_val_k, best_val_xn)
ssqs = pd.Series(min_val, index=[best_val_ds])
itp = np.arange(0., ttol+tstep, tstep)
output_fit = interp1d(itp/tconv, \
best_fit(itp) * cb0 * mw / \
self.mass_mul, \
fill_value='extrapolate')
writer = pd.ExcelWriter(self.project_name+'_'+compound+'-'+self.carbon+'.xlsx')
model_data = pd.DataFrame(output_fit(itp/tconv), \
columns = ['data'], \
index = itp/tconv/t_mult)
model_data.to_excel(writer, 'model_fit')
inf.to_excel(writer, 'influent')
eff.to_excel(writer, 'effluent')
data_tmp = pd.Series([sc, self.re, difl, kf_v, self.k_data[compound]['K'],\
self.k_data[compound]['1/n'], dp_v, \
best_val_ds, min_val, self.ebct, self.sf], \
index = ['Sc','Re','difl','kf','K','1/n','dp',\
'ds','ssq','ebct','sf'])
data_tmp.to_excel(writer, 'parameters')
if self.optimize_flag:
ti.sleep(1)
writer.save()
return compound, best_val_ds, ssqs, model_data, ds_base
#end dsfit
def run_all(self, plot=False, save_file=True, optimize='staged',
init_grid=5, init_loop=3):
'''
Parameters
----------
plot : BOOL, optional
Are plots generated during this function. The default is False.
save_file : BOOL, optional
Are results files generated during this function. The default is True.
optimize : string, optional
'brute' or 'staged' are acceptable options. The default is 'staged'.
init_grid : INT, optional
Number of grid points for staged optimizer. The default is 5.
init_loop : INT, optional
Number of refinement loops for staged optimizer. The default is 3.
Current Behavior
----------------
All files will be overwritten. No file checking is currently performed.
Returns
-------
None. Optimized k & 1/n values are saved to Class object,
and can be saved in script that calls this function.
'''
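        # Example (sketch): staged refinement of the K-multiplier / 1/n grid for every
        # compound, writing per-compound result files and leaving the optimized values
        # on the object, per the docstring above.
        #   column.run_all(optimize='staged', init_grid=5, init_loop=3, plot=False)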
        #forces single run to handle optimizing in this function, not run_psdm_kfit
opt_flg = self.optimize_flag
orig_test_range = self.test_range * 1.
orig_xn_range = self.xn_range * 1.
self.optimize_flag = False
xn_rng_tmp = np.zeros(2)
k_rng_tmp = np.zeros(2)
des_xn = 0.025 # search accuracy for xn
des_k = 0.1 # search accuracy for k multiplier
if optimize == 'brute':
init_loop = 0
k_range = self.test_range
xn_range = self.xn_range
for compound in self.compounds:
print(compound, ' running')
k_val = self.k_data[compound]['K']
q_val = self.k_data[compound]['q']
inf = self.data_df[self.influent][compound]
eff = self.data_df[self.carbon][compound]
xn_range1 = orig_xn_range * 1.
k_range1 = orig_test_range * 1.
grid_num_xn = init_grid * 1
grid_num_k = init_grid * 1
for loop in range(init_loop + 1):
data_store = [] #reset
k_mult = {}
models = {}
if optimize == 'staged':
if loop == init_loop:
# expected different search space for final loop
xn_rng_tmp[0] = np.floor(xn_rng_tmp[0] / des_xn)
xn_rng_tmp[1] = np.ceil(xn_rng_tmp[1] / des_xn)
grid_num_xn = int(xn_rng_tmp.ptp() + 1)
xn_range1 = xn_rng_tmp * des_xn
if xn_range1[1] > orig_xn_range[-1]:
#corrects for overrun of xn_range
#may need to do the same for underrun of xn_rng
xn_range1[1] = orig_xn_range[-1]
grid_num_xn -= 1
k_rng_tmp[0] = np.floor(k_rng_tmp[0] / des_k)
k_rng_tmp[1] = np.ceil(k_rng_tmp[1] / des_k)
grid_num_k = int(k_rng_tmp.ptp() + 1)
k_range1 = k_rng_tmp * des_k
xn_range, k_range = generate_grid(grid_num_xn,
grid_num_k,
loop_num=loop,
xn_range=xn_range1,
k_range=k_range1)
for xns in xn_range:
k_mult[xns] = recalc_k(k_val, q_val, self.xn, xns)
for k in k_range:
models[k] = {}
for xn in xn_range:
self.test_range = np.array([k*k_mult[xn]])
self.xn_range = np.array([xn])
comp, k_v, xn_v, ssqs, md = self.run_psdm_kfit(compound)
data_store.append([k, xn_v, ssqs.values[0][0]])
models[k][xn] = md
data_pd = pd.DataFrame(data_store, columns=['K','1/n','ssq'])
ssqs = pd.pivot_table(data_pd,values=['ssq'],
index=['K'],
columns=['1/n'],
aggfunc=np.max,
)['ssq']
min_val = find_minimum_df(ssqs)
xn_grid = xn_range[1]-xn_range[0]
k_grid = k_range[1]-k_range[0]
min_xn = min_val.columns[0]
min_k = min_val.index[0]
idx_xn = xn_range.tolist().index(min_xn)
idx_k = k_range.tolist().index(min_k)
max_idx_xn = len(xn_range) - 1
max_idx_k = len(k_range) - 1
if idx_xn == 0: #close to min xn side
xn_rng_tmp[0] = xn_range[0]
xn_rng_tmp[1] = xn_range[1]
elif idx_xn == max_idx_xn: # close to max xn side
xn_rng_tmp[0] = xn_range[-2]
xn_rng_tmp[1] = xn_range[-1]
else: #middle of search space
xn_rng_tmp[0] = xn_range[idx_xn-1]
xn_rng_tmp[1] = xn_range[idx_xn+1]
if idx_k == 0: #close to min k side
k_rng_tmp[0] = k_range[0]
k_rng_tmp[1] = k_range[1]
elif idx_k == max_idx_k: # close to max k side
k_rng_tmp[0] = k_range[-2]
k_rng_tmp[1] = k_range[-1]
else: #middle of search space
k_rng_tmp[0] = k_range[idx_k-1]
k_rng_tmp[1] = k_range[idx_k+1]
if xn_grid < des_xn:
#can reduce search space
grid_num_xn = 3
# xn_grid = des_xn * 1 # might be unnecessary
if idx_xn == max_idx_xn:
grid_num_xn = 2
xn_rng_tmp[0] = xn_rng_tmp[1] - des_xn
elif idx_xn == 0:
grid_num_xn = 2
xn_rng_tmp[1] = xn_rng_tmp[0] + des_xn
if k_grid < des_k:
                    #can reduce search space
grid_num_k = 3
# k_grid = des_k * 1. # might be unnecessary
if idx_k == max_idx_k:
grid_num_k = 2
k_rng_tmp[0] = k_rng_tmp[1] - des_k
elif idx_k == 0:
grid_num_k = 2
k_rng_tmp[1] = k_rng_tmp[0] + des_k
xn_range1 = xn_rng_tmp * 1.
k_range1 = k_rng_tmp * 1.
min_val = find_minimum_df(ssqs)
best_val_xn = min_val.columns[0]
best_val_k = min_val.index[0] * k_mult[best_val_xn]
md = models[min_val.index[0]][best_val_xn]
min_val = min_val.values[0][0]
if plot:
plt.figure()
plt.plot(inf.index, inf.values, marker='x', label='influent')
plt.plot(eff.index, eff.values, marker='o', label='effluent')
#plot model results
plt.plot(md.index, md.values,\
label = repr(round(best_val_k,3))+\
' - ' + repr(round(best_val_xn, 3)))
plt.legend()
plt.title(compound+' - '+self.carbon)
plt.savefig(self.carbon+'_'+compound+'.png',dpi=300)
plt.close()
plt.figure()
ssqs[ssqs>np.percentile(ssqs, 25)] = np.percentile(ssqs, 25)
plt.contourf(ssqs.columns, ssqs.index, ssqs.values)
plt.title(compound+' - '+self.carbon)
plt.savefig('ssq_'+self.carbon+'_'+compound+'.png',dpi=300)
plt.close()
if save_file:
                writer = pd.ExcelWriter('ssq_'+self.carbon+'-'+compound+'.xlsx')
"""
Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an
express license agreement from NVIDIA CORPORATION is strictly prohibited.
"""
import os
import sys
import time
import pickle
import copy
import importlib
import pandas as pd
import torch
import torch.multiprocessing as mp
import federated
from server import GlobalServer
from federated.args import args
from federated.utils import *
from federated.configs import cfg_fl as cfg, assert_and_infer_cfg_fl
from federated.eval import *
def save_client_metrics(clients, training_metrics_path):
df_clients = []
for client in clients:
df_clients.append(pd.DataFrame(client.metrics))
# Save client data
pd.concat(df_clients, ignore_index=True).to_csv(training_metrics_path, index=False)
del df_clients
torch.cuda.empty_cache()
print(f'Saved training metrics to {training_metrics_path}!')
# Enable CUDNN Benchmarking optimization
if args.deterministic:
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if __name__ == '__main__':
"""
Run federated learning
"""
cfg = assert_and_infer_cfg_fl(cfg, args, make_immutable=False)
args.ngpu = torch.cuda.device_count()
    args.device = torch.device(f'cuda:{args.device}') if args.device is not None and torch.cuda.is_available() else torch.device('cpu')
print(f'> Num clients per update (if e-greedy): {cfg.CLIENT_WEIGHT.NUM_UPDATE_CLIENTS}')
print(f'> Training method: {cfg.FEDERATION.METHOD}')
if cfg.FEDERATION.METHOD == 'fomo':
if cfg.CLIENT_WEIGHT.METHOD == 'e_greedy':
cm = 'eg'
elif cfg.CLIENT_WEIGHT.METHOD == 'sub_federations':
cm = 'sf'
if cfg.CLIENT_WEIGHT.BASELINE == 'first_model':
baseline_name = 'fm'
elif cfg.CLIENT_WEIGHT.BASELINE == 'last_model':
baseline_name = 'lm'
elif cfg.CLIENT_WEIGHT.BASELINE == 'model_avg':
baseline_name = 'ma'
experiment_params = f'-fe={cfg.FEDERATION.EPOCH}-cm={cm}-b={baseline_name}-wd={cfg.CLIENT_WEIGHT.WEIGHT_DELTA}-nuc={cfg.CLIENT_WEIGHT.NUM_UPDATE_CLIENTS}-we={cfg.CLIENT_WEIGHT.EPSILON}-wed={cfg.CLIENT_WEIGHT.EPSILON_DECAY}-scw={args.softmax_client_weights}'
if args.softmax_model_deltas:
experiment_params += '-sd'
if args.num_federations is not None:
experiment_params += f'-nf={args.num_federations}'
if args.fedavg_rounds is not None:
experiment_params += f'-far={args.fedavg_rounds}'
if args.local_rounds is not None:
experiment_params += f'-lar={args.local_rounds}'
elif cfg.FEDERATION.METHOD == 'fedavg':
experiment_params = f'-fe={cfg.FEDERATION.EPOCH}'
elif cfg.FEDERATION.METHOD == 'local':
experiment_params = ''
if cfg.CLIENT.MANUAL:
lvr = 'man0'
else:
lvr = args.local_val_ratio
train_curve = 'g' if args.global_training_curve else 'l'
if cfg.TASK == 'semantic_segmentation':
task = 'segm'
elif cfg.TASK == 'classification':
task = 'class'
if args.num_adversaries > 0:
experiment_params += f'-na={args.num_adversaries}'
clients_arg = f'nc={args.num_clients}' if args.num_clients else f'cpd={cfg.FEDERATION.CLIENTS_PER_DIST}'
distributions_arg = f'su={args.shards_per_user}' if args.shards_per_user else f'nd={cfg.FEDERATION.NUM_DISTRIBUTIONS}'
ltvs = f'ltvs={args.local_train_val_size:}-' if args.local_train_val_size else ''
num_local_pooled = ''
if args.num_local_val_pooled > 0:
num_local_pooled += f'nlvp={args.num_local_val_pooled}-'
if args.num_local_train_pooled > 0:
num_local_pooled += f'nltp={args.num_local_train_pooled}-'
args.experiment_name = f'm={cfg.FEDERATION.METHOD}-d={cfg.DATASET.DATASET_NAME}-{distributions_arg}-{clients_arg}-rd={cfg.FEDERATION.RANDOM_DISTS}-ts={cfg.CLIENT.TRAIN_SPLIT}-{ltvs}me={args.max_epoch}-arch={args.arch}-lr={args.lr}-lrd={args.learning_rate_decay}-mo={args.momentum}-o={args.optimizer}-bst={args.bs_trn}-bsv={args.bs_val}{experiment_params}-ds={args.data_seed}-s={cfg.SEED}-r={args.replicate}'
if args.eval_distribution_test:
args.experiment_name = args.experiment_name + '-edt'
if args.local_val_model_delta_ratio:
args.experiment_name = args.experiment_name + f'-lvmdr={args.local_val_model_delta_ratio}'
if args.eval_by_class:
args.experiment_name = args.experiment_name + '-ebc'
if args.local_upper_bound:
args.experiment_name = args.experiment_name + '-lub'
if args.pathological_non_iid:
args.experiment_name = args.experiment_name + '-pniid'
if args.shuffle_targets:
args.experiment_name = args.experiment_name + '-st'
if args.fedprox:
if args.fedprox_mu is not None:
args.experiment_name = args.experiment_name + f'-fp{args.fedprox_mu}'
if args.fed_momentum:
args.experiment_name = args.experiment_name + f'-fmg={args.fed_momentum_gamma}'
if args.fed_momentum_nesterov:
args.experiment_name += f'-nag'
if args.enable_dp:
args.experiment_name += f'-na={args.n_accumulation_steps}-sd={args.sigma}-C={args.max_per_sample_grad_norm}-d={args.delta}'
if args.virtual_batch_rate is not None:
args.experiment_name += f'-vbr={args.virtual_batch_rate}'
args.bs_trn_v = args.bs_trn
args.bs_val_v = args.bs_val
args.bs_trn = int(args.bs_trn / args.virtual_batch_rate)
args.bs_val = int(args.bs_val / args.virtual_batch_rate)
print(f'Virtual train batch size: {args.bs_trn_v} | Virtual val batch size: {args.bs_val_v}')
print(f'Train batch size: {args.bs_trn} | Val batch size: {args.bs_val}')
# Save model paths
args.model_path = os.path.join(cfg.MODEL_DIR, cfg.DATASET.DATASET_NAME)
try:
os.mkdir(args.model_path)
except FileExistsError:
pass
args.model_path = os.path.join(args.model_path, f'replicate-{args.replicate}')
try:
os.mkdir(args.model_path)
except FileExistsError:
pass
# Saving results
print(f'> Save name: {args.experiment_name}')
save_dir = os.path.join(cfg.RESULTS_DIR, f'replicate-{args.replicate}')
try:
os.mkdir(save_dir)
except FileExistsError:
pass
print(f'> Saving files to {save_dir}...')
# Finally log them:
sys.stdout = Logger(args=args)
summarize_args(args=args, as_script=True)
training_metrics_path = os.path.join(save_dir, f'client_train-{args.experiment_name}.csv')
test_metrics_path = os.path.join(save_dir, f'client_test-{args.experiment_name}.csv')
embeddings_path = os.path.join(save_dir, f'server_embeddings-{args.experiment_name}.csv')
print_header(f'>>> Number of GPUs available: {args.ngpu}')
# Modularity among datasets
dataset_module = importlib.import_module('federated_datasets.{}'.format(cfg.DATASET.DATASET_NAME))
if cfg.TASK == 'classification':
population = getattr(dataset_module, 'Population')()
else:
raise NotImplementedError
# Initialize global server
if cfg.FEDERATION.FED_AVERAGING:
server = GlobalServer(population=population, num_federations=1)
elif args.federation_method == 'fomo' and args.num_federations is not None:
server = GlobalServer(population=population, num_federations=args.num_federations)
else:
server = GlobalServer(population=population)
server.cfg = cfg # Pass asserted and inferred configs to server
evals_setup = False
# If True, show performance on the global in-distribution test set during training
if args.eval_distribution_test:
setup_eval_datasets(population, limit_train_size=False)
evals_setup = True
for ix, dist in enumerate(population.distributions):
for cix, client in enumerate(dist['clients']):
client.datasets[2] = population.test_datasets[ix] # Assign the distribution test set to client's test set
if args.local_upper_bound and args.federation_method == 'local':
if evals_setup:
pass
else:
setup_eval_datasets(population, limit_train_size=False)
for ix, dist in enumerate(population.distributions):
for cix, client in enumerate(dist['clients']):
client.datasets[0] = population.train_datasets[ix]
# Evaluation
if args.evaluate:
# Evaluate federated models
if evals_setup:
pass
else:
setup_eval_datasets(population, limit_train_size=False)
args.federation_round = -1
evaluate_federated_models(server)
federated_metrics_path = os.path.join(save_dir, f'fed_test-{args.experiment_name}-elfs={args.eval_local_finetune_size}.csv')
pd.DataFrame(server.eval_metrics).to_csv(federated_metrics_path, index=False)
print(f'Saved federated test metrics to {federated_metrics_path}!')
sys.exit()
###################
# Actual Training #
###################
# Save client-to-client weight matrices
client_weight_matrices = []
all_activated_clients = []
    np.random.seed(cfg.SEED)  # seed for reproducibility
for epoch in range(args.max_epoch):
server.round = epoch
args.federation_round = epoch
# First round, everyone locally train
if epoch == 0 and args.federating_ratio < 1 and args.federation_method == 'fomo':
print('> First round initializing all client models...')
server.local_eval([population.clients[0]], epoch * args.federation_epoch) # Evaluate the models at the start
server.local_train(population.clients, epoch * args.federation_epoch, num_epochs=1) # Train and record fine-tuned number
# Randomly select subset to upload to server
m = max(int(args.federating_ratio * population.num_clients), 1)
client_indices = np.random.choice(range(population.num_clients), m, replace=False)
federating_clients = [population.clients[ix] for ix in client_indices]
server.uploaded_clients = copy.deepcopy(federating_clients)
continue
else:
print('> Selecting subset of active models...')
np.random.seed(cfg.SEED)
m = max(int(args.federating_ratio * population.num_clients), 1)
client_indices = np.random.choice(range(population.num_clients), m, replace=False)
if args.debugging:
print_debug(client_indices, 'selected clients')
if args.fedavg_rounds is not None and epoch < args.fedavg_rounds:
args.federation_method = 'fedavg'
cfg.FEDERATION.METHOD = 'fedavg'
cfg.FEDERATION.FED_AVERAGING = True
elif args.fedavg_rounds is not None and epoch >= args.fedavg_rounds:
args.federation_method = 'fomo'
cfg.FEDERATION.METHOD = 'fomo'
cfg.FEDERATION.FED_AVERAGING = False
if args.local_rounds is not None and epoch < args.local_rounds:
args.federation_method = 'local'
cfg.FEDERATION.METHOD = 'local'
cfg.FEDERATION.FED_AVERAGING = False
elif args.local_rounds is not None and epoch >= args.local_rounds:
args.federation_method = 'fomo'
cfg.FEDERATION.METHOD = 'fomo'
cfg.FEDERATION.FED_AVERAGING = False
if cfg.FEDERATION.METHOD == 'local' and args.debugging:
client_indices = sorted(client_indices)
federating_clients = [population.clients[ix] for ix in client_indices]
print('Federating Clients:', [f.id for f in federating_clients])
server.last_active_clients = federating_clients
for client in federating_clients:
client.last_active_round = epoch # Update last active round
if client not in all_activated_clients: # <- 8/6, not sure how useful all activated clients should be
all_activated_clients.append(client)
if cfg.FEDERATION.METHOD == 'local':
server.local_eval(federating_clients, epoch * args.federation_epoch) # Evaluate the models at the start
server.local_train(federating_clients, epoch * args.federation_epoch)
save_client_metrics(population.clients, training_metrics_path)
pd.DataFrame(server.client_eval_metrics).to_csv(test_metrics_path, index=False)
if args.debugging:
sys.exit() # Early stop for debugging
# Decay learning rate
server.args.lr = np.max([args.min_learning_rate, server.args.lr * args.learning_rate_decay])
if args.local_rounds is not None and epoch == args.local_rounds - 1:
server.uploaded_clients = copy.deepcopy(federating_clients) # Prepare for next round
continue
print_header('****~~Federating part~~****')
federating_clients = server.initialize_clients(epoch, selected_clients=federating_clients)
for client in federating_clients:
client.save_first_model()
server.local_eval(federating_clients, epoch * args.federation_epoch) # Evaluate the models at the start
server.local_train(federating_clients, epoch * args.federation_epoch) # Train and record fine-tuned number
# Make sure server.population.clients is updated to match federating_clients
for federating_client_ix, population_client_ix in enumerate(client_indices):
server.population.clients[population_client_ix] = federating_clients[federating_client_ix]
for client in federating_clients:
client.participated = True
# Save models after local training - use to analyze the divergence
torch.save(client.model.state_dict(), f'./models/m_c{client.id}_d{client.dist_id}_e{epoch}-{args.experiment_name}.pt')
server.update_server_models(federating_clients, all_activated_clients)
# Save data
save_client_metrics(population.clients, training_metrics_path)
        pd.DataFrame(server.client_eval_metrics).to_csv(test_metrics_path, index=False)
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: all,-execution,-papermill,-trusted
# formats: ipynb,py//py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.7.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown] tags=[]
# # Description
# %% [markdown] tags=[]
# This notebook analyzes the LVs driving the association of Niacin with some cardiovascular traits. Then it writes a table in markdown with the results.
# %% [markdown] tags=[]
# # Modules loading
# %% tags=[]
# %load_ext autoreload
# %autoreload 2
# %% tags=[]
from pathlib import Path
import re
import numpy as np
import pandas as pd
from entity import Gene
import conf
# %% [markdown] tags=[]
# # Settings
# %% tags=[]
QUANTILE = 0.95
# %% [markdown] tags=[]
# # Paths
# %% tags=[]
OUTPUT_DIR = conf.RESULTS["DRUG_DISEASE_ANALYSES"] / "lincs" / "analyses"
display(OUTPUT_DIR)
OUTPUT_DIR.mkdir(exist_ok=True, parents=True)
# %%
INPUT_DIR = conf.RESULTS["DRUG_DISEASE_ANALYSES"] / "lincs" / "predictions"
# display(OUTPUT_DIR)
# OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
input_predictions_by_tissue_file = INPUT_DIR / "full_predictions_by_tissue-rank.h5"
display(input_predictions_by_tissue_file)
# %%
assert "CONTENT_DIR" in conf.MANUSCRIPT
OUTPUT_FILE_PATH = conf.MANUSCRIPT["CONTENT_DIR"] / "04.15.drug_disease_prediction.md"
display(OUTPUT_FILE_PATH)
assert OUTPUT_FILE_PATH.exists()
# %% [markdown] tags=[]
# # Data loading
# %% [markdown] tags=[]
# ## PharmacotherapyDB: load gold standard
# %% [markdown]
# ### Final
# %%
gold_standard = pd.read_pickle(
Path(conf.RESULTS["DRUG_DISEASE_ANALYSES"], "gold_standard.pkl"),
)
# %%
gold_standard.shape
# %%
gold_standard.head()
# %% [markdown]
# ### Info
# %% tags=[]
input_file = conf.PHARMACOTHERAPYDB["INDICATIONS_FILE"]
display(input_file)
# %%
gold_standard_info = pd.read_csv(input_file, sep="\t")
# %%
gold_standard_info = gold_standard_info.rename(columns={"drug": "drug_name"})
# %% tags=[]
gold_standard_info.shape
# %% tags=[]
gold_standard_info.head()
# %%
gold_standard_info = (
gold_standard.set_index(["trait", "drug"])
.join(
gold_standard_info.rename(
columns={"doid_id": "trait", "drugbank_id": "drug"}
).set_index(["trait", "drug"])
)
.reset_index()
)
# %% tags=[]
gold_standard_info.shape
# %% tags=[]
gold_standard_info.head()
# %% [markdown] tags=[]
# ## LINCS data
# %% tags=[]
input_file = Path(
conf.RESULTS["DRUG_DISEASE_ANALYSES"], "lincs", "lincs-data.pkl"
).resolve()
display(input_file)
# %% tags=[]
lincs_data = pd.read_pickle(input_file).T.rename(columns=Gene.GENE_ID_TO_NAME_MAP)
# %% tags=[]
display(lincs_data.shape)
# %% tags=[]
display(lincs_data.head())
# %% [markdown] tags=[]
# ## LINCS projection
# %% tags=[]
input_file = Path(
conf.RESULTS["DRUG_DISEASE_ANALYSES"], "lincs", "lincs-projection.pkl"
).resolve()
display(input_file)
# %% tags=[]
lincs_projection = pd.read_pickle(input_file).T
# %% tags=[]
display(lincs_projection.shape)
# %% tags=[]
display(lincs_projection.head())
# %% [markdown]
# # Niacin and cardiovascular diseases
# %%
from entity import Trait
# %%
Trait.get_traits_from_efo("atherosclerosis")
# %%
Trait.get_traits_from_efo("coronary artery disease")
# %%
_phenomexcan_traits = [
"I70-Diagnoses_main_ICD10_I70_Atherosclerosis",
"CARDIoGRAM_C4D_CAD_ADDITIVE",
"I25-Diagnoses_main_ICD10_I25_Chronic_ischaemic_heart_disease",
"20002_1473-Noncancer_illness_code_selfreported_high_cholesterol",
"6150_100-Vascularheart_problems_diagnosed_by_doctor_None_of_the_above",
"6150_1-Vascularheart_problems_diagnosed_by_doctor_Heart_attack",
"I9_CHD-Major_coronary_heart_disease_event",
"I9_CORATHER-Coronary_atherosclerosis",
"I9_IHD-Ischaemic_heart_disease_wide_definition",
"I9_MI-Myocardial_infarction",
"I21-Diagnoses_main_ICD10_I21_Acute_myocardial_infarction",
"20002_1075-Noncancer_illness_code_selfreported_heart_attackmyocardial_infarction",
]
_drug_id = "DB00627"
_drug_name = "Niacin"
# %%
for p in _phenomexcan_traits:
print(p)
d = Trait.get_trait(full_code=p)
print((d.n, d.n_cases))
print("\n")
# %% [markdown]
# ## Get best tissue results for Niacin
# %%
drugs_tissue_df = {}
with pd.HDFStore(input_predictions_by_tissue_file, mode="r") as store:
for tk in store.keys():
df = store[tk][_drug_id]
drugs_tissue_df[tk[1:]] = df
# %%
_tmp = pd.DataFrame(drugs_tissue_df)
display(_tmp.shape)
display(_tmp.head())
# %%
# show top tissue models (from TWAS) for each trait
traits_best_tissues_df = (
pd.DataFrame(drugs_tissue_df).loc[_phenomexcan_traits].idxmax(1)
)
display(traits_best_tissues_df)
# %%
# pick the tissue with the maximum score for each trait
drug_df = pd.DataFrame(drugs_tissue_df).max(1)
# %%
drug_df.shape
# %%
drug_df.head()
# %%
drug_df.loc[_phenomexcan_traits].sort_values()
# %%
drug_df.describe()
# %%
drug_mean, drug_std = drug_df.mean(), drug_df.std()
display((drug_mean, drug_std))
# %%
drug_df_std = (drug_df - drug_mean) / drug_std
drug_df_stats = drug_df_std.describe()
display(drug_df_stats)
# %%
drug_df_std.quantile([0.80, 0.85, 0.90, 0.95])
# %%
drug_df = (drug_df.loc[_phenomexcan_traits] - drug_mean) / drug_std
# %%
drug_df.shape
# %%
drug_df.sort_values()
# %% [markdown]
# All predictions of Niacin for these traits are high (above the mean and a standard deviation away)
# %%
# select traits for which niacin has a high prediction
selected_traits = drug_df[drug_df > drug_df_stats["75%"]].index.tolist()
# %%
selected_traits
# %% [markdown]
# ## Gene module-based - LVs driving association
# %%
def find_best_tissue(trait_id):
return traits_best_tissues_df.loc[trait_id]
# %%
_tmp_res = find_best_tissue("I9_CORATHER-Coronary_atherosclerosis")
display(_tmp_res)
# %%
# available_doids = set(predictions_by_tissue["trait"].unique())
traits_lv_data = []
for trait in selected_traits:
best_module_tissue = find_best_tissue(trait)
display(best_module_tissue)
best_module_tissue_data = pd.read_pickle(
conf.RESULTS["DRUG_DISEASE_ANALYSES"]
/ "spredixcan"
/ "proj"
/ f"spredixcan-mashr-zscores-{best_module_tissue}-projection.pkl"
)[trait]
traits_lv_data.append(best_module_tissue_data)
# %%
module_tissue_data = pd.DataFrame(traits_lv_data).T
# %%
module_tissue_data.shape
# %%
module_tissue_data.head()
# %%
drug_data = lincs_projection.loc[_drug_id]
# %%
drug_data.head()
# %%
_tmp = (-1.0 * drug_data.dot(module_tissue_data)).sort_values(ascending=False)
display(_tmp)
# %%
drug_trait_predictions = pd.DataFrame(
-1.0 * (drug_data.to_frame().values * module_tissue_data.values),
columns=module_tissue_data.columns.copy(),
index=drug_data.index.copy(),
)
# %%
drug_trait_predictions.shape
# %%
drug_trait_predictions.head()
# %%
common_lvs = []
for c in drug_trait_predictions.columns:
d = Trait.get_trait(full_code=c)
display(f"Name: {d.description}")
display(f"Sample size: {(d.n, d.n_cases)}")
_tmp = drug_trait_predictions[c]
_tmp = _tmp[_tmp > 0.0]
q = _tmp.quantile(QUANTILE)
_tmp = _tmp[_tmp > q]
display(f"Number of LVs: {_tmp.shape[0]}")
_tmp = (
_tmp.sort_values(ascending=False)
.rename("lv_diff")
.reset_index()
.rename(columns={"index": "lv"})
)
_tmp = _tmp.assign(trait=c)
common_lvs.append(_tmp)
display(_tmp.head(20))
print()
# %% [markdown]
# # Get common LVs
# %%
common_lvs_df = pd.concat(common_lvs) # .rename(columns={"index": "lv", 0: "value"})
# %%
common_lvs_df.shape
# %%
common_lvs_df.head()
# %%
lvs_by_count = (
common_lvs_df.groupby("lv")["lv_diff"]
.count()
.squeeze()
.sort_values(ascending=False)
)
display(lvs_by_count.head(25))
# %%
lvs_sel = []
# %%
with pd.option_context(
"display.max_rows", None, "display.max_columns", None, "display.max_colwidth", None
):
lv_df = common_lvs_df[common_lvs_df["lv"] == "LV116"].sort_values(
"lv_diff", ascending=False
)
display(lv_df)
lvs_sel.append(lv_df)
# %%
lv_df = common_lvs_df[common_lvs_df["lv"] == "LV931"].sort_values(
"lv_diff", ascending=False
)
display(lv_df)
lvs_sel.append(lv_df)
# %%
lv_df = common_lvs_df[common_lvs_df["lv"] == "LV246"].sort_values(
"lv_diff", ascending=False
)
display(lv_df)
lvs_sel.append(lv_df)
# %%
lv_df = pd.concat(lvs_sel, ignore_index=True)
display(lv_df.head())
# %%
from traits import SHORT_TRAIT_NAMES
# %%
def get_trait_objs(phenotype_full_code):
if Trait.is_efo_label(phenotype_full_code):
traits = Trait.get_traits_from_efo(phenotype_full_code)
else:
traits = [Trait.get_trait(full_code=phenotype_full_code)]
# sort by sample size
return sorted(traits, key=lambda x: x.n_cases / x.n, reverse=True)
def get_trait_description(phenotype_full_code):
traits = get_trait_objs(phenotype_full_code)
desc = traits[0].description
if desc in SHORT_TRAIT_NAMES:
return SHORT_TRAIT_NAMES[desc]
return desc
def get_trait_n(phenotype_full_code):
traits = get_trait_objs(phenotype_full_code)
return traits[0].n
def get_trait_n_cases(phenotype_full_code):
traits = get_trait_objs(phenotype_full_code)
return traits[0].n_cases
def num_to_int_str(num):
    if pd.isnull(num):
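        # The original body is truncated here; a minimal, assumed completion that
        # leaves missing values blank and formats numbers as integer strings:
        return ""
    return f"{num:,.0f}"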
import pandas as pd
from pandas.testing import assert_frame_equal
from dask_sql._compat import INT_NAN_IMPLEMENTED
def test_filter(c, df):
return_df = c.sql("SELECT * FROM df WHERE a < 2")
return_df = return_df.compute()
expected_df = df[df["a"] < 2]
assert_frame_equal(return_df, expected_df)
def test_filter_scalar(c, df):
return_df = c.sql("SELECT * FROM df WHERE True")
return_df = return_df.compute()
expected_df = df
assert_frame_equal(return_df, expected_df)
return_df = c.sql("SELECT * FROM df WHERE False")
return_df = return_df.compute()
expected_df = df.head(0)
assert_frame_equal(return_df, expected_df, check_index_type=False)
return_df = c.sql("SELECT * FROM df WHERE (1 = 1)")
return_df = return_df.compute()
expected_df = df
assert_frame_equal(return_df, expected_df)
return_df = c.sql("SELECT * FROM df WHERE (1 = 0)")
return_df = return_df.compute()
expected_df = df.head(0)
assert_frame_equal(return_df, expected_df, check_index_type=False)
def test_filter_complicated(c, df):
return_df = c.sql("SELECT * FROM df WHERE a < 3 AND (b > 1 AND b < 3)")
return_df = return_df.compute()
expected_df = df[((df["a"] < 3) & ((df["b"] > 1) & (df["b"] < 3)))]
assert_frame_equal(
return_df, expected_df,
)
def test_filter_with_nan(c):
return_df = c.sql("SELECT * FROM user_table_nan WHERE c = 3")
return_df = return_df.compute()
if INT_NAN_IMPLEMENTED:
expected_df = pd.DataFrame({"c": [3]}, dtype="Int8")
else:
expected_df = pd.DataFrame({"c": [3]}, dtype="float")
assert_frame_equal(
return_df, expected_df,
)
def test_string_filter(c, string_table):
return_df = c.sql("SELECT * FROM string_table WHERE a = 'a normal string'")
return_df = return_df.compute()
assert_frame_equal(
return_df, string_table.head(1),
)
def test_filter_datetime(c):
df = pd.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
df["dt"] = | pd.to_datetime(df) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Script for assessing how the number of nulls used to generate a p-value
influences the p-value
"""
from collections import defaultdict
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from netneurotools import stats as nnstats
from parspin import simnulls, utils as putils
from parspin.plotting import savefig
plt.rcParams['svg.fonttype'] = 'none'
plt.rcParams['font.sans-serif'] = ['Myriad Pro']
plt.rcParams['font.size'] = 20.0
ROIDIR = Path('./data/raw/rois').resolve()
SIMDIR = Path('./data/derivatives/simulated').resolve()
OUTDIR = Path('./data/derivatives/supplementary/comp_nnulls')
FIGDIR = Path('./figures/supplementary/comp_nnulls')
SEED = 1234 # reproducibility
SIM = 9999 # which simulation was used to generate 10000 nulls
N_PVALS = 1000 # how many repeated draws should be done to calculate pvals
PLOTS = (
('vertex', 'fsaverage5'),
('atl-cammoun2012', 'scale500'),
('atl-schaefer2018', '1000Parcels7Networks')
)
PARCS, SCALES = zip(*PLOTS)
def pval_from_perms(actual, null):
""" Calculates p-value of `actual` based on `null` permutations
"""
return (np.sum(np.abs(null) >= np.abs(actual)) + 1) / (len(null) + 1)
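# Worked example with hypothetical values: for actual = 0.5 and
# null = np.array([0.7, -0.2, 0.1, -0.6]), two null |values| are >= 0.5,
# so pval_from_perms(0.5, null) == (2 + 1) / (4 + 1) == 0.6.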
def pval_by_subsets(parcellation, scale, spatnull, alpha):
"""
Parameters
----------
parcellation : str
Name of parcellation to be used
scale : str
Scale of `parcellation` to be used
spatnull : str
Name of spin method to be used
alpha : float
Spatial autocorrelation parameter to be used
Returns
-------
pvals : pd.DataFrame
"""
print(spatnull, alpha, parcellation, scale)
if spatnull == 'naive-para':
return
# load simulated data
alphadir = SIMDIR / alpha
if parcellation == 'vertex':
x, y = simnulls.load_vertex_data(alphadir, sim=SIM)
else:
x, y = simnulls.load_parc_data(alphadir, parcellation, scale, sim=SIM)
corr = nnstats.efficient_pearsonr(x, y, nan_policy='omit')[0]
perms = np.loadtxt(alphadir / parcellation / 'nulls' / spatnull / 'pvals'
/ f'{scale}_perms_{SIM}.csv')
orig = pval_from_perms(corr, perms)
pvals = defaultdict(list)
for subset in [100, 500, 1000, 5000]:
rs = np.random.default_rng(SEED)
for n in range(N_PVALS):
# select `subset` correlations from `perms` and calculate p-value
# store the p-value and repeat `N_PVALS` times
sub = rs.choice(perms, size=subset, replace=False)
pvals[subset].append(pval_from_perms(corr, sub) - orig)
# arrays are nicer than lists
pvals[subset] = np.asarray(pvals[subset])
df = pd.melt(pd.DataFrame(pvals), var_name='n_nulls', value_name='d(pval)')
    # add metadata identifying this parcellation / spatial-null / alpha combination
df = df.assign(
parcellation=parcellation,
scale=scale,
spatnull=spatnull,
alpha=alpha
)
    return df[
        ['parcellation', 'scale', 'spatnull', 'alpha', 'n_nulls', 'd(pval)']
    ]
def run_analysis():
""" Runs p-value x n_nulls analysis
Returns
-------
pvals : pd.DataFrame
Data examining p-values based on number of nulls used
"""
OUTDIR.mkdir(parents=True, exist_ok=True)
fn = OUTDIR / 'nnulls_summary.csv'
if fn.exists():
return pd.read_csv(fn)
subsets = []
parcellations = putils.get_cammoun_schaefer(data_dir=ROIDIR)
for spatnull in simnulls.SPATNULLS:
for alpha in simnulls.ALPHAS:
if spatnull in simnulls.VERTEXWISE:
subsets.append(
pval_by_subsets('vertex', 'fsaverage5', spatnull, alpha),
)
for parcellation, annotations in parcellations.items():
for scale in annotations:
subsets.append(
pval_by_subsets(parcellation, scale, spatnull, alpha),
)
    subsets = pd.concat(subsets, ignore_index=True, sort=True)
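    # Truncated in the source; a minimal, assumed completion that caches and
    # returns the summary (consistent with the docstring and the `fn` check above):
    subsets.to_csv(fn, index=False)
    return subsets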
"""
vocmaxlib
This python package calculates the maximum sting size for a photovoltaic
installation. The method is consistent with the NEC 2017 690.7 standard.
toddkarin
"""
import numpy as np
import pvlib
import pvlib.bifacial
# import nsrdbtools
# import socket
# import matplotlib
# matplotlib.use('TkAgg')
# import matplotlib.pyplot as plt
import pandas as pd
import datetime
import glob
import pytz
from vocmax import nsrdb
import tqdm
import os
import urllib
import pytz
import sys
import os
import warnings
from pvlib.iotools import get_psm3
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
from vocmax.bifacial import pvfactors_timeseries
import glob
import vocmax
# from pvlib.bifacial import PVFactorsReportBuilder as PVFactorsReportBuilder
# Parameters entering into Voc calculation:
cec_modules = pvlib.pvsystem.retrieve_sam('CeCMod')
# Descriptions of the various parameters used in the calculation.
explain = {
'Voco': 'Open circuit voltage at reference conditions, in V',
'Bvoco': 'Temperature dependence of open circuit voltage, in V/C',
'Mbvoc': """Coefficient providing the irradiance dependence of the
temperature coefficient of open circuit voltage, typically assumed to be
zero, in V/C
""",
'n_diode': 'Diode ideality factor, unitless',
'cells_in_series': 'Number of cells in series in each module, dimensionless',
'FD': """Fraction of diffuse irradiance arriving at the cell, typically
assumed to be 1, dimensionless
""",
'alpha_sc': """The short-circuit current temperature coefficient of the
module, in A/C
""",
'a_ref': """The product of the usual diode ideality factor (n_diode,
unitless), number of cells in series (cells_in_series), and cell thermal
voltage at reference conditions, in units of V.
""",
'I_L_ref': """The light-generated current (or photocurrent) at reference
conditions, in amperes.
""",
'I_o_ref': """The dark or diode reverse saturation current at reference
conditions, in amperes.
""",
'R_sh_ref': """The shunt resistance at reference conditions, in ohms.""",
'R_s': """The series resistance at reference conditions, in ohms.""",
'Isco': """Short circuit current at reference conditions, in amperes.""",
'Impo': """Maximum-power current at reference conditions, in amperes.""",
'Vmpo': """Maximum-power voltage at reference conditions, in volts.""",
'Pmpo': """Maximum-power power at reference conditions, in watts.""",
'Bisco': """Temperature coefficient of short circuit current, in A/C"""
}
def get_weather_data(lat, lon,
api_key,
cache_directory='cached_weather_data',
attributes='ghi,dhi,dni,wind_speed,air_temperature',
force_download=False,
full_name='<NAME>',
email='<EMAIL>',
affiliation='vocmax',
years=np.arange(1998, 2018.5),
interval=30,
):
"""
Retrieve weather data from the national solar radiation database (NSRDB).
Description
-----------
df, info = get_weather_data(lat,lon,api_key) gets weather data from the
NSRDB using the NSRDB api. Data download for a single location takes
around 3 minutes. Once weather data is downloaded, it is stored in a
local cache so it can be retrieved quickly. One sample point (lat=37.876,
lon=-122.247) is provided with the function so sample data can be easily
loaded without an api key.
Api keys are available free of charge at https://developer.nrel.gov/signup/
    Note: data can only be downloaded from the NSRDB sequentially (it is not
    possible to download data using multiple scripts in parallel).
Examples
--------
lat, lon = 37.876, -122.247
# Note: Replace with your api key
api_key = '<KEY>'
df, info = vocmax.get_weather_data(lat,lon,api_key)
Parameters
----------
lat : float or int
latitude in decimal degrees, between -90 and 90, north is positive
lon : float or int
longitude in decimal degrees, between -180 and 180, east is positive
api_key : str
NREL Developer Network API key
email : str
NREL API uses this to automatically communicate messages back
to the user only if necessary
names : str, default 'tmy'
        PSM3 API parameter specifying year or TMY variant to download, see notes
below for options
    interval : int, default 30
interval size in minutes, can only be either 30 or 60. Only used for
single-year requests (i.e., it is ignored for tmy/tgy/tdy requests).
leap_day : boolean, default False
include leap day in the results. Only used for single-year requests
(i.e., it is ignored for tmy/tgy/tdy requests).
full_name : str, default 'pvlib python'
optional
affiliation : str, default 'pvlib python'
optional
timeout : int, default 30
time in seconds to wait for server response before timeout
force_download : bool
        If true, force downloading of weather data regardless of whether
that particular location has already been downloaded. Default is false.
tz_localize : bool
        Whether to localize the time zone.
Returns
-------
df : pandas dataframe
Dataframe containing weather data with fields
'year' - year of row.
'month', 'day', 'hour', 'minute', 'dni', 'ghi', 'dhi',
'temp_air', 'wind_speed'.
info : dictionary
        Dictionary containing information on the weather dataset.
"""
    # First check if data exists in the cache directory.
if not force_download:
search_str = os.path.join(cache_directory,
'*_{:3.3f}_{:3.3f}.npz'.format(lat, lon))
print(search_str)
# One sample data point is provided with the package so that users don't
# have to get an api key to try it out.
if '{:3.3f}_{:3.3f}'.format(lat, lon) == '37.876_-122.247':
print('getting sample data point')
dir_path = os.path.dirname(os.path.realpath(__file__))
df, info = nsrdb.get_local_weather_data(
os.path.join(dir_path,
'123796_37.89_-122.26_search-point_37.876_-122.247.npz')
)
return df, info
# Otherwise search the cache for a weather data file that has already
# been downloaded.
filename = glob.glob(search_str)
if len(filename) > 0:
# Cached weather data found, load it
df, info = nsrdb.get_local_weather_data(filename[0])
# TODO: Add checks that the loaded file has the same options as in the function call.
return df, info
else:
# No cached weather data found.
pass
# Pull data from NSRDB because either force_download=True or no cached datafile found.
print('Downloading weather data and saving to "cached_weather_data" ...')
for j in tqdm.tqdm(range(len(years))):
year = '{:.0f}'.format(years[j])
info_iter, df_iter = get_psm3(
latitude=lat,
longitude=lon,
api_key=api_key,
email=email,
names=year,
interval=30,
leap_day=False,
full_name=full_name,
affiliation=affiliation,
timeout=30)
#
# # Declare url string
# url = 'http://developer.nrel.gov/api/solar/nsrdb_psm3_download.csv?wkt=POINT({lon}%20{lat})&names={year}&leap_day={leap}&interval={interval}&utc={utc}&full_name={name}&email={email}&affiliation={affiliation}&mailing_list={mailing_list}&reason={reason}&api_key={api}&attributes={attr}'.format(
# year = year, lat = lat, lon = lon, leap = leap_year, interval = interval,
# utc = utc, name = your_name, email = your_email,
# mailing_list = mailing_list, affiliation = your_affiliation,
# reason = reason_for_use, api = api_key, attr = attributes)
#
# # file_name, urllib.request.urlretrieve(url, "testfile.txt")
# with urllib.request.urlopen(url) as f:
# # Get the data as a string.
# response = f.read().decode('utf-8')
#
# # Read the first few lines to get info on datafile
# info_df = pd.read_csv(StringIO(response), nrows=1)
#
# # Create a dictionary for the info file.
# info_iter = {}
# for p in info_df:
# info_iter[p] = info_df[p].iloc[0]
#
# df_iter = pd.read_csv(StringIO(response), skiprows=2)
#
# if np.diff(df_iter[0:2].Minute) == 30:
# interval = '30'
# info_iter['interval_in_hours'] = 0.5
# elif np.diff(df_iter[0:2].Minute) == 0:
# interval = '60'
# info_iter['interval_in_hours'] = 1
# else:
# print('Interval not understood!')
info_iter['interval_in_hours'] = interval / 60
# Set the time index in the pandas dataframe:
year_iter = str(df_iter['Year'][0])
df_iter = df_iter.set_index(
pd.date_range('1/1/{yr}'.format(yr=year_iter),
freq='{}Min'.format(interval),
periods=len(df_iter)))
df_iter.index = df_iter.index.tz_localize(
pytz.FixedOffset(float(info_iter['Time Zone'] * 60)))
if j == 0:
info = info_iter
df = df_iter
else:
df = df.append(df_iter)
# Process/compress the downloaded dfs.
info['timedelta_in_years'] = (df.index[-1] - df.index[0]).days / 365
# Convert to int for lowering file size.
dni = np.array(df['DNI'].astype(np.int16))
dhi = np.array(df['DHI'].astype(np.int16))
ghi = np.array(df['GHI'].astype(np.int16))
temp_air = np.array(df['Temperature'].astype(np.float32))
wind_speed = np.array(df['Wind Speed'].astype(np.float16))
year = np.array(df['Year'].astype(np.int16))
month = np.array(df['Month'].astype(np.int8))
day = np.array(df['Day'].astype(np.int8))
hour = np.array(df['Hour'].astype(np.int8))
minute = np.array(df['Minute'].astype(np.int8))
cache_directory = 'cached_weather_data'
if not os.path.exists(cache_directory):
print('Creating cache directory')
os.mkdir(cache_directory)
save_filename = os.path.join(cache_directory,
'{}_{:3.2f}_{:3.2f}_search-point_{:3.3f}_{:3.3f}.npz'.format(
info['Location ID'], info['Latitude'],
info['Longitude'], lat, lon)
)
# Write to file.
np.savez_compressed(save_filename,
Source=info['Source'],
Location_ID=info['Location ID'],
Latitude=info['Latitude'],
Longitude=info['Longitude'],
Elevation=info['Elevation'],
local_time_zone=info['Local Time Zone'],
interval_in_hours=info['interval_in_hours'],
timedelta_in_years=info['timedelta_in_years'],
Version=info['Version'],
dni=dni,
dhi=dhi,
ghi=ghi,
temp_air=temp_air,
wind_speed=wind_speed,
year=year,
month=month,
day=day,
hour=hour,
minute=minute)
# Reload from file.
df, info = nsrdb.get_local_weather_data(save_filename)
return df, info
# def ashrae_get_data():
# dir_path = os.path.dirname(os.path.realpath(__file__))
#
# # Load temperature difference data.
# ashrae = pd.read_csv(
# os.path.join(dir_path, 'ASHRAE2017_temperature_data.csv')
# )
# return ashrae
def ashrae_get_design_conditions_at_loc(lat, lon, ashrae):
"""
Get the ASHRAE design conditions data closest to the lat/lon of interest.
Parameters
----------
lat
lon
ashrae : dataframe
Returns
-------
dataframe
fields are
'Latitude'
'Longitude'
'Extreme Annual Mean Minimum Dry Bulb Temperature' - ASHRAE
extreme minimum dry bulb temperature, in C
"""
# df = ashrae_get_design_conditions()
# Calculate distance to search point.
distance = nsrdb.haversine_distance(lat, lon, ashrae['Lat'], ashrae['Lon'])
closest_idx = distance.idxmin()
return ashrae.iloc[closest_idx]
def nec_correction_factor(temperature):
"""
NEC 690.7(A)(2) correction factor from NEC2017.
Parameters
----------
temperature : numeric
Temperature in C.
Returns
-------
correction_factor : flat
"""
is_array = isinstance(temperature, np.ndarray)
temperature = np.array([temperature])
f = np.zeros_like(temperature, dtype='float') + 1
f[temperature < 25] = 1.02
f[temperature < 20] = 1.04
f[temperature < 15] = 1.06
f[temperature < 10] = 1.08
f[temperature < 5] = 1.10
f[temperature < 0] = 1.12
f[temperature < -5] = 1.14
f[temperature < -10] = 1.16
f[temperature < -15] = 1.18
f[temperature < -20] = 1.20
f[temperature < -25] = 1.21
f[temperature < -30] = 1.23
f[temperature < -35] = 1.25
f[np.isnan(temperature)] = np.nan
if not is_array:
f = f[0]
return f
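# Example usage with a hypothetical design temperature, illustrating the NEC
# 690.7(A)(2) multiplier that would be applied to a module's rated Voc:
#   nec_correction_factor(-12.0)                   -> 1.16
#   nec_correction_factor(np.array([30.0, -12.0])) -> array([1.  , 1.16])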
def get_nsrdb_temperature_error(lat, lon,
number_of_closest_points=5):
"""
Find the temperature error for a particular location.
    The NSRDB database provides temperature data for many locations. However,
these data are taken from the MERRA-2 dataset, and have some error
compared to ground measurements. The temperature error depends on location.
As a comparison, we calculated the mean minimum extreme minimum dry bulb
temperature using NSRDB data and compared to ASHRAE data. The temperature
difference determines the safety factor necessary for string length
calculations.
This function finds the closest points to a particular lat,lon coordinate
in the ASHRAE dataset and returns the maximum temperature difference (
NSRDB - ASHRAE) for these locations. A higher temperature difference
means that the NSRDB is overestimating the true temperature that is
measured at a ground station. Higher positive temperature differences
mean that a larger safety factor should be used when calculating string
length. The Safety factor can be calculated
Examples
--------
temperature_difference = vocmax.get_nsrdb_temperature_error(lat,lon)
Parameters
----------
lat : float
latitude of search point in fractional degrees
lon : float
longitude of search point in fractional degrees
number_of_closest_points : int
The number of closest datapoints to find. Default is 5.
Returns
-------
temperature_difference : float
max temperature difference between NSRDB point and closest ASHRAE
points. A positive number means that the NSRDB design temperature is
higher than the ASHRAE design temperature. If a positive temperature
difference is found, then an additional safety factor is suggested to
account for this error in the NSRDB dataset.
"""
dir_path = os.path.dirname(os.path.realpath(__file__))
# Load temperature difference data.
df = pd.read_pickle(
os.path.join(dir_path, 'nsrdb_ashrae_comparison.pkl')
)
# Calculate distance to search point.
distance = vocmax.nsrdb.haversine_distance(lat, lon, df['lat'], df['lon'])
# Find the closest locations.
distance_sort = distance.sort_values()
closest_idx = distance_sort.index[:number_of_closest_points]
# Calculate temperature difference
temperature_difference = df['nsrdb-ashrae Extreme_Annual_Mean_Min_DB'].loc[
closest_idx]
return temperature_difference.max()
def ashrae_import_design_conditions(filename='2017DesignConditions_s.xlsx'):
"""
Load the ASHRAE 2017 design conditions excel file. This file is NOT
provided in vocmax, it must be purchased directly from ASHRAE and added
to the current directory. The filename is '2017DesignConditions_s.xlsx'.
The '_s' at the end of the filename stands for 'SI'. There is also
another file '2017DesignConditions_p.xlsx' that contains measurements in
imperial units, do not use this file.
In order to use this function, purchase the weather data viewer DVD,
version 6.0, available at:
https://www.techstreet.com/ashrae/standards/weather-data-viewer-dvd-version-6-0?ashrae_auth_token=<PASSWORD>89-8065208f2e36&product_id=1949790
    Importing the Excel file takes around 1 minute; the data are then saved as
    a csv file named filename + '.csv' in the current directory, which makes
    loading quick the second time.
Parameters
----------
filename : string
Filename to import.
Returns
-------
df : dataframe
Pandas dataframe containing certain fields of the weather data file.
"""
# filename = '2017DesignConditions_s.xlsx'
df = pd.read_excel(filename,
skiprows=0,
sheet_name=0,
header=[1, 2, 3],
verbose=False)
filename_out = filename + '.csv'
df_out = pd.DataFrame(
{'Lat': np.array(df['Lat']).flatten(),
'Lon': np.array(df['Lon']).flatten(),
'Country': np.array(df['Country']).flatten(),
'Station Name': np.array(df['Station Name']).flatten(),
'Extreme_Annual_Mean_Min_DB': np.array(
df['Extreme Annual DB']['Mean']['Min']).flatten(),
'Extreme_Annual_Standard Deviation_Min_DB': np.array(
df['Extreme Annual DB']['Standard Deviation']['Min']).flatten(),
'20-Year Return Period Extreme Min DB': np.array(
df['n-Year Return Period Values of Extreme DB']['n=20 years'][
'Min']).flatten(),
}
)
df_out.to_csv(filename_out, index=False)
return df_out
def ashrae_is_design_conditions_available(
filename='2017DesignConditions_s.xlsx'):
return os.path.exists(filename)
def ashrae_get_design_conditions(filename='2017DesignConditions_s.xlsx'):
"""
Get the ASHRAE design conditions data.
Parameters
----------
filename
Returns
-------
df : dataframe
Pandas dataframe containing certain fields of the ASHARE design
conditions file
"""
if os.path.exists(filename + '.csv'):
        df = pd.read_csv(filename + '.csv')
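    else:
        # Assumed fallback (truncated in the source): parse the ASHRAE
        # spreadsheet, which also caches it as filename + '.csv'.
        df = ashrae_import_design_conditions(filename)
    return df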
import pandas as pd
import os
# Global Constants
US_URL = "https://raw.githubusercontent.com/nytimes/covid-19-data/master/us.csv"
STATES_URL = "https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv"
COUNTIES_URL = "https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv"
RELATIVE_PATH = "data-truth/nytimes"
# Download raw data
us = pd.read_csv(US_URL, dtype={'cases': int, 'deaths': int}).fillna(value = 'NA')
us['date'] = pd.to_datetime(us['date'])
us.to_csv(os.path.join(RELATIVE_PATH,"raw/us.csv"), index = False)
states = pd.read_csv(STATES_URL, dtype={'fips': str, 'cases': int, 'deaths': int}).fillna(value = 'NA')
states['date'] = pd.to_datetime(states['date'])
states.to_csv(os.path.join(RELATIVE_PATH,"raw/us-states.csv"), index = False)
counties = pd.read_csv(COUNTIES_URL, dtype={'fips': str, 'cases': int, 'deaths': int})
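# Assumed continuation (truncated in the source), mirroring the us/states handling above:
counties = counties.fillna(value='NA')
counties['date'] = pd.to_datetime(counties['date'])
counties.to_csv(os.path.join(RELATIVE_PATH, "raw/us-counties.csv"), index=False)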
#!/usr/bin/env python
# coding: utf-8
import os
import pprint
import pandas as pd
from collections import OrderedDict
def get_parameters():
# Read Data
try:
#print('Read local table')
df_357 = pd.read_excel(
io=os.path.join(os.path.dirname(__file__), 'data', 'tab_conama_357.xlsx'),
sheet_name='conama_357',
index_col=0,
)
except Exception as e:
#print(e, '\n')
#print('Read table from GitHub')
df_357 = pd.read_excel(
io='https://raw.githubusercontent.com/gaemapiracicaba/norma_res_conama_357-05/main/src/normas/data/tab_conama_357.xlsx',
sheet_name='conama_357',
index_col=0,
)
# Filter only quality
df_357 = df_357.loc[(df_357['tipo_padrao'] == 'qualidade')]
#print(df_357.head())
# Classes
list_classes = list(set(df_357['padrao_qualidade']))
    list_classes = [x for x in list_classes if pd.notnull(x)]
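    # Truncated in the source; presumably the water-quality classes are sorted
    # and returned along with the filtered table, e.g. (assumed):
    list_classes.sort()
    return df_357, list_classes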
# -*- coding: utf-8 -*-
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt
from tkinter import filedialog
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from scipy.interpolate import make_interp_spline, BSpline
from mpldatacursor import datacursor
from matplotlib import style
from matplotlib.backends.backend_qt5agg import (FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from bisect import bisect_left
from scipy import interpolate
import math
import matplotlib.pyplot as plt
import matplotlib
import tkinter as tk
import pandas as pd
import glob
import numpy as np
import matplotlib.pylab as pylab
from scipy.optimize import root_scalar
params = {'legend.fontsize': 'x-large',
'figure.figsize': (15, 5),
'axes.labelsize': 'xx-large',
'axes.titlesize':'x-large',
'xtick.labelsize':'x-large',
'ytick.labelsize':'x-large'}
pylab.rcParams.update(params)
matplotlib.use('Qt5Agg')
style.use("ggplot")
def dBm2W(dBm):
    # convert a value in dB (or dBm) to a linear power ratio: 10**(x/10)
    return 10**(dBm/10)
def graficoBunito(x, y, points):
    # resample x onto `points` values and fit a cubic B-spline so plotted
    # curves look smooth; also return the spline object for later evaluation
    xnew = np.linspace(x.min(), x.max(), int(points))
    spl = make_interp_spline(x, y, k=3)  # BSpline object
    ynew = spl(xnew)
    return xnew, ynew, spl
class Ui_MainWindow(QtWidgets.QMainWindow):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1280, 720)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_3 = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout_3.setObjectName("gridLayout_3")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setEnabled(True)
self.tabWidget.setFocusPolicy(QtCore.Qt.NoFocus)
self.tabWidget.setObjectName("tabWidget")
self.diagRad = QtWidgets.QWidget()
self.diagRad.setObjectName("diagRad")
self.gridLayout_2 = QtWidgets.QGridLayout(self.diagRad)
self.gridLayout_2.setObjectName("gridLayout_2")
self.verticalLayout_8 = QtWidgets.QVBoxLayout()
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtWidgets.QLabel(self.diagRad)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.label_2 = QtWidgets.QLabel(self.diagRad)
self.label_2.setObjectName("label_2")
self.verticalLayout.addWidget(self.label_2)
self.horizontalLayout.addLayout(self.verticalLayout)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.folderPath = QtWidgets.QLineEdit(self.diagRad)
self.folderPath.setEnabled(True)
self.folderPath.setReadOnly(True)
self.folderPath.setObjectName("folderPath")
self.verticalLayout_2.addWidget(self.folderPath)
self.folderPath_4 = QtWidgets.QLineEdit(self.diagRad)
self.folderPath_4.setReadOnly(True)
self.folderPath_4.setClearButtonEnabled(False)
self.folderPath_4.setObjectName("folderPath_4")
self.verticalLayout_2.addWidget(self.folderPath_4)
self.horizontalLayout.addLayout(self.verticalLayout_2)
self.verticalLayout_7 = QtWidgets.QVBoxLayout()
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.browseFolder = QtWidgets.QPushButton(self.diagRad)
self.browseFolder.setObjectName("browseFolder")
self.verticalLayout_7.addWidget(self.browseFolder)
self.browseFolder_4 = QtWidgets.QPushButton(self.diagRad)
self.browseFolder_4.setObjectName("browseFolder_4")
self.verticalLayout_7.addWidget(self.browseFolder_4)
self.horizontalLayout.addLayout(self.verticalLayout_7)
self.verticalLayout_8.addLayout(self.horizontalLayout)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_freq = QtWidgets.QLabel(self.diagRad)
self.label_freq.setObjectName("label_freq")
self.horizontalLayout_2.addWidget(self.label_freq)
self.cb_frequency_4 = QtWidgets.QComboBox(self.diagRad)
self.cb_frequency_4.setObjectName("cb_frequency_4")
self.horizontalLayout_2.addWidget(self.cb_frequency_4)
self.horizontalLayout_4.addLayout(self.horizontalLayout_2)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.label_what_plot = QtWidgets.QLabel(self.diagRad)
self.label_what_plot.setObjectName("label_what_plot")
self.horizontalLayout_3.addWidget(self.label_what_plot)
self.cb_what_plot = QtWidgets.QComboBox(self.diagRad)
self.cb_what_plot.setObjectName("cb_what_plot")
self.horizontalLayout_3.addWidget(self.cb_what_plot)
self.horizontalLayout_4.addLayout(self.horizontalLayout_3)
self.saveCsv = QtWidgets.QPushButton(self.diagRad)
self.saveCsv.setObjectName("saveCsv")
self.horizontalLayout_4.addWidget(self.saveCsv)
self.verticalLayout_8.addLayout(self.horizontalLayout_4)
self.gridLayout_2.addLayout(self.verticalLayout_8, 0, 0, 1, 1)
'''
self.graphicsView = QtWidgets.QGraphicsView(self.diagRad)
self.graphicsView.setObjectName("graphicsView")
'''
self.canvas = FigureCanvas(Figure(figsize=(7, 7)))
self.ax = self.canvas.figure.add_subplot(111, polar=True)
self.ax.set_theta_zero_location("N")
self.ax.autoscale(enable = False)
self.ax.set_rmax(-15)
self.ax.set_rmin(-45)
self.gridLayout_2.addWidget(self.canvas, 1, 0, 1, 1)
self.toolbar = NavigationToolbar(self.canvas, self)
self.gridLayout_2.addWidget(self.toolbar, 2, 0, 1, 1)
self.splitter = QtWidgets.QSplitter(self.diagRad)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.normalize = QtWidgets.QCheckBox(self.splitter)
self.normalize.setObjectName("normalize")
self.hold = QtWidgets.QCheckBox(self.splitter)
self.hold.setObjectName("hold")
self.clearBtn_2 = QtWidgets.QPushButton(self.splitter)
self.clearBtn_2.setObjectName("clearBtn_2")
self.gridLayout_2.addWidget(self.splitter, 3, 0, 1, 1)
self.tabWidget.addTab(self.diagRad, "")
self.dist = QtWidgets.QWidget()
self.dist.setObjectName("dist")
self.gridLayout_4 = QtWidgets.QGridLayout(self.dist)
self.gridLayout_4.setObjectName("gridLayout_4")
self.horizontalLayout_25 = QtWidgets.QHBoxLayout()
self.horizontalLayout_25.setObjectName("horizontalLayout_25")
self.horizontalLayout_26 = QtWidgets.QHBoxLayout()
self.horizontalLayout_26.setObjectName("horizontalLayout_26")
self.label_13 = QtWidgets.QLabel(self.dist)
self.label_13.setObjectName("label_13")
self.horizontalLayout_26.addWidget(self.label_13)
self.folderPath_2 = QtWidgets.QLineEdit(self.dist)
self.folderPath_2.setObjectName("folderPath_2")
self.folderPath_2.setReadOnly(True)
self.horizontalLayout_26.addWidget(self.folderPath_2)
self.horizontalLayout_25.addLayout(self.horizontalLayout_26)
self.browseFolder_2 = QtWidgets.QPushButton(self.dist)
self.browseFolder_2.setObjectName("browseFolder_2")
self.horizontalLayout_25.addWidget(self.browseFolder_2)
self.gridLayout_4.addLayout(self.horizontalLayout_25, 0, 0, 1, 1)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.horizontalLayout_27 = QtWidgets.QHBoxLayout()
self.horizontalLayout_27.setObjectName("horizontalLayout_27")
self.label_14 = QtWidgets.QLabel(self.dist)
self.label_14.setObjectName("label_14")
self.horizontalLayout_27.addWidget(self.label_14)
self.cb_frequency_2 = QtWidgets.QComboBox(self.dist)
self.cb_frequency_2.setObjectName("cb_frequency_2")
self.horizontalLayout_27.addWidget(self.cb_frequency_2)
self.horizontalLayout_5.addLayout(self.horizontalLayout_27)
self.horizontalLayout_28 = QtWidgets.QHBoxLayout()
self.horizontalLayout_28.setObjectName("horizontalLayout_28")
self.label_15 = QtWidgets.QLabel(self.dist)
self.label_15.setObjectName("label_15")
self.horizontalLayout_28.addWidget(self.label_15)
self.cb_what_plot_2 = QtWidgets.QComboBox(self.dist)
self.cb_what_plot_2.setObjectName("cb_what_plot_2")
self.horizontalLayout_28.addWidget(self.cb_what_plot_2)
self.horizontalLayout_5.addLayout(self.horizontalLayout_28)
self.saveCsv_2 = QtWidgets.QPushButton(self.dist)
self.saveCsv_2.setObjectName("saveCsv_2")
self.horizontalLayout_5.addWidget(self.saveCsv_2)
self.gridLayout_4.addLayout(self.horizontalLayout_5, 1, 0, 1, 1)
self.canvas_2 = FigureCanvas(Figure(figsize=(7, 7)))
self.ax_2 = self.canvas_2.figure.add_subplot(111)
self.gridLayout_4.addWidget(self.canvas_2, 2, 0, 1, 1)
self.toolbar_2 = NavigationToolbar(self.canvas_2, self)
self.gridLayout_4.addWidget(self.toolbar_2, 3, 0, 1 ,1)
self.splitter_4 = QtWidgets.QSplitter(self.dist)
self.splitter_4.setOrientation(QtCore.Qt.Horizontal)
self.splitter_4.setObjectName("splitter_4")
self.normalize_2 = QtWidgets.QCheckBox(self.splitter_4)
self.normalize_2.setObjectName("normalize_2")
self.hold_2 = QtWidgets.QCheckBox(self.splitter_4)
self.hold_2.setObjectName("hold_2")
self.clearBtn_3 = QtWidgets.QPushButton(self.splitter_4)
self.clearBtn_3.setObjectName("clearBtn_3")
self.gridLayout_4.addWidget(self.splitter_4, 4, 0, 1, 1)
self.tabWidget.addTab(self.dist, "")
self.perdas = QtWidgets.QWidget()
self.perdas.setObjectName("perdas")
self.gridLayout_5 = QtWidgets.QGridLayout(self.perdas)
self.gridLayout_5.setObjectName("gridLayout_5")
self.verticalLayout_15 = QtWidgets.QVBoxLayout()
self.verticalLayout_15.setObjectName("verticalLayout_15")
self.verticalLayout_16 = QtWidgets.QVBoxLayout()
self.verticalLayout_16.setObjectName("verticalLayout_16")
self.verticalLayout_17 = QtWidgets.QVBoxLayout()
self.verticalLayout_17.setObjectName("verticalLayout_17")
self.horizontalLayout_31 = QtWidgets.QHBoxLayout()
self.horizontalLayout_31.setObjectName("horizontalLayout_31")
self.horizontalLayout_32 = QtWidgets.QHBoxLayout()
self.horizontalLayout_32.setObjectName("horizontalLayout_32")
self.label_16 = QtWidgets.QLabel(self.perdas)
self.label_16.setObjectName("label_16")
self.horizontalLayout_32.addWidget(self.label_16)
self.folderPath_3 = QtWidgets.QLineEdit(self.perdas)
self.folderPath_3.setObjectName("folderPath_3")
self.folderPath_3.setReadOnly(True)
self.horizontalLayout_32.addWidget(self.folderPath_3)
self.horizontalLayout_31.addLayout(self.horizontalLayout_32)
self.browseFolder_3 = QtWidgets.QPushButton(self.perdas)
self.browseFolder_3.setObjectName("browseFolder_3")
self.horizontalLayout_31.addWidget(self.browseFolder_3)
self.verticalLayout_17.addLayout(self.horizontalLayout_31)
self.verticalLayout_16.addLayout(self.verticalLayout_17)
self.canvas_3 = FigureCanvas(Figure(figsize=(7, 7)))
self.ax_3 = self.canvas_3.figure.add_subplot(111)
self.verticalLayout_16.addWidget(self.canvas_3)
self.toolbar_3 = NavigationToolbar(self.canvas_3, self)
self.verticalLayout_16.addWidget(self.toolbar_3)
self.verticalLayout_15.addLayout(self.verticalLayout_16)
self.splitter_5 = QtWidgets.QSplitter(self.perdas)
self.splitter_5.setOrientation(QtCore.Qt.Horizontal)
self.splitter_5.setObjectName("splitter_5")
self.verticalLayout_15.addWidget(self.splitter_5)
self.gridLayout_5.addLayout(self.verticalLayout_15, 0, 0, 1, 1)
self.tabWidget.addTab(self.perdas, "")
self.tab = QtWidgets.QWidget()
self.tab.setEnabled(True)
self.tab.setObjectName("tab")
self.gridLayout_7 = QtWidgets.QGridLayout(self.tab)
self.gridLayout_7.setObjectName("gridLayout_7")
self.splitter_2 = QtWidgets.QSplitter(self.tab)
self.splitter_2.setOrientation(QtCore.Qt.Horizontal)
self.splitter_2.setObjectName("splitter_2")
self.layoutWidget = QtWidgets.QWidget(self.splitter_2)
self.layoutWidget.setObjectName("layoutWidget")
self.horizontalLayout_8 = QtWidgets.QHBoxLayout(self.layoutWidget)
self.horizontalLayout_8.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.verticalLayout_4 = QtWidgets.QVBoxLayout()
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.label_3 = QtWidgets.QLabel(self.layoutWidget)
self.label_3.setObjectName("label_3")
self.verticalLayout_4.addWidget(self.label_3)
self.label_4 = QtWidgets.QLabel(self.layoutWidget)
self.label_4.setObjectName("label_4")
self.verticalLayout_4.addWidget(self.label_4)
self.label_freq_2 = QtWidgets.QLabel(self.layoutWidget)
self.label_freq_2.setObjectName("label_freq_2")
self.verticalLayout_4.addWidget(self.label_freq_2)
self.horizontalLayout_8.addLayout(self.verticalLayout_4)
self.verticalLayout_5 = QtWidgets.QVBoxLayout()
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.folderPath_5 = QtWidgets.QLineEdit(self.layoutWidget)
self.folderPath_5.setMinimumSize(QtCore.QSize(81, 0))
self.folderPath_5.setObjectName("folderPath_5")
self.folderPath_5.setReadOnly(True)
self.verticalLayout_5.addWidget(self.folderPath_5)
self.folderPath_6 = QtWidgets.QLineEdit(self.layoutWidget)
self.folderPath_6.setMinimumSize(QtCore.QSize(81, 20))
self.folderPath_6.setObjectName("folderPath_6")
self.folderPath_6.setReadOnly(True)
self.verticalLayout_5.addWidget(self.folderPath_6)
self.cb_frequency_3 = QtWidgets.QComboBox(self.layoutWidget)
self.cb_frequency_3.setMinimumSize(QtCore.QSize(81, 20))
self.cb_frequency_3.setObjectName("cb_frequency_3")
self.verticalLayout_5.addWidget(self.cb_frequency_3)
self.horizontalLayout_8.addLayout(self.verticalLayout_5)
self.verticalLayout_6 = QtWidgets.QVBoxLayout()
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.browseFolder_6 = QtWidgets.QPushButton(self.layoutWidget)
self.browseFolder_6.setObjectName("browseFolder_6")
self.verticalLayout_6.addWidget(self.browseFolder_6)
self.browseFolder_5 = QtWidgets.QPushButton(self.layoutWidget)
self.browseFolder_5.setObjectName("browseFolder_5")
self.verticalLayout_6.addWidget(self.browseFolder_5)
self.saveCsv_3 = QtWidgets.QPushButton(self.layoutWidget)
self.saveCsv_3.setObjectName("saveCsv_3")
self.verticalLayout_6.addWidget(self.saveCsv_3)
self.horizontalLayout_8.addLayout(self.verticalLayout_6)
self.line = QtWidgets.QFrame(self.splitter_2)
self.line.setMaximumSize(QtCore.QSize(3, 16777215))
self.line.setFrameShape(QtWidgets.QFrame.VLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.widget = QtWidgets.QWidget(self.splitter_2)
self.widget.setObjectName("widget")
self.gridLayout_6 = QtWidgets.QGridLayout(self.widget)
self.gridLayout_6.setContentsMargins(0, 0, 0, 0)
self.gridLayout_6.setObjectName("gridLayout_6")
self.verticalLayout_12 = QtWidgets.QVBoxLayout()
self.verticalLayout_12.setObjectName("verticalLayout_12")
self.GainCheckBox = QtWidgets.QCheckBox(self.widget)
self.GainCheckBox.setObjectName("GainCheckBox")
self.verticalLayout_12.addWidget(self.GainCheckBox)
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.label_5 = QtWidgets.QLabel(self.widget)
self.label_5.setObjectName("label_5")
self.horizontalLayout_7.addWidget(self.label_5)
self.cb_Gain_1 = QtWidgets.QComboBox(self.widget)
self.cb_Gain_1.setMinimumSize(QtCore.QSize(81, 20))
self.cb_Gain_1.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.cb_Gain_1.setObjectName("cb_Gain_1")
self.horizontalLayout_7.addWidget(self.cb_Gain_1)
self.verticalLayout_12.addLayout(self.horizontalLayout_7)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.label_6 = QtWidgets.QLabel(self.widget)
self.label_6.setObjectName("label_6")
self.horizontalLayout_6.addWidget(self.label_6)
self.cb_Gain_2 = QtWidgets.QComboBox(self.widget)
self.cb_Gain_2.setMinimumSize(QtCore.QSize(81, 20))
self.cb_Gain_2.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.cb_Gain_2.setObjectName("cb_Gain_2")
self.horizontalLayout_6.addWidget(self.cb_Gain_2)
self.verticalLayout_12.addLayout(self.horizontalLayout_6)
self.gridLayout_6.addLayout(self.verticalLayout_12, 0, 0, 1, 1)
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.label_10 = QtWidgets.QLabel(self.widget)
self.label_10.setText("")
self.label_10.setObjectName("label_10")
self.verticalLayout_3.addWidget(self.label_10)
self.label_7 = QtWidgets.QLabel(self.widget)
self.label_7.setObjectName("label_7")
self.verticalLayout_3.addWidget(self.label_7)
self.line_Gain_Output = QtWidgets.QLineEdit(self.widget)
self.line_Gain_Output.setMinimumSize(QtCore.QSize(81, 20))
self.line_Gain_Output.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.line_Gain_Output.setObjectName("line_Gain_Output")
self.line_Gain_Output.setReadOnly(True)
self.verticalLayout_3.addWidget(self.line_Gain_Output)
self.gridLayout_6.addLayout(self.verticalLayout_3, 0, 1, 1, 1)
self.gridLayout_7.addWidget(self.splitter_2, 0, 0, 1, 1)
self.canvas_4 = FigureCanvas(Figure(figsize=(7, 7)))
self.ax_4 = self.canvas_4.figure.add_subplot(111)
self.gridLayout_7.addWidget(self.canvas_4, 1, 0, 1, 1)
self.toolbar_4 = NavigationToolbar(self.canvas_4, self)
self.gridLayout_7.addWidget(self.toolbar_4, 2, 0, 1, 1)
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.normalize_3 = QtWidgets.QCheckBox(self.tab)
self.normalize_3.setObjectName("normalize_3")
self.gridLayout.addWidget(self.normalize_3, 3, 0, 1, 1)
self.hold_3 = QtWidgets.QCheckBox(self.tab)
self.hold_3.setObjectName("hold_3")
self.gridLayout.addWidget(self.hold_3, 3, 1, 1, 1)
self.clearBtn_4 = QtWidgets.QPushButton(self.tab)
self.clearBtn_4.setObjectName("clearBtn_4")
self.gridLayout.addWidget(self.clearBtn_4, 3, 2, 1, 1)
self.gridLayout_7.addLayout(self.gridLayout, 3, 0, 1, 1)
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.gridLayout_9 = QtWidgets.QGridLayout(self.tab_2)
self.gridLayout_9.setObjectName("gridLayout_9")
self.gridLayout_8 = QtWidgets.QGridLayout()
self.gridLayout_8.setObjectName("gridLayout_8")
self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.verticalLayout_9 = QtWidgets.QVBoxLayout()
self.verticalLayout_9.setObjectName("verticalLayout_9")
self.label_8 = QtWidgets.QLabel(self.tab_2)
self.label_8.setMaximumSize(QtCore.QSize(52, 16))
self.label_8.setObjectName("label_8")
self.verticalLayout_9.addWidget(self.label_8)
self.label_9 = QtWidgets.QLabel(self.tab_2)
self.label_9.setMaximumSize(QtCore.QSize(52, 16))
self.label_9.setObjectName("label_9")
self.verticalLayout_9.addWidget(self.label_9)
self.label_12 = QtWidgets.QLabel(self.tab_2)
self.label_12.setObjectName("label_12")
self.label_12.setMaximumSize(QtCore.QSize(52, 16))
self.verticalLayout_9.addWidget(self.label_12)
self.horizontalLayout_9.addLayout(self.verticalLayout_9)
self.verticalLayout_10 = QtWidgets.QVBoxLayout()
self.verticalLayout_10.setObjectName("verticalLayout_10")
self.line_med1 = QtWidgets.QLineEdit(self.tab_2)
self.line_med1.setObjectName("line_med1")
self.verticalLayout_10.addWidget(self.line_med1)
self.line_med2 = QtWidgets.QLineEdit(self.tab_2)
self.line_med2.setObjectName("line_med2")
self.verticalLayout_10.addWidget(self.line_med2)
self.line_perdas = QtWidgets.QLineEdit(self.tab_2)
self.line_perdas.setObjectName("line_perdas")
self.verticalLayout_10.addWidget(self.line_perdas)
self.horizontalLayout_9.addLayout(self.verticalLayout_10)
self.verticalLayout_11 = QtWidgets.QVBoxLayout()
self.verticalLayout_11.setObjectName("verticalLayout_11")
self.estimate_gain_1_btn = QtWidgets.QPushButton(self.tab_2)
self.estimate_gain_1_btn.setMaximumSize(QtCore.QSize(75, 20))
self.estimate_gain_1_btn.setObjectName("estimate_gain_1_btn")
self.verticalLayout_11.addWidget(self.estimate_gain_1_btn)
self.estimate_gain_2_btn = QtWidgets.QPushButton(self.tab_2)
self.estimate_gain_2_btn.setMaximumSize(QtCore.QSize(75, 20))
self.estimate_gain_2_btn.setObjectName("estimate_gain_2_btn")
self.verticalLayout_11.addWidget(self.estimate_gain_2_btn)
self.estimate_gain_3_btn = QtWidgets.QPushButton(self.tab_2)
self.estimate_gain_3_btn.setMaximumSize(QtCore.QSize(75, 20))
self.estimate_gain_3_btn.setObjectName("estimate_gain_3_btn")
self.verticalLayout_11.addWidget(self.estimate_gain_3_btn)
self.horizontalLayout_9.addLayout(self.verticalLayout_11)
self.verticalLayout_13 = QtWidgets.QVBoxLayout()
self.verticalLayout_13.setObjectName("verticalLayout_13")
self.label_11 = QtWidgets.QLabel(self.tab_2)
self.label_11.setObjectName("label_11")
self.verticalLayout_13.addWidget(self.label_11)
self.gainEstimateFrequency = QtWidgets.QComboBox(self.tab_2)
self.gainEstimateFrequency.setObjectName("gainEstimateFrequency")
self.verticalLayout_13.addWidget(self.gainEstimateFrequency)
self.horizontalLayout_9.addLayout(self.verticalLayout_13)
self.gridLayout_8.addLayout(self.horizontalLayout_9, 0, 0, 1, 1)
self.canvas_5 = FigureCanvas(Figure(figsize=(7, 7)))
self.ax_5 = self.canvas_5.figure.add_subplot(111)
self.gridLayout_8.addWidget(self.canvas_5, 1, 0, 1, 1)
self.toolbar_5 = NavigationToolbar(self.canvas_5, self)
self.gridLayout_8.addWidget(self.toolbar_5, 2, 0, 1, 1)
'''
self.graphicsView_estimativa = QtWidgets.QGraphicsView(self.tab_2)
self.graphicsView_estimativa.setObjectName("graphicsView_estimativa")
self.gridLayout_8.addWidget(self.graphicsView_estimativa, 1, 0, 1, 1)
'''
self.gridLayout_9.addLayout(self.gridLayout_8, 0, 0, 1, 1)
self.tabWidget.addTab(self.tab_2, "")
self.gridLayout_3.addWidget(self.tabWidget, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 782, 21))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuHelp = QtWidgets.QMenu(self.menubar)
self.menuHelp.setObjectName("menuHelp")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionQuit = QtWidgets.QAction(MainWindow)
self.actionQuit.setObjectName("actionQuit")
self.actionHelp = QtWidgets.QAction(MainWindow)
self.actionHelp.setObjectName("actionHelp")
self.actionAbout = QtWidgets.QAction(MainWindow)
self.actionAbout.setObjectName("actionAbout")
self.menuFile.addAction(self.actionQuit)
self.menuHelp.addAction(self.actionHelp)
self.menuHelp.addSeparator()
self.menuHelp.addAction(self.actionAbout)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.browseFolder.clicked.connect(self.load_csv)
self.browseFolder_2.clicked.connect(self.load_csv_2)
self.browseFolder_3.clicked.connect(self.load_csv_file)
self.browseFolder_4.clicked.connect(self.load_csv_file_3)
self.browseFolder_5.clicked.connect(self.load_csv_file_2)
self.browseFolder_6.clicked.connect(self.load_csv_3)
self.clearBtn_2.clicked.connect(self.clear_plot)
self.clearBtn_3.clicked.connect(self.clear_plot_3)
self.clearBtn_4.clicked.connect(self.clear_plot_2)
self.saveCsv.clicked.connect(self.save_csv)
self.saveCsv_2.clicked.connect(self.save_csv_2)
self.saveCsv_3.clicked.connect(self.save_csv_3)
self.cb_frequency_4.activated.connect(self.update_plot)
self.cb_frequency_2.activated.connect(self.update_plot_2)
self.cb_frequency_3.activated.connect(self.update_plot_3)
self.cb_what_plot.activated.connect(self.what_plot)
self.cb_what_plot_2.activated.connect(self.what_plot_2)
self.GainCheckBox.stateChanged.connect(self.GainEstimateEnabled)
self.cb_Gain_1.activated.connect(self.GainEstimate)
self.cb_Gain_2.activated.connect(self.GainEstimate)
self.GainEstimateEnabled = False
self.estimate_gain_1_btn.clicked.connect(self.LoadGainMeasurement1)
self.estimate_gain_2_btn.clicked.connect(self.LoadGainMeasurement2)
self.estimate_gain_3_btn.clicked.connect(self.LoadGainLossMeasurement)
self.gainEstimateFrequency.activated.connect(self.EstimateGain)
self.folderLoaded = False
self.folderLoaded_2 = False
self.lossLoaded = False
self.lossLoaded_perda = False
self.med1Loaded = False
self.med2Loaded = False
self.medPerdaLoaded = False
self.scatGain = False
def EstimateGain(self):
if not self.med1Loaded:
QtWidgets.QMessageBox.information(None, QtWidgets.qApp.tr("Open File"),
QtWidgets.qApp.tr("Medição 1 não foi carregada corretamente!"),
QtWidgets.QMessageBox.Ok)
elif not self.med2Loaded:
QtWidgets.QMessageBox.information(None, QtWidgets.qApp.tr("Open File"),
QtWidgets.qApp.tr("Medição 2 não foi carregada corretamente!"),
QtWidgets.QMessageBox.Ok)
elif not self.medPerdaLoaded:
QtWidgets.QMessageBox.information(None, QtWidgets.qApp.tr("Open File"),
QtWidgets.qApp.tr("Medição de Perdas não foi carregada corretamente!"),
QtWidgets.QMessageBox.Ok)
else:
def func(k):
return G1dB*(1 - math.exp(-k*float(D2))) - G2dB*(1 - math.exp(-k*float(D1)))
def Alfredo(k, gain, x):
return gain*(1 - np.exp(-k*x))
D1 = self.GainMed1_path.name.replace('.CSV', '')[-3:]
D2 = self.GainMed2_path.name.replace('.CSV', '')[-3:]
desFreq = round(float(self.gainEstimateFrequency.currentText())*1e9)
D1S21 = self.GainMed1[self.GainMed1.Frequency == float(desFreq)].S21.values[0]
D2S21 = self.GainMed2[self.GainMed2.Frequency == float(desFreq)].S21.values[0]
#D1S21 = S21D1[S21D1.Distancia == float(D1)].S21.values[0]
#D2S21 = S21D2[S21D2.Distancia == float(D2)].S21.values[0]
D1 = float(D1)/100
D2 = float(D2)/100
perda = self.funcaoPerdaGain(desFreq/1e9)
D1S21W = dBm2W(D1S21 - perda)
D2S21W = dBm2W(D2S21 - perda)
lmbda = 3e8/desFreq
G1 = np.sqrt(D1S21W)*(4*np.pi*float(D1))/lmbda
G2 = np.sqrt(D2S21W)*(4*np.pi*float(D2))/lmbda
if float(D1) != 0.0 and float(D2) != 0.0 and D1 != D2:
G1dB = 10*np.log10(G1)
G2dB = 10*np.log10(G2)
if self.scatGain:
print('Tem Scat', self.scatGain)
self.scatGain.remove()
#self.approxGain.pop(0).remove()
self.canvas_5.draw_idle()
self.scatGain = self.ax_5.scatter([float(D1)*100, float(D2)*100], [G1dB, G2dB], label='Medições')
print(self.scatGain)
self.canvas_5.draw_idle()
#print(f'\nOrigi = {D1S21}, perda = {perda}, S21 = {D1S21 - perda}, S21W = {D1S21W}, dist = {D1}, ganho = {G1dB}')
#print(f'Origi = {D2S21}, perda = {perda},S21 = {D2S21 - perda}, S21W = {D2S21W}, dist = {D2}, ganho = {G2dB}')
kmax = [0.1, 1000]
try:
sol = root_scalar(func, method='toms748', bracket = kmax)
k = sol.root
Gcd = G1dB/(1-math.exp(-k*float(D1)))
print(f'k = {k}, Gcd = {Gcd}')
x2 = np.arange(0, 6, 0.10)
self.approxGain = self.ax_5.plot(x2*100, Alfredo(k, Gcd, x2), label=f'G = {round(Gcd,2)} dB')
legenda = self.ax_5.legend(bbox_to_anchor=(0, 1.02, 1, .102), borderaxespad=0, loc="right")
legenda.set_draggable(True)
except:
pass
QtWidgets.QMessageBox.information(None, QtWidgets.qApp.tr("Estimativa Erro"),
QtWidgets.qApp.tr("Não foi possível achar uma solução para k = [0.1, 1000]"),
QtWidgets.QMessageBox.Ok)
def LoadGainMeasurement1(self):
root = tk.Tk()
root.withdraw()
self.GainMed1_path = filedialog.askopenfile()
try:
self.GainMed1= pd.read_csv(self.GainMed1_path, header=2, engine='python')
self.line_med1.setText(self.GainMed1_path.name)
dist1 = self.GainMed1_path.name.replace('.CSV', '')[-3:]
self.GainMed1.rename(columns = {self.GainMed1.columns[1]: 'S21', self.GainMed1.columns[2]: 'Phase'}, inplace = True)
self.gainFreq1 = self.GainMed1.Frequency.unique()/1e9
print(f'Frequências 1 = {self.gainFreq1}')
# self.freq_loss = self.df_4.iloc[:,0]/1e9
#self.loss = self.df_4.iloc[:,1]
#nada, fon, self.funcao_perda = graficoBunito(self.freq_loss, self.loss, self.freq_loss.size*3)
self.med1Loaded = True
except:
pass
QtWidgets.QMessageBox.information(None, QtWidgets.qApp.tr("Open File"),
QtWidgets.qApp.tr("Erro ao abrir Medição 1!"),
QtWidgets.QMessageBox.Ok)
def LoadGainMeasurement2(self):
root = tk.Tk()
root.withdraw()
self.GainMed2_path = filedialog.askopenfile()
try:
            self.GainMed2 = pd.read_csv(self.GainMed2_path, header=2, engine='python')
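            # The original is truncated here; an assumed completion mirroring
            # LoadGainMeasurement1:
            self.line_med2.setText(self.GainMed2_path.name)
            self.GainMed2.rename(columns={self.GainMed2.columns[1]: 'S21', self.GainMed2.columns[2]: 'Phase'}, inplace=True)
            self.med2Loaded = True
        except:
            QtWidgets.QMessageBox.information(None, QtWidgets.qApp.tr("Open File"),
                                              QtWidgets.qApp.tr("Erro ao abrir Medição 2!"),
                                              QtWidgets.QMessageBox.Ok)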
import datetime
import hashlib
import os
import time
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
safe_close,
)
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
from pandas.io.pytables import (
HDFStore,
read_hdf,
)
pytestmark = pytest.mark.single_cpu
def test_context(setup_path):
with tm.ensure_clean(setup_path) as path:
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
with tm.ensure_clean(setup_path) as path:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
def test_no_track_times(setup_path):
# GH 32682
# enables to set track_times (see `pytables` `create_table` documentation)
def checksum(filename, hash_factory=hashlib.md5, chunk_num_blocks=128):
h = hash_factory()
with open(filename, "rb") as f:
for chunk in iter(lambda: f.read(chunk_num_blocks * h.block_size), b""):
h.update(chunk)
return h.digest()
def create_h5_and_return_checksum(track_times):
with ensure_clean_path(setup_path) as path:
df = DataFrame({"a": [1]})
with HDFStore(path, mode="w") as hdf:
hdf.put(
"table",
df,
format="table",
data_columns=True,
index=None,
track_times=track_times,
)
return checksum(path)
checksum_0_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_0_tt_true = create_h5_and_return_checksum(track_times=True)
# sleep is necessary to create h5 with different creation time
time.sleep(1)
checksum_1_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_1_tt_true = create_h5_and_return_checksum(track_times=True)
# checksums are the same if track_time = False
assert checksum_0_tt_false == checksum_1_tt_false
# checksums are NOT same if track_time = True
assert checksum_0_tt_true != checksum_1_tt_true
def test_iter_empty(setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[df.index[3:6], ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@pytest.mark.filterwarnings("ignore:object name:tables.exceptions.NaturalNameWarning")
def test_contains(setup_path):
    with ensure_clean_store(setup_path) as store:
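        # The original test body is truncated here; the remainder presumably
        # populates the store and checks key membership, e.g. (assumed):
        store["a"] = tm.makeTimeSeries()
        store["b"] = tm.makeDataFrame()
        assert "a" in store
        assert "b" in store
        assert "c" not in store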
import pandas as pd
import numpy as np
# JSON parsing
import json
# HTML parsing
from lxml import etree
import urllib
# SQLite RDBMS
import sqlite3
# Time conversions
import time
# Parallel processing
import swifter
# NoSQL DB
from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError, OperationFailure
import os
# TODO: Adapt the data loading code from class.
# YOUR CODE HERE
def get_df(rel):
ret = pd.DataFrame(rel).fillna('')
for k in ret.keys():
ret[k] = ret[k].astype(str)
return ret
def extract_relation(rel, name):
'''
Pull out a nested list that has a key, and return it as a list
of dictionaries suitable for treating as a relation / dataframe
'''
# We'll return a list
ret = []
if name in rel:
ret2 = rel.pop(name)
try:
# Try to parse the string as a dictionary
ret2 = json.loads(ret2.replace('\'','\"'))
except:
# If we get an error in parsing, we'll leave as a string
pass
# If it's a dictionary, add it to our return results after
# adding a key to the parent
if isinstance(ret2, dict):
item = ret2
item['person'] = rel['_id']
ret.append(item)
else:
# If it's a list, iterate over each item
index = 0
for r in ret2:
item = r
if not isinstance(item, dict):
item = {'person': rel['_id'], 'value': item}
else:
item['person'] = rel['_id']
# A fix to a typo in the data
if 'affilition' in item:
item['affiliation'] = item.pop('affilition')
item['pos'] = index
index = index + 1
ret.append(item)
return ret
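def _example_extract_relation():
    # Hedged worked example (hypothetical record, not taken from the actual data dump):
    # a nested "education" string is flattened into per-row dicts that are keyed back
    # to the parent person via the 'person' and 'pos' fields added above.
    person = {'_id': 'p1', 'education': "[{'school': 'A'}, {'school': 'B'}]"}
    rows = extract_relation(person, 'education')
    # rows == [{'school': 'A', 'person': 'p1', 'pos': 0},
    #          {'school': 'B', 'person': 'p1', 'pos': 1}]
    return rows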
def data_loading(file, dbname='linkedin.db', filetype='localobj', LIMIT=20000):
if(filetype == 'localpath'):
# linked_in = urllib.request.urlopen('file://' + cwd + '/' + file)
linked_in = open(file)
elif(filetype == 'localobj'):
linked_in = file
else: #URL
linked_in = urllib.request.urlopen(file)
names = []
people = []
groups = []
education = []
skills = []
experience = []
honors = []
also_view = []
events = []
lines = []
i = 0
# LIMIT = 20000 # Max records to parse
for line in linked_in:
try:
line = line.decode('utf-8')
except:
line = line
try:
person = json.loads(line)
# By inspection, all of these are nested dictionary or list content
nam = extract_relation(person, 'name')
edu = extract_relation(person, 'education')
grp = extract_relation(person, 'group')
skl = extract_relation(person, 'skills')
exp = extract_relation(person, 'experience')
hon = extract_relation(person, 'honors')
als = extract_relation(person, 'also_view')
eve = extract_relation(person, 'events')
# This doesn't seem relevant and it's the only
# non-string field that's sometimes null
if 'interval' in person:
person.pop('interval')
lines.append(person)
names = names + nam
education = education + edu
groups = groups + grp
skills = skills + skl
experience = experience + exp
honors = honors + hon
also_view = also_view + als
events = events + eve
except:
pass
i = i + 1
if(i % 10000 == 0):
print (i)
if i >= LIMIT:
break
people_df = get_df(pd.DataFrame(lines))
names_df = get_df(pd.DataFrame(names))
education_df = get_df(pd.DataFrame(education))
groups_df = get_df(pd.DataFrame(groups))
skills_df = get_df(pd.DataFrame(skills))
experience_df = get_df(pd.DataFrame(experience))
import os
import sys
import requests
import re
import pandas as pd
from save_to_xlsx import append_df_to_excel
# APPLICATION INFO
client_id = "ppYCMnYAz3em2lZ4Oisn"
client_secret = "bUstOMZXpg"
# CONSTS
baseURL = "https://openapi.naver.com/v1/search/local.json"
headers = {"X-Naver-Client-Id": client_id,
"X-Naver-Client-Secret": client_secret}
filename = 'naver_data.xlsx'
sheet_name = 'Data'
keywords = pd.read_csv('keyword.csv')
print('starting up...')
# IMPORTING LIBRARIES -------------------------------------
#region
import PySimpleGUI as sg
import pandas as pd
import numpy as np
import os
import re
import glob
import csv
import shutil
import datetime
from datetime import datetime
#endregion
# GUI ------------------------------------------------------
print('Opening GUI')
sg.theme('DarkAmber') # Add a touch of color
# All the stuff inside your window.
layout = [ [sg.Text('Example - C:\FILES ')],
[sg.Text('Enter folder path'), sg.InputText()],
[sg.Text('Example - data if data.csv ')],
[sg.Text('Enter file name'), sg.InputText()],
[sg.Text('CLICK ON OK TO MAKE IT RUN')],
[sg.Button('Ok'), sg.Button('Cancel')]
]
# Create the Window
window = sg.Window('Window Title', layout)
# Event Loop to process "events" and get the "values" of the inputs
while True:
event, values = window.read()
if event == sg.WIN_CLOSED or event == 'Cancel': # if user closes window or clicks cancel
break
input_folder = str(values[0])
input_filename = str(values[1])
# LOGIC TO DO ---------------------------------------------------------
input_filename = input_filename + '.csv'
filepath_name =str(input_folder + str('\\') + input_filename)
print('input')
print(filepath_name)
output_folder = input_folder.replace('\\','/')
output_filenamepath = output_folder + '/PPD-Data.xlsx'
print('output is here')
print(output_filenamepath)
# READ FILE -----------------------------------------------
print('reading csv file')
df_file = pd.read_csv(filepath_name)
# -*- coding: utf-8 -*-.
"""Provides default routines for solar wind and geospace indices
"""
from __future__ import print_function
from __future__ import absolute_import
import pandas as pds
import numpy as np
import pysat
def combine_kp(standard_inst=None, recent_inst=None, forecast_inst=None,
start=None, stop=None, fill_val=np.nan):
""" Combine the output from the different Kp sources for a range of dates
Parameters
----------
standard_inst : (pysat.Instrument or NoneType)
Instrument object containing data for the 'sw' platform, 'kp' name,
and '' tag or None to exclude (default=None)
recent_inst : (pysat.Instrument or NoneType)
Instrument object containing data for the 'sw' platform, 'kp' name,
and 'recent' tag or None to exclude (default=None)
forecast_inst : (pysat.Instrument or NoneType)
Instrument object containing data for the 'sw' platform, 'kp' name,
and 'forecast' tag or None to exclude (default=None)
start : (dt.datetime or NoneType)
Starting time for combining data, or None to use earliest loaded
date from the pysat Instruments (default=None)
stop : (dt.datetime)
Ending time for combining data, or None to use the latest loaded date
from the pysat Instruments (default=None)
fill_val : (int or float)
Desired fill value (since the standard instrument fill value differs
from the other sources) (default=np.nan)
Returns
-------
kp_inst : (pysat.Instrument)
Instrument object containing Kp observations for the desired period of
time, merging the standard, recent, and forecasted values based on
their reliability
Notes
-----
Merging prioritizes the standard data, then the recent data, and finally
the forecast data
Will not attempt to download any missing data, but will load data
"""
notes = "Combines data from"
# Create an ordered list of the Instruments, excluding any that are None
all_inst = list()
tag = 'combined'
inst_flag = None
if standard_inst is not None:
all_inst.append(standard_inst)
tag += '_standard'
if inst_flag is None:
inst_flag = 'standard'
if recent_inst is not None:
all_inst.append(recent_inst)
tag += '_recent'
if inst_flag is None:
inst_flag = 'recent'
if forecast_inst is not None:
all_inst.append(forecast_inst)
tag += '_forecast'
if inst_flag is None:
inst_flag = 'forecast'
if len(all_inst) < 2:
raise ValueError("need at two Kp Instrument objects to combine them")
# If the start or stop times are not defined, get them from the Instruments
if start is None:
stimes = [inst.index.min() for inst in all_inst if len(inst.index) > 0]
start = min(stimes) if len(stimes) > 0 else None
if stop is None:
stimes = [inst.index.max() for inst in all_inst if len(inst.index) > 0]
stop = max(stimes) if len(stimes) > 0 else None
stop += pds.DateOffset(days=1)
if start is None or stop is None:
raise ValueError("must either load in Instrument objects or provide" +
" starting and ending times")
# Initialize the output instrument
kp_inst = pysat.Instrument()
kp_inst.platform = all_inst[0].platform
kp_inst.name = all_inst[0].name
kp_inst.tag = tag
kp_inst.date = start
kp_inst.doy = int(start.strftime("%j"))
kp_inst.meta = pysat.Meta()
pysat.instruments.sw_kp.initialize_kp_metadata(kp_inst.meta, 'Kp',
fill_val=fill_val)
kp_times = list()
kp_values = list()
# Cycle through the desired time range
itime = start
while itime < stop and inst_flag is not None:
# Load and save the standard data for as many times as possible
if inst_flag == 'standard':
standard_inst.load(date=itime)
if notes.find("standard") < 0:
notes += " the {:} source ({:} to ".format(inst_flag,
itime.date())
if len(standard_inst.index) == 0:
inst_flag = 'forecast' if recent_inst is None else 'recent'
notes += "{:})".format(itime.date())
else:
kp_times.extend(list(standard_inst.index))
kp_values.extend(list(standard_inst['Kp']))
itime = kp_times[-1] + pds.DateOffset(hours=3)
# Load and save the recent data for as many times as possible
if inst_flag == 'recent':
# Determine which files should be loaded
if len(recent_inst.index) == 0:
files = np.unique(recent_inst.files.files[itime:stop])
else:
files = [None] # No load needed, if already initialized
# Cycle through all possible files of interest, saving relevant
# data
for filename in files:
if filename is not None:
recent_inst.load(fname=filename)
if notes.find("recent") < 0:
notes += " the {:} source ({:} to ".format(inst_flag,
itime.date())
# Determine which times to save
local_fill_val = recent_inst.meta['Kp'].fill
good_times = ((recent_inst.index >= itime) &
(recent_inst.index < stop))
good_vals = recent_inst['Kp'][good_times] != local_fill_val
# Save output data and cycle time
kp_times.extend(list(recent_inst.index[good_times][good_vals]))
kp_values.extend(list(recent_inst['Kp'][good_times][good_vals]))
itime = kp_times[-1] + pds.DateOffset(hours=3)
inst_flag = 'forecast' if forecast_inst is not None else None
notes += "{:})".format(itime.date())
# Load and save the forecast data for as many times as possible
if inst_flag == "forecast":
# Determine which files should be loaded
if len(forecast_inst.index) == 0:
files = np.unique(forecast_inst.files.files[itime:stop])
else:
files = [None] # No load needed, if already initialized
# Cycle through all possible files of interest, saving relevant
# data
for filename in files:
if filename is not None:
forecast_inst.load(fname=filename)
if notes.find("forecast") < 0:
notes += " the {:} source ({:} to ".format(inst_flag,
itime.date())
# Determine which times to save
local_fill_val = forecast_inst.meta['Kp'].fill
good_times = ((forecast_inst.index >= itime) &
(forecast_inst.index < stop))
good_vals = forecast_inst['Kp'][good_times] != local_fill_val
# Save desired data and cycle time
kp_times.extend(list(forecast_inst.index[good_times][good_vals]))
kp_values.extend(list(forecast_inst['Kp'][good_times][good_vals]))
itime = kp_times[-1] + pds.DateOffset(hours=3)
notes += "{:})".format(itime.date())
inst_flag = None
if inst_flag is not None:
notes += "{:})".format(itime.date())
# Determine if the beginning or end of the time series needs to be padded
freq = None if len(kp_times) < 2 else pysat.utils.time.calc_freq(kp_times)
date_range = pds.date_range(start=start, end=stop-pds.DateOffset(days=1),
freq=freq)
if len(kp_times) == 0:
kp_times = date_range
if date_range[0] < kp_times[0]:
# Extend the time and value arrays from their beginning with fill
# values
itime = abs(date_range - kp_times[0]).argmin()
kp_times.reverse()
kp_values.reverse()
extend_times = list(date_range[:itime])
extend_times.reverse()
kp_times.extend(extend_times)
kp_values.extend([fill_val for kk in extend_times])
kp_times.reverse()
kp_values.reverse()
if date_range[-1] > kp_times[-1]:
# Extend the time and value arrays from their end with fill values
itime = abs(date_range - kp_times[-1]).argmin() + 1
extend_times = list(date_range[itime:])
kp_times.extend(extend_times)
kp_values.extend([fill_val for kk in extend_times])
# Save output data
kp_inst.data = pds.DataFrame(kp_values, columns=['Kp'], index=kp_times)
# Resample the output data, filling missing values
if(date_range.shape != kp_inst.index.shape or
abs(date_range - kp_inst.index).max().total_seconds() > 0.0):
kp_inst.data = kp_inst.data.resample(freq).fillna(method=None)
if np.isfinite(fill_val):
kp_inst.data[np.isnan(kp_inst.data)] = fill_val
# Update the metadata notes for this custom procedure
notes += ", in that order"
kp_inst.meta.__setitem__('Kp', {kp_inst.meta.notes_label: notes})
return kp_inst
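def example_combine_kp(start, stop):
    # Minimal usage sketch (assumption): the platform/name/tag values follow the
    # docstring above, and data for each source is expected to be already
    # downloaded, since combine_kp only loads and never downloads.
    standard = pysat.Instrument(platform='sw', name='kp', tag='')
    recent = pysat.Instrument(platform='sw', name='kp', tag='recent')
    forecast = pysat.Instrument(platform='sw', name='kp', tag='forecast')
    return combine_kp(standard_inst=standard, recent_inst=recent,
                      forecast_inst=forecast, start=start, stop=stop)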
def combine_f107(standard_inst, forecast_inst, start=None, stop=None):
""" Combine the output from the measured and forecasted F10.7 sources
Parameters
----------
standard_inst : (pysat.Instrument or NoneType)
Instrument object containing data for the 'sw' platform, 'f107' name,
and '', 'all', 'prelim', or 'daily' tag
forecast_inst : (pysat.Instrument or NoneType)
Instrument object containing data for the 'sw' platform, 'f107' name,
and 'prelim', '45day' or 'forecast' tag
start : (dt.datetime or NoneType)
Starting time for combining data, or None to use earliest loaded
date from the pysat Instruments (default=None)
stop : (dt.datetime)
Ending time for combining data, or None to use the latest loaded date
from the pysat Instruments (default=None)
Returns
-------
f107_inst : (pysat.Instrument)
Instrument object containing F10.7 observations for the desired period
of time, merging the standard, 45day, and forecasted values based on
their reliability
Notes
-----
Merging prioritizes the standard data, then the 45day data, and finally
the forecast data
Will not attempt to download any missing data, but will load data
"""
# Initialize metadata and flags
notes = "Combines data from"
stag = standard_inst.tag if len(standard_inst.tag) > 0 else 'default'
tag = 'combined_{:s}_{:s}'.format(stag, forecast_inst.tag)
inst_flag = 'standard'
# If the start or stop times are not defined, get them from the Instruments
if start is None:
stimes = [inst.index.min() for inst in [standard_inst, forecast_inst]
if len(inst.index) > 0]
start = min(stimes) if len(stimes) > 0 else None
if stop is None:
stimes = [inst.index.max() for inst in [standard_inst, forecast_inst]
if len(inst.index) > 0]
stop = max(stimes) + pds.DateOffset(days=1) if len(stimes) > 0 else None
if start is None or stop is None:
raise ValueError("must either load in Instrument objects or provide" +
" starting and ending times")
if start >= stop:
raise ValueError("date range is zero or negative")
# Initialize the output instrument
f107_inst = pysat.Instrument()
f107_inst.platform = standard_inst.platform
f107_inst.name = standard_inst.name
f107_inst.tag = tag
f107_inst.date = start
f107_inst.doy = int(start.strftime("%j"))
fill_val = None
f107_times = list()
f107_values = list()
# Cycle through the desired time range
itime = start
while itime < stop and inst_flag is not None:
# Load and save the standard data for as many times as possible
if inst_flag == 'standard':
# Test to see if data loading is needed
if not np.any(standard_inst.index == itime):
if standard_inst.tag == 'daily':
# Add 30 days
standard_inst.load(date=itime+pds.DateOffset(days=30))
else:
standard_inst.load(date=itime)
good_times = ((standard_inst.index >= itime) &
(standard_inst.index < stop))
if notes.find("standard") < 0:
notes += " the {:} source ({:} to ".format(inst_flag,
itime.date())
if np.any(good_times):
if fill_val is None:
f107_inst.meta = standard_inst.meta
fill_val = f107_inst.meta['f107'].fill
good_vals = standard_inst['f107'][good_times] != fill_val
f107_times.extend(list(standard_inst.index[good_times][good_vals]))
f107_values.extend(list(standard_inst['f107'][good_times][good_vals]))
itime = f107_times[-1] + pds.DateOffset(days=1)
import json
import time
import multiprocessing as mp
import urllib.request
import pandas as pd
import coordinates_transform as ct
ak = ''
def geo_coding():
global ak
filename = r'F:\data\form\hn-illegal\csv\hn_accidents.csv'
dataset = pd.read_csv(filename, low_memory=False)
import pandas as pd
import numpy as np
class GroupedDataFrame(pd.DataFrame):
"""
Grouped DataFrame
This is just a :class:`pandas.DataFrame` with information
on how to do the grouping.
"""
# See: subclassing-pandas-data-structures at
# http://pandas.pydata.org/pandas-docs/stable/internals.html
_metadata = ['plydata_groups']
plydata_groups = None
def __init__(self, data=None, groups=None, **kwargs):
super().__init__(data=data, **kwargs)
if groups is not None:
self.plydata_groups = list(pd.unique(groups))
from typing import List, Dict, TYPE_CHECKING, Optional, Union, Tuple
from gym import spaces
import numpy as np
import pandas as pd
from highway_env import utils
from highway_env.envs.common.finite_mdp import compute_ttc_grid
from highway_env.envs.common.graphics import EnvViewer
from highway_env.road.lane import AbstractLane
from highway_env.utils import distance_to_circle
from highway_env.vehicle.controller import MDPVehicle
if TYPE_CHECKING:
from highway_env.envs.common.abstract import AbstractEnv
class ObservationType(object):
def __init__(self, env: 'AbstractEnv', **kwargs) -> None:
self.env = env
self.__observer_vehicle = None
def space(self) -> spaces.Space:
"""Get the observation space."""
raise NotImplementedError()
def observe(self):
"""Get an observation of the environment state."""
raise NotImplementedError()
@property
def observer_vehicle(self):
"""
The vehicle observing the scene.
If not set, the first controlled vehicle is used by default.
"""
return self.__observer_vehicle or self.env.vehicle
@observer_vehicle.setter
def observer_vehicle(self, vehicle):
self.__observer_vehicle = vehicle
class GrayscaleObservation(ObservationType):
"""
An observation class that collects directly what the simulator renders.
Also stacks the collected frames as in the nature DQN.
The observation shape is C x W x H.
Specific keys are expected in the configuration dictionary passed.
Example of observation dictionary in the environment config:
observation": {
"type": "GrayscaleObservation",
"observation_shape": (84, 84)
"stack_size": 4,
"weights": [0.2989, 0.5870, 0.1140], # weights for RGB conversion,
}
"""
def __init__(self, env: 'AbstractEnv',
observation_shape: Tuple[int, int],
stack_size: int,
weights: List[float],
scaling: Optional[float] = None,
centering_position: Optional[List[float]] = None,
**kwargs) -> None:
super().__init__(env)
self.observation_shape = observation_shape
self.shape = (stack_size, ) + self.observation_shape
self.weights = weights
self.obs = np.zeros(self.shape)
# The viewer configuration can be different between this observation and env.render() (typically smaller)
viewer_config = env.config.copy()
viewer_config.update({
"offscreen_rendering": True,
"screen_width": self.observation_shape[0],
"screen_height": self.observation_shape[1],
"scaling": scaling or viewer_config["scaling"],
"centering_position": centering_position or viewer_config["centering_position"]
})
self.viewer = EnvViewer(env, config=viewer_config)
def space(self) -> spaces.Space:
return spaces.Box(shape=self.shape, low=0, high=255, dtype=np.uint8)
def observe(self) -> np.ndarray:
new_obs = self._render_to_grayscale()
self.obs = np.roll(self.obs, -1, axis=0)
self.obs[-1, :, :] = new_obs
return self.obs
def _render_to_grayscale(self) -> np.ndarray:
# TODO: center rendering on the observer vehicle
self.viewer.display()
raw_rgb = self.viewer.get_image() # H x W x C
raw_rgb = np.moveaxis(raw_rgb, 0, 1)
return np.dot(raw_rgb[..., :3], self.weights).clip(0, 255).astype(np.uint8)
class TimeToCollisionObservation(ObservationType):
def __init__(self, env: 'AbstractEnv', horizon: int = 10, **kwargs: dict) -> None:
super().__init__(env)
self.horizon = horizon
def space(self) -> spaces.Space:
try:
return spaces.Box(shape=self.observe().shape, low=0, high=1, dtype=np.float32)
except AttributeError:
return spaces.Space()
def observe(self) -> np.ndarray:
if not self.env.road:
return np.zeros((3, 3, int(self.horizon * self.env.config["policy_frequency"])))
grid = compute_ttc_grid(self.env, vehicle=self.observer_vehicle,
time_quantization=1/self.env.config["policy_frequency"], horizon=self.horizon)
padding = np.ones(np.shape(grid))
padded_grid = np.concatenate([padding, grid, padding], axis=1)
obs_lanes = 3
l0 = grid.shape[1] + self.observer_vehicle.lane_index[2] - obs_lanes // 2
lf = grid.shape[1] + self.observer_vehicle.lane_index[2] + obs_lanes // 2
clamped_grid = padded_grid[:, l0:lf+1, :]
repeats = np.ones(clamped_grid.shape[0])
repeats[np.array([0, -1])] += clamped_grid.shape[0]
padded_grid = np.repeat(clamped_grid, repeats.astype(int), axis=0)
obs_speeds = 3
v0 = grid.shape[0] + self.observer_vehicle.speed_index - obs_speeds // 2
vf = grid.shape[0] + self.observer_vehicle.speed_index + obs_speeds // 2
clamped_grid = padded_grid[v0:vf + 1, :, :]
return clamped_grid
class KinematicObservation(ObservationType):
"""Observe the kinematics of nearby vehicles."""
FEATURES: List[str] = ['presence', 'x', 'y', 'vx', 'vy']
def __init__(self, env: 'AbstractEnv',
features: List[str] = None,
vehicles_count: int = 5,
features_range: Dict[str, List[float]] = None,
absolute: bool = False,
order: str = "sorted",
normalize: bool = True,
clip: bool = True,
see_behind: bool = False,
observe_intentions: bool = False,
**kwargs: dict) -> None:
"""
:param env: The environment to observe
:param features: Names of features used in the observation
:param vehicles_count: Number of observed vehicles
:param absolute: Use absolute coordinates
:param order: Order of observed vehicles. Values: sorted, shuffled
:param normalize: Should the observation be normalized
:param clip: Should the value be clipped in the desired range
:param see_behind: Should the observation contains the vehicles behind
:param observe_intentions: Observe the destinations of other vehicles
"""
super().__init__(env)
self.features = features or self.FEATURES
self.vehicles_count = vehicles_count
self.features_range = features_range
self.absolute = absolute
self.order = order
self.normalize = normalize
self.clip = clip
self.see_behind = see_behind
self.observe_intentions = observe_intentions
def space(self) -> spaces.Space:
return spaces.Box(shape=(self.vehicles_count, len(self.features)), low=-1, high=1, dtype=np.float32)
def normalize_obs(self, df: pd.DataFrame) -> pd.DataFrame:
"""
Normalize the observation values.
For now, assume that the road is straight along the x axis.
:param Dataframe df: observation data
"""
if not self.features_range:
side_lanes = self.env.road.network.all_side_lanes(self.observer_vehicle.lane_index)
self.features_range = {
"x": [-5.0 * MDPVehicle.SPEED_MAX, 5.0 * MDPVehicle.SPEED_MAX],
"y": [-AbstractLane.DEFAULT_WIDTH * len(side_lanes), AbstractLane.DEFAULT_WIDTH * len(side_lanes)],
"vx": [-2*MDPVehicle.SPEED_MAX, 2*MDPVehicle.SPEED_MAX],
"vy": [-2*MDPVehicle.SPEED_MAX, 2*MDPVehicle.SPEED_MAX]
}
for feature, f_range in self.features_range.items():
if feature in df:
df[feature] = utils.lmap(df[feature], [f_range[0], f_range[1]], [-1, 1])
if self.clip:
df[feature] = np.clip(df[feature], -1, 1)
return df
def observe(self) -> np.ndarray:
if not self.env.road:
return np.zeros(self.space().shape)
# Add ego-vehicle
df = pd.DataFrame.from_records([self.observer_vehicle.to_dict()])[self.features]
# Add nearby traffic
# sort = self.order == "sorted"
close_vehicles = self.env.road.close_vehicles_to(self.observer_vehicle,
self.env.PERCEPTION_DISTANCE,
count=self.vehicles_count - 1,
see_behind=self.see_behind)
if close_vehicles:
origin = self.observer_vehicle if not self.absolute else None
df = df.append(pd.DataFrame.from_records(
[v.to_dict(origin, observe_intentions=self.observe_intentions)
for v in close_vehicles[-self.vehicles_count + 1:]])[self.features],
ignore_index=True)
# Normalize and clip
if self.normalize:
df = self.normalize_obs(df)
# Fill missing rows
if df.shape[0] < self.vehicles_count:
rows = np.zeros((self.vehicles_count - df.shape[0], len(self.features)))
df = df.append(pd.DataFrame(data=rows, columns=self.features), ignore_index=True)
# Reorder
df = df[self.features]
obs = df.values.copy()
if self.order == "shuffled":
self.env.np_random.shuffle(obs[1:])
# Flatten
return obs
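# Hedged example (assumption): a typical environment configuration dictionary
# selecting the kinematic observation above. The "type" string and key names are
# inferred from this class and its constructor parameters, not quoted from the
# original documentation.
EXAMPLE_KINEMATICS_OBSERVATION_CONFIG = {
    "observation": {
        "type": "Kinematics",
        "vehicles_count": 5,
        "features": ["presence", "x", "y", "vx", "vy"],
        "absolute": False,
        "order": "sorted",
        "normalize": True,
    }
}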
class OccupancyGridObservation(ObservationType):
"""Observe an occupancy grid of nearby vehicles."""
FEATURES: List[str] = ['presence', 'vx', 'vy']
GRID_SIZE: List[List[float]] = [[-5.5*5, 5.5*5], [-5.5*5, 5.5*5]]
GRID_STEP: List[int] = [5, 5]
def __init__(self,
env: 'AbstractEnv',
features: Optional[List[str]] = None,
grid_size: Optional[List[List[float]]] = None,
grid_step: Optional[List[int]] = None,
features_range: Dict[str, List[float]] = None,
absolute: bool = False,
**kwargs: dict) -> None:
"""
:param env: The environment to observe
:param features: Names of features used in the observation
:param vehicles_count: Number of observed vehicles
"""
super().__init__(env)
self.features = features if features is not None else self.FEATURES
self.grid_size = np.array(grid_size) if grid_size is not None else np.array(self.GRID_SIZE)
self.grid_step = np.array(grid_step) if grid_step is not None else np.array(self.GRID_STEP)
grid_shape = np.asarray(np.floor((self.grid_size[:, 1] - self.grid_size[:, 0]) / self.grid_step), dtype=int)
self.grid = np.zeros((len(self.features), *grid_shape))
self.features_range = features_range
self.absolute = absolute
def space(self) -> spaces.Space:
return spaces.Box(shape=self.grid.shape, low=-1, high=1, dtype=np.float32)
def normalize(self, df: pd.DataFrame) -> pd.DataFrame:
"""
Normalize the observation values.
For now, assume that the road is straight along the x axis.
:param Dataframe df: observation data
"""
if not self.features_range:
self.features_range = {
"vx": [-2*MDPVehicle.SPEED_MAX, 2*MDPVehicle.SPEED_MAX],
"vy": [-2*MDPVehicle.SPEED_MAX, 2*MDPVehicle.SPEED_MAX]
}
for feature, f_range in self.features_range.items():
if feature in df:
df[feature] = utils.lmap(df[feature], [f_range[0], f_range[1]], [-1, 1])
return df
def observe(self) -> np.ndarray:
if not self.env.road:
return np.zeros(self.space().shape)
if self.absolute:
raise NotImplementedError()
else:
# Add nearby traffic
self.grid.fill(0)
df = pd.DataFrame.from_records(
[v.to_dict(self.observer_vehicle) for v in self.env.road.vehicles])
# Normalize
df = self.normalize(df)
# Fill-in features
for layer, feature in enumerate(self.features):
for _, vehicle in df.iterrows():
x, y = vehicle["x"], vehicle["y"]
# Recover unnormalized coordinates for cell index
if "x" in self.features_range:
x = utils.lmap(x, [-1, 1], [self.features_range["x"][0], self.features_range["x"][1]])
if "y" in self.features_range:
y = utils.lmap(y, [-1, 1], [self.features_range["y"][0], self.features_range["y"][1]])
cell = (int((x - self.grid_size[0, 0]) / self.grid_step[0]),
int((y - self.grid_size[1, 0]) / self.grid_step[1]))
if 0 <= cell[1] < self.grid.shape[-2] and 0 <= cell[0] < self.grid.shape[-1]:
self.grid[layer, cell[1], cell[0]] = vehicle[feature]
# Clip
obs = np.clip(self.grid, -1, 1)
return obs
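def _example_cell_index():
    # Hedged worked example (synthetic coordinates): with the default grid of
    # [-27.5, 27.5] per axis and a 5 m step, a vehicle at (x=12.0, y=-3.0) maps
    # to cell (7, 4) and is written to grid[layer, 4, 7] by observe() above.
    grid_size = np.array(OccupancyGridObservation.GRID_SIZE)
    grid_step = np.array(OccupancyGridObservation.GRID_STEP)
    x, y = 12.0, -3.0
    cell = (int((x - grid_size[0, 0]) / grid_step[0]),
            int((y - grid_size[1, 0]) / grid_step[1]))
    return cell  # (7, 4)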
class KinematicsGoalObservation(KinematicObservation):
def __init__(self, env: 'AbstractEnv', scales: List[float], **kwargs: dict) -> None:
self.scales = np.array(scales)
super().__init__(env, **kwargs)
def space(self) -> spaces.Space:
try:
obs = self.observe()
return spaces.Dict(dict(
desired_goal=spaces.Box(-np.inf, np.inf, shape=obs["desired_goal"].shape, dtype=np.float32),
achieved_goal=spaces.Box(-np.inf, np.inf, shape=obs["achieved_goal"].shape, dtype=np.float32),
observation=spaces.Box(-np.inf, np.inf, shape=obs["observation"].shape, dtype=np.float32),
))
except AttributeError:
return spaces.Space()
def observe(self) -> Dict[str, np.ndarray]:
if not self.observer_vehicle:
return {
"observation": np.zeros((len(self.features),)),
"achieved_goal": np.zeros((len(self.features),)),
"desired_goal": np.zeros((len(self.features),))
}
obs = np.ravel(pd.DataFrame.from_records([self.observer_vehicle.to_dict()])[self.features])
goal = np.ravel(pd.DataFrame.from_records([self.env.goal.to_dict()])[self.features])
obs = {
"observation": obs / self.scales,
"achieved_goal": obs / self.scales,
"desired_goal": goal / self.scales
}
return obs
class AttributesObservation(ObservationType):
def __init__(self, env: 'AbstractEnv', attributes: List[str], **kwargs: dict) -> None:
self.env = env
self.attributes = attributes
def space(self) -> spaces.Space:
try:
obs = self.observe()
return spaces.Dict({
attribute: spaces.Box(-np.inf, np.inf, shape=obs[attribute].shape, dtype=np.float32)
for attribute in self.attributes
})
except AttributeError:
return spaces.Space()
def observe(self) -> Dict[str, np.ndarray]:
return {
attribute: getattr(self.env, attribute) for attribute in self.attributes
}
class MyObservation(ObservationType):
"""Observe a Lidar of nearby vehicles."""
LENGTH = 5.0
""" Vehicle length [m] """
WIDTH = 2.0
""" Vehicle width [m] """
FEATURES: List[str] = ['static', 'dynamic']
GRID_SIZE: List[List[float]] = [[-5.5*5, 5.5*5], [-5.5*5, 5.5*5]]
GRID_STEP: List[int] = [5, 5]
ANGLE_LIMIT: List[float] = [0., 2*np.pi, np.deg2rad(1)]
RANGE_LIMIT: List[float] = [0., 100.]
def __init__(self,
env: 'AbstractEnv',
features: Optional[List[str]] = None,
grid_size: Optional[List[List[float]]] = None,
grid_step: Optional[List[int]] = None,
angle_limit: Optional[List[float]] = None,
range_limit: Optional[List[float]] = None,
**kwargs: dict) -> None:
"""
:param env: The environment to observe
:param features: Names of features used in the observation
:param vehicles_count: Number of observed vehicles
"""
self.env = env
self.features = features if features is not None else self.FEATURES
self.grid_size = np.array(grid_size) if grid_size is not None else np.array(self.GRID_SIZE)
self.grid_step = np.array(grid_step) if grid_step is not None else np.array(self.GRID_STEP)
grid_shape = np.asarray(np.floor((self.grid_size[:, 1] - self.grid_size[:, 0]) / self.grid_step), dtype=int)
self.grid = np.zeros((len(self.features), *grid_shape))
# Lidar limitation
self.angle_limit = np.array(angle_limit) if angle_limit is not None else np.array(self.ANGLE_LIMIT)
self.range_limit = np.array(range_limit) if range_limit is not None else np.array(self.RANGE_LIMIT)
self.__lidar_init()
def space(self) -> spaces.Space:
return spaces.Box(shape=self.grid.shape, low=0, high=1, dtype=np.float32)
def observe(self) -> np.ndarray:
if not self.env.road:
obs = np.zeros(self.space().shape)
else:
obs = self.__lidar()
# obs = np.clip(self.grid, -1, 1)
return obs
def __lidar_init(self):
angle_min, angle_max, angle_step_size = self.angle_limit
range_shape = np.asarray(abs(angle_max - angle_min) / angle_step_size, dtype=int)
self.range = np.zeros(range_shape, dtype=np.float32)
self.lidar_endpoint = np.zeros_like(self.range)
# data = {'angle': np.arange(angle_min, angle_max, angle_step_size)}
# self.df_lidar = pd.DataFrame(data, columns=['angle', 'x_min', 'y_min', 'x_max', 'y_max', 'intersect', 'x_i', 'y_i', 'distance'])
# self.df_lidar['intersect'] = False
def __lidar(self) -> np.ndarray:
if not self.env.road:
return np.zeros(self.space().shape)
df_v = pd.DataFrame.from_records([v.to_dict(self.env.vehicle) for v in self.env.road.vehicles])
df_v['distance'] = utils.distance([0., 0.],[df_v['x'], df_v['y']])
df_v['x1'] = np.nan
df_v['y1'] = np.nan
df_v['x2'] = np.nan
df_v['y2'] = np.nan
df_v['x3'] = np.nan
df_v['y3'] = np.nan
df_v['x4'] = np.nan
df_v['y4'] = np.nan
for i in df_v.index:
[x1, y1], [x2, y2], [x3, y3], [x4, y4] = utils.corner_point(([df_v['x'][i], df_v['y'][i]],
self.LENGTH,
self.WIDTH,
[df_v['cos_h'][i], df_v['sin_h'][i]]))
df_v['x1'][i] = x1
df_v['y1'][i] = y1
df_v['x2'][i] = x2
df_v['y2'][i] = y2
df_v['x3'][i] = x3
df_v['y3'][i] = y3
df_v['x4'][i] = x4
df_v['y4'][i] = y4
angle_min, angle_max, angle_step_size = self.angle_limit
# data = {'angle': np.arange(angle_min, angle_max, angle_step_size)}
data = {'angle': np.arange(angle_min, angle_max, angle_step_size) + np.arccos(df_v['cos_h'][0])}
df_lidar = pd.DataFrame(data, columns=['angle', 'x_min', 'y_min', 'x_max', 'y_max', 'intersect', 'x_i', 'y_i', 'distance'])
import aiohttp
import asyncio
import time
import argparse
import numpy as np
import pandas as pd
import os
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--dir', action="store")
parser.add_argument('-s', '--service', action="store")
args = parser.parse_args()
result_dir = args.dir
if not os.path.exists(result_dir):
os.makedirs(result_dir)
serviceType = args.service
if serviceType == "local":
SERVER_URL = "http://localhost:8080/tcp"
if serviceType == "clusterIP":
SERVER_URL = 'http://172.16.17.32:31619/tcp'
if serviceType == "nodePort":
SERVER_URL = 'http://192.168.3.11:31234/tcp'
if serviceType == "Host":
SERVER_URL = 'http://192.168.3.11:8080/tcp'
async def get_action(seq_no):
send_time = time.monotonic()
async with aiohttp.ClientSession() as session:
print('get')
async with session.post(SERVER_URL, data=b'ddd') as response:
r = await response.text()
receive_time = time.monotonic()
delay = receive_time - send_time
return seq_no, delay
async def main(period):
seq_no = 0
pending_tasks = set()
done_tasks = set()
# SERVER_URL = 'http://192.168.3.11:31234'
# SERVER_URL = 'http://192.168.3.11:8080'
df_array = np.empty([15000, 2])
uf_array = np.empty([15000, 4])
ind = 0
indu = 0
current_time = time.monotonic()
next_step = current_time
while True:
start_time = time.monotonic()
next_step += period
for task in done_tasks:
seq_num, delay = task.result()
df_array[ind] = [seq_num, delay]
ind += 1
if ind >= 15000:
break
if indu >= 15000:
break
seq_no += 1
await asyncio.sleep(0.002)
uf_array[indu] = [start_time, time.monotonic() - start_time, len(pending_tasks), len(done_tasks)]
indu += 1
pending_tasks.add(asyncio.create_task(get_action(seq_no)))
(done_tasks, pending_tasks) = await asyncio.wait(
pending_tasks,
return_when=asyncio.ALL_COMPLETED,
timeout=max(0, next_step - time.monotonic())
)
return df_array, uf_array
if __name__ == "__main__":
loop = asyncio.get_event_loop()
period = 0.02
main_group = asyncio.gather(main(period))
result = loop.run_until_complete(main_group)
columns = ['seq_no', 'delay']
ucolumns = ['time', 'clien_execution', 'pending', 'done']
print(result)
df = pd.DataFrame(result[0][0], columns=columns)
uf = pd.DataFrame(result[0][1], columns=ucolumns)
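# Hedged continuation sketch (assumption): persist both frames into the result
# directory created at start-up; these file names are hypothetical.
df.to_csv(os.path.join(result_dir, 'delays.csv'), index=False)
uf.to_csv(os.path.join(result_dir, 'client_stats.csv'), index=False)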
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=too-many-instance-attributes
"""
Hive-Reporting provides easy to read case metrics supporting team contirubtions
and frequency without the need to access or create custom report in
The Hive Dashboard
"""
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import smtplib
import time
import sys
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email import encoders
from thehive4py.api import TheHiveApi
import pandas as pd
API = TheHiveApi("", "")
SMTP_SERVER = ""
SENT_TO = "comma,seperated,as,needed"
class SIRPPipeline(object):
"""Security Incident Response Platform prosessing pipeline.
Attributes:
TIME_FMT (str): Time format.
data_frame_INDEX (list): List of desired parsed values from dictionary.
"""
TIME_FMT = "%m/%d/%Y %H:%M:%S"
data_frame_INDEX = ["Created", "Severity", "Owner", "Name", "Closed", "Resolution"]
counts_frame_INDEX = [
"totals",
"Team.Member",
"Team.Member1",
"Team.Member2",
"Team.Member3",
"Team.Member4",
"Team.Member5",
"Team.Member6",
"Duplicated",
"TruePositive",
"FalsePositive",
int("1"),
int("2"),
int("3"),
]
def __init__(
self, api,
):
"""
Security Incident Response Platform prosessing pipeline.
Accepts API object on initialization phase.
"""
self._api = api
self._api_response = self._api.find_cases(range="all", sort=[])
self._all30_dict = {}
self._all60_dict = {}
self._all90_dict = {}
self._data_frame_30days = None
self._data_frame_60days = None
self._data_frame_90days = None
self._data_frame_counts = None
self._dataset = None
def _load_data(self):
"""Finds all cases on SIRP endpoint
Returns:
(obj): api_response
"""
if self._api_response.status_code == 200:
self._dataset = self._api_response.json()
self._fill_day_dicts()
@staticmethod
def _add_record(days_dict, record, key):
"""creates objects for dictionary
(obj): Name
(obj): Owner
(obj): Severity
(obj): Created
(obj): Closed
(obj): Resolution
"""
days_dict[key] = {
"Name": record["title"],
"Owner": record["owner"],
"Severity": record["severity"],
"Created": (time.strftime(SIRPPipeline.TIME_FMT, time.gmtime(record["createdAt"] / 1000.0))),
}
if "endDate" in record:
days_dict[key].update(
{
"Closed": (time.strftime(SIRPPipeline.TIME_FMT, time.gmtime(record["endDate"] / 1000.0),)),
"Resolution": record["resolutionStatus"],
}
)
def _fill_day_dicts(self):
"""Set keys for dictionary based on comparitive EPOCH
(obj): self._all30_dict
(obj): self._all60_dict
(obj): self._all90_dict
Returns:
Date corrected (obj)
"""
today = datetime.date.today()
for i, record in enumerate(self._dataset):
if (record["createdAt"] / 1000) > time.mktime((today - datetime.timedelta(days=30)).timetuple()):
self._add_record(self._all30_dict, record, key=i)
elif (record["createdAt"] / 1000) > time.mktime((today - datetime.timedelta(days=60)).timetuple()):
self._add_record(self._all60_dict, record, key=i)
else:
self._add_record(self._all90_dict, record, key=i)
def make_dataframes(self):
"""Creates (4) pandas dataframes
(obj): data_frame_30day
(obj): data_frame_60days
(obj): data_frame_90days
(obj): data_frame_counts
"""
self._data_frame_30days = pd.DataFrame(self._all30_dict, index=SIRPPipeline.data_frame_INDEX).transpose()
self._data_frame_60days = pd.DataFrame(self._all60_dict, index=SIRPPipeline.data_frame_INDEX).transpose()
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
import inspect
from ..log import get_module_logger
from . import strategy as strategy_pool
from .strategy.strategy import BaseStrategy
from .backtest.exchange import Exchange
from .backtest.backtest import backtest as backtest_func, get_date_range
from ..data import D
from ..config import C
from ..data.dataset.utils import get_level_index
from ..utils import init_instance_by_config
logger = get_module_logger("Evaluate")
def risk_analysis(r, N=252):
"""Risk Analysis
Parameters
----------
r : pandas.Series
daily return series.
N: int
scaler for annualizing information_ratio (day: 250, week: 50, month: 12).
"""
mean = r.mean()
std = r.std(ddof=1)
annualized_return = mean * N
information_ratio = mean / std * np.sqrt(N)
max_drawdown = (r.cumsum() - r.cumsum().cummax()).min()
data = {
"mean": mean,
"std": std,
"annualized_return": annualized_return,
"information_ratio": information_ratio,
"max_drawdown": max_drawdown,
}
res = pd.Series(data, index=data.keys()).to_frame("risk")
return res
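def example_risk_analysis():
    # Hedged usage example (synthetic returns, fixed seed): annualizes a fake
    # daily return series with the default N=252 scaling used above.
    rng = np.random.RandomState(0)
    r = pd.Series(rng.normal(5e-4, 1e-2, size=252))
    return risk_analysis(r)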
def get_strategy(
strategy=None,
topk=50,
margin=0.5,
n_drop=5,
risk_degree=0.95,
str_type="dropout",
adjust_dates=None,
):
"""get_strategy
There will be 3 ways to return a stratgy. Please follow the code.
Parameters
----------
strategy : Strategy()
strategy used in backtest.
topk : int (Default value: 50)
top-N stocks to buy.
margin : int or float(Default value: 0.5)
- if isinstance(margin, int):
sell_limit = margin
- else:
sell_limit = pred_in_a_day.count() * margin
buffer margin, in single score_mode, continue holding stock if it is in nlargest(sell_limit).
sell_limit should be no less than topk.
n_drop : int
number of stocks to be replaced in each trading date.
risk_degree: float
0-1, 0.95 for example, use 95% money to trade.
str_type: 'amount', 'weight' or 'dropout'
strategy type: TopkAmountStrategy ,TopkWeightStrategy or TopkDropoutStrategy.
Returns
-------
:class: Strategy
an initialized strategy object
"""
# There will be 3 ways to return a strategy.
if strategy is None:
# 1) create strategy with param `strategy`
str_cls_dict = {
"amount": "TopkAmountStrategy",
"weight": "TopkWeightStrategy",
"dropout": "TopkDropoutStrategy",
}
logger.info("Create new streategy ")
str_cls = getattr(strategy_pool, str_cls_dict.get(str_type))
strategy = str_cls(
topk=topk,
buffer_margin=margin,
n_drop=n_drop,
risk_degree=risk_degree,
adjust_dates=adjust_dates,
)
elif isinstance(strategy, (dict, str)):
# 2) create strategy with init_instance_by_config
strategy = init_instance_by_config(strategy)
# else: nothing happens. 3) Use the strategy directly
if not isinstance(strategy, BaseStrategy):
raise TypeError("Strategy not supported")
return strategy
def get_exchange(
pred,
exchange=None,
subscribe_fields=[],
open_cost=0.0015,
close_cost=0.0025,
min_cost=5.0,
trade_unit=None,
limit_threshold=None,
deal_price=None,
extract_codes=False,
shift=1,
):
"""get_exchange
Parameters
----------
# exchange related arguments
exchange: Exchange().
subscribe_fields: list
subscribe fields.
open_cost : float
open transaction cost.
close_cost : float
close transaction cost.
min_cost : float
min transaction cost.
trade_unit : int
100 for China A.
deal_price: str
dealing price type: 'close', 'open', 'vwap'.
limit_threshold : float
limit move 0.1 (10%) for example, long and short with same limit.
extract_codes: bool
will we pass the codes extracted from the pred to the exchange.
NOTE: This will be faster with offline qlib.
Returns
-------
:class: Exchange
an initialized Exchange object
"""
if trade_unit is None:
trade_unit = C.trade_unit
if limit_threshold is None:
limit_threshold = C.limit_threshold
if deal_price is None:
deal_price = C.deal_price
if exchange is None:
logger.info("Create new exchange")
# handle exception for deal_price
if deal_price[0] != "$":
deal_price = "$" + deal_price
if extract_codes:
codes = sorted(pred.index.get_level_values("instrument").unique())
else:
codes = "all" # TODO: We must ensure that 'all.txt' includes all the stocks
dates = sorted(pred.index.get_level_values("datetime").unique())
dates = np.append(dates, get_date_range(dates[-1], left_shift=1, right_shift=shift))
exchange = Exchange(
trade_dates=dates,
codes=codes,
deal_price=deal_price,
subscribe_fields=subscribe_fields,
limit_threshold=limit_threshold,
open_cost=open_cost,
close_cost=close_cost,
min_cost=min_cost,
trade_unit=trade_unit,
)
return exchange
# This is the API for compatibility for legacy code
def backtest(pred, account=1e9, shift=1, benchmark="SH000905", verbose=True, **kwargs):
"""This function will help you set a reasonable Exchange and provide default value for strategy
Parameters
----------
- **backtest workflow related or commmon arguments**
pred : pandas.DataFrame
predict should has <datetime, instrument> index and one `score` column.
account : float
init account value.
shift : int
whether to shift prediction by one day.
benchmark : str
benchmark code, default is SH000905 CSI 500.
verbose : bool
whether to print log.
- **strategy related arguments**
strategy : Strategy()
strategy used in backtest.
topk : int (Default value: 50)
top-N stocks to buy.
margin : int or float(Default value: 0.5)
- if isinstance(margin, int):
sell_limit = margin
- else:
sell_limit = pred_in_a_day.count() * margin
buffer margin, in single score_mode, continue holding stock if it is in nlargest(sell_limit).
sell_limit should be no less than topk.
n_drop : int
number of stocks to be replaced in each trading date.
risk_degree: float
0-1, 0.95 for example, use 95% money to trade.
str_type: 'amount', 'weight' or 'dropout'
strategy type: TopkAmountStrategy ,TopkWeightStrategy or TopkDropoutStrategy.
- **exchange related arguments**
exchange: Exchange()
pass the exchange for speeding up.
subscribe_fields: list
subscribe fields.
open_cost : float
open transaction cost. The default value is 0.002(0.2%).
close_cost : float
close transaction cost. The default value is 0.002(0.2%).
min_cost : float
min transaction cost.
trade_unit : int
100 for China A.
deal_price: str
dealing price type: 'close', 'open', 'vwap'.
limit_threshold : float
limit move 0.1 (10%) for example, long and short with same limit.
extract_codes: bool
will we pass the codes extracted from the pred to the exchange.
.. note:: This will be faster with offline qlib.
"""
# check strategy:
spec = inspect.getfullargspec(get_strategy)
str_args = {k: v for k, v in kwargs.items() if k in spec.args}
strategy = get_strategy(**str_args)
# init exchange:
spec = inspect.getfullargspec(get_exchange)
ex_args = {k: v for k, v in kwargs.items() if k in spec.args}
trade_exchange = get_exchange(pred, **ex_args)
# run backtest
report_df, positions = backtest_func(
pred=pred,
strategy=strategy,
trade_exchange=trade_exchange,
shift=shift,
verbose=verbose,
account=account,
benchmark=benchmark,
)
# for compatibility of the old API. return the dict positions
positions = {k: p.position for k, p in positions.items()}
return report_df, positions
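def example_backtest(pred):
    # Hedged usage sketch (assumption): `pred` carries a <datetime, instrument>
    # index and a single `score` column as described in the docstring above; the
    # report column names "return" and "bench" are assumptions about the output.
    report_df, positions = backtest(pred, account=1e9, benchmark="SH000905",
                                    topk=50, n_drop=5, str_type="dropout")
    return risk_analysis(report_df["return"] - report_df["bench"])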
def long_short_backtest(
pred,
topk=50,
deal_price=None,
shift=1,
open_cost=0,
close_cost=0,
trade_unit=None,
limit_threshold=None,
min_cost=5,
subscribe_fields=[],
extract_codes=False,
):
"""
A backtest for long-short strategy
:param pred: The trading signal produced on day `T`.
:param topk: The short topk securities and long topk securities.
:param deal_price: The price to deal the trading.
:param shift: Whether to shift prediction by one day. The trading day will be T+1 if shift==1.
:param open_cost: open transaction cost.
:param close_cost: close transaction cost.
:param trade_unit: 100 for China A.
:param limit_threshold: limit move 0.1 (10%) for example, long and short with same limit.
:param min_cost: min transaction cost.
:param subscribe_fields: subscribe fields.
:param extract_codes: bool.
will we pass the codes extracted from the pred to the exchange.
NOTE: This will be faster with offline qlib.
:return: The result of backtest, it is represented by a dict.
{ "long": long_returns(excess),
"short": short_returns(excess),
"long_short": long_short_returns}
"""
if get_level_index(pred, level="datetime") == 1:
pred = pred.swaplevel().sort_index()
if trade_unit is None:
trade_unit = C.trade_unit
if limit_threshold is None:
limit_threshold = C.limit_threshold
if deal_price is None:
deal_price = C.deal_price
if deal_price[0] != "$":
deal_price = "$" + deal_price
subscribe_fields = subscribe_fields.copy()
profit_str = f"Ref({deal_price}, -1)/{deal_price} - 1"
subscribe_fields.append(profit_str)
trade_exchange = get_exchange(
pred=pred,
deal_price=deal_price,
subscribe_fields=subscribe_fields,
limit_threshold=limit_threshold,
open_cost=open_cost,
close_cost=close_cost,
min_cost=min_cost,
trade_unit=trade_unit,
extract_codes=extract_codes,
shift=shift,
)
_pred_dates = pred.index.get_level_values(level="datetime")
predict_dates = D.calendar(start_time=_pred_dates.min(), end_time=_pred_dates.max())
trade_dates = np.append(predict_dates[shift:], get_date_range(predict_dates[-1], left_shift=1, right_shift=shift))
long_returns = {}
short_returns = {}
ls_returns = {}
for pdate, date in zip(predict_dates, trade_dates):
score = pred.loc(axis=0)[pdate, :]
score = score.reset_index().sort_values(by="score", ascending=False)
long_stocks = list(score.iloc[:topk]["instrument"])
short_stocks = list(score.iloc[-topk:]["instrument"])
score = score.set_index(["datetime", "instrument"]).sort_index()
long_profit = []
short_profit = []
all_profit = []
for stock in long_stocks:
if not trade_exchange.is_stock_tradable(stock_id=stock, trade_date=date):
continue
profit = trade_exchange.get_quote_info(stock_id=stock, trade_date=date)[profit_str]
if np.isnan(profit):
long_profit.append(0)
else:
long_profit.append(profit)
for stock in short_stocks:
if not trade_exchange.is_stock_tradable(stock_id=stock, trade_date=date):
continue
profit = trade_exchange.get_quote_info(stock_id=stock, trade_date=date)[profit_str]
if np.isnan(profit):
short_profit.append(0)
else:
short_profit.append(-profit)
for stock in list(score.loc(axis=0)[pdate, :].index.get_level_values(level=0)):
# exclude the suspend stock
if trade_exchange.check_stock_suspended(stock_id=stock, trade_date=date):
continue
profit = trade_exchange.get_quote_info(stock_id=stock, trade_date=date)[profit_str]
if np.isnan(profit):
all_profit.append(0)
else:
all_profit.append(profit)
long_returns[date] = np.mean(long_profit) - np.mean(all_profit)
short_returns[date] = np.mean(short_profit) + np.mean(all_profit)
ls_returns[date] = np.mean(short_profit) + np.mean(long_profit)
return dict(
zip(
["long", "short", "long_short"],
map(pd.Series, [long_returns, short_returns, ls_returns]),
)
)
def t_run():
pred_FN = "./check_pred.csv"
pred = pd.read_csv(pred_FN)
pred["datetime"] = | pd.to_datetime(pred["datetime"]) | pandas.to_datetime |
from pyg_base._file import mkdir, path_name
from pyg_base._types import is_series, is_df, is_int, is_date, is_bool, is_str, is_float
from pyg_base._dates import dt2str, dt
from pyg_base._logger import logger
from pyg_base._as_list import as_list
import pandas as pd
import numpy as np
import jsonpickle as jp
_series = '_is_series'
_npy = '.npy'
__all__ = ['pd_to_parquet', 'pd_read_parquet', 'pd_to_npy', 'pd_read_npy', 'np_save']
def np_save(path, value, append = False):
mkdir(path)
if append:
from npy_append_array import NpyAppendArray as npa
with npa(path) as f:
f.append(value)
else:
np.save(path, value)
return path
def pd_to_npy(value, path, append = False):
"""
writes a pandas DataFrame/series to a collection of numpy files as columns.
Support append rather than overwrite
:Params:
--------
value: pd.DataFrame/Series
value to be saved
path: str
location of the form c:/test/file.npy
append: bool
if True, will append to existing files rather than overwrite
:Returns:
---------
dict of path, columns and index
These are the inputs needed by pd_read_npy
:Example:
----------
>>> import numpy as np
>>> import pandas as pd
>>> from pyg_base import *
>>> path = 'c:/temp/test.npy'
>>> value = pd.DataFrame([[1,2],[3,4]], drange(-1), ['a', 'b'])
>>> res = pd_to_npy(value, path)
>>> res
>>> {'path': 'c:/temp/test.npy', 'columns': ['a', 'b'], 'index': ['index']}
>>> df = pd_read_npy(**res)
>>> assert eq(df, value)
"""
res = dict(path = path)
if is_series(value):
df = pd.DataFrame(value)
columns = list(df.columns)
res['columns'] = columns[0]
else:
df = value
res['columns'] = columns = list(df.columns)
df = df.reset_index()
res['index'] = list(df.columns)[:-len(columns)]
if path.endswith(_npy):
path = path[:-len(_npy)]
for col in df.columns:
a = df[col].values
fname = path +'/%s%s'%(col, _npy)
np_save(fname, a, append)
return res
pd_to_npy.output = ['path', 'columns', 'index']
def pd_read_npy(path, columns, index):
"""
reads a pandas dataframe/series from a path directory containing npy files with col.npy and idx.npy names
Parameters
----------
path : str
directory where files are.
columns : str/list of str
filenames for columns. If columns is a single str, assumes we want a pd.Series
index : str/list of str
column names used as indices
Returns
-------
res : pd.DataFrame/pd.Series
"""
if path.endswith(_npy):
path = path[:-len(_npy)]
data = {col : np.load(path +'/%s%s'%(col, _npy)) for col in as_list(columns) + as_list(index)}
res = pd.DataFrame(data).set_index(index)
if isinstance(columns, str): # it is a series
res = res[columns]
return res
def pd_to_parquet(value, path, compression = 'GZIP'):
"""
a small utility to save df to parquet, extending both pd.Series and non-string columns
:Example:
-------
>>> from pyg_base import *
>>> import pandas as pd
>>> import pytest
>>> df = pd.DataFrame([[1,2],[3,4]], drange(-1), columns = [0, dt(0)])
>>> s = pd.Series([1,2,3], drange(-2))
>>> with pytest.raises(ValueError): ## must have string column names
df.to_parquet('c:/temp/test.parquet')
>>> with pytest.raises(AttributeError): ## pd.Series has no to_parquet
s.to_parquet('c:/temp/test.parquet')
>>> df_path = pd_to_parquet(df, 'c:/temp/df.parquet')
>>> series_path = pd_to_parquet(s, 'c:/temp/series.parquet')
>>> df2 = pd_read_parquet(df_path)
>>> s2 = pd_read_parquet(series_path)
>>> assert eq(df, df2)
>>> assert eq(s, s2)
"""
if is_series(value):
mkdir(path)
df = pd.DataFrame(value)
df.columns = [_series]
try:
df.to_parquet(path, compression = compression)
except ValueError:
df = pd.DataFrame({jp.dumps(k) : [v] for k,v in dict(value).items()})
df[_series] = True
df.to_parquet(path, compression = compression)
return path
elif is_df(value):
mkdir(path)
df = value.copy()
df.columns = [jp.dumps(col) for col in df.columns]
df.to_parquet(path, compression = compression)
return path
else:
return value
def pd_read_parquet(path):
"""
a small utility to read df/series from parquet, extending both pd.Series and non-string columns
:Example:
-------
>>> from pyg import *
>>> import pandas as pd
>>> import pytest
>>> df = pd.DataFrame([[1,2],[3,4]], drange(-1), columns = [0, dt(0)])
>>> s = pd.Series([1,2,3], drange(-2))
>>> with pytest.raises(ValueError): ## must have string column names
df.to_parquet('c:/temp/test.parquet')
>>> with pytest.raises(AttributeError): ## pd.Series has no to_parquet
s.to_parquet('c:/temp/test.parquet')
>>> df_path = pd_to_parquet(df, 'c:/temp/df.parquet')
>>> series_path = pd_to_parquet(s, 'c:/temp/series.parquet')
>>> df2 = pd_read_parquet(df_path)
>>> s2 = pd_read_parquet(series_path)
>>> assert eq(df, df2)
>>> assert eq(s, s2)
"""
path = path_name(path)
try:
df = pd.read_parquet(path)
import wget
import json
from tqdm import tqdm
import os
import torchtext
import spacy
import zipfile
import unicodedata
import re
from sklearn.model_selection import train_test_split
import pandas as pd
from .languageField import LanguageIndex
from torch.utils.data import Dataset, DataLoader
import numpy as np
from CacheFunc.man import cache_it
class TrainData(Dataset):
def __init__(self, X, y):
self.data = X
self.target = y
# TODO: convert this into torch code is possible
self.length = [np.sum(1 - np.equal(x, 0)) for x in X]
def __getitem__(self, index):
x = self.data[index]
y = self.target[index]
x_len = self.length[index]
return x, y, x_len
def __len__(self):
return len(self.data)
class TestData(Dataset):
def __init__(self, X, y):
self.data = X
self.target = y
def __getitem__(self, index):
x = self.data[index]
y = self.target[index]
return x, y
def __len__(self):
return len(self.data)
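def example_loaders(X_train, y_train, X_test, y_test, batch_size=64):
    # Hedged usage sketch (hypothetical arrays): wraps padded index matrices in
    # the two Dataset classes above and batches them with torch DataLoader.
    train_loader = DataLoader(TrainData(X_train, y_train), batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(TestData(X_test, y_test), batch_size=batch_size)
    return train_loader, test_loader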
class QGenDataset(object):
def __init__(self):
pass
def __get_NMT__(self):
original_word_pairs = [[w for w in l.split("\t")] for l in self.nmt_raw]
self.eng = [i[0] for i in original_word_pairs]
self.spa = [i[1] for i in original_word_pairs]
def get_AQ(self, max_len=80, sample=True):
raw_data = {
"ans": [line[0] for line in self.data],
"que": [line[1] for line in self.data],
}
df = pd.DataFrame(raw_data, columns=["ans", "que"])
import sys
import os
import unittest
import collections
import numpy as np
import pandas as pd
import scipy.sparse
import anndata
import matplotlib
matplotlib.use("agg")
if os.environ.get("TEST_MODE", "INSTALL") == "DEV":
sys.path.insert(0, "..")
import Cell_BLAST as cb
class TestHybridPath(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.m1 = np.array([
[2, 1, 0],
[3, 0, 0],
[0, 4, 5]
])
cls.v2 = np.array(["a", "s", "d"])
cls.d3 = {
"m1": cls.m1,
"v2": cls.v2
}
cls.s4 = "asd"
def test_hybrid_path(self):
cb.data.write_hybrid_path(self.m1, "./test.h5//a")
cb.data.write_hybrid_path(self.v2, "./test.h5//b/c")
cb.data.write_hybrid_path(self.d3, "./test.h5//b/d/e")
cb.data.write_hybrid_path(self.s4, "./test.h5//f")
self.assertTrue(cb.data.check_hybrid_path("./test.h5//b/c"))
self.assertFalse(cb.data.check_hybrid_path("./test.h5//b/f"))
self.assertFalse(cb.data.check_hybrid_path("./asd.h5//b/f"))
m1 = cb.data.read_hybrid_path("./test.h5//a")
v2 = cb.data.read_hybrid_path("./test.h5//b/c")
d3 = cb.data.read_hybrid_path("./test.h5//b/d/e")
s4 = cb.data.read_hybrid_path("./test.h5//f")
self.assertTrue(np.all(self.m1 == m1))
self.assertTrue(np.all(self.v2 == v2))
self.assertTrue(
np.all(self.d3["m1"] == d3["m1"]) and
np.all(self.d3["v2"] == d3["v2"])
)
self.assertEqual(self.s4, s4)
@classmethod
def tearDownClass(cls):
if os.path.exists("./test.h5"):
os.remove("./test.h5")
class TestDenseExprDataSet(unittest.TestCase):
def setUp(self):
self.exprs = np.array([
[2, 3, 0],
[1, 0, 4],
[0, 0, 5]
])
self.var = pd.DataFrame({"column": [1, 2, 3]}, index=["a", "b", "c"])
self.obs = pd.DataFrame({"column": [1, 1, 1]}, index=["d", "e", "f"])
self.uns = {"item": np.array(["a", "c"]), "blah": np.array(["blah"])}
self.ds = cb.data.ExprDataSet(
exprs=self.exprs, obs=self.obs, var=self.var, uns=self.uns)
def test_constructor(self):
with self.assertRaises(AssertionError):
_ = cb.data.ExprDataSet(
exprs=self.exprs, obs=self.obs[1:], var=self.var, uns=self.uns)
with self.assertRaises(AssertionError):
_ = cb.data.ExprDataSet(
exprs=self.exprs, obs=self.obs, var=self.var[1:], uns=self.uns)
def test_attributes(self):
self.assertTrue(np.all(self.ds.var_names == np.array(["a", "b", "c"])))
self.assertTrue(np.all(self.ds.obs_names == np.array(["d", "e", "f"])))
self.assertTrue(np.all(self.ds.shape == np.array([3, 3])))
def test_copy(self):
ds = self.ds.copy(deep=True)
ds.var_names = ["A", "B", "C"]
ds.obs_names = ["D", "E", "F"]
ds.X[0, 0] = 123
self.assertTrue(np.all(ds.var_names == np.array(["A", "B", "C"])))
self.assertTrue(np.all(ds.obs_names == np.array(["D", "E", "F"])))
self.assertEqual(ds.exprs[0, 0], 123)
self.assertTrue(np.all(self.ds.var_names == np.array(["a", "b", "c"])))
self.assertTrue(np.all(self.ds.obs_names == np.array(["d", "e", "f"])))
self.assertEqual(self.ds.exprs[0, 0], 2)
def test_read_and_write(self):
self.ds.write_dataset("./test.h5")
ds = cb.data.ExprDataSet.read_dataset("./test.h5")
self._compare_datasets(ds, self.ds)
def test_read_and_write_table(self):
self.ds.write_table("./test.txt", orientation="gc")
ds = cb.data.ExprDataSet.read_table("./test.txt", orientation="gc", index_col=0)
self.assertAlmostEqual(np.max(np.abs(
cb.utils.densify(self.ds.exprs) - ds.exprs
)), 0)
self.ds.write_table("./test.txt", orientation="cg")
ds = cb.data.ExprDataSet.read_table("./test.txt", orientation="cg", index_col=0)
self.assertAlmostEqual(np.max(np.abs(
cb.utils.densify(self.ds.exprs) - ds.exprs
)), 0)
def test_loom(self):
with self.ds.to_loom("./test.loom") as lm:
ds = cb.data.ExprDataSet.from_loom(lm)
self.ds.uns = {}
self.assertTrue(np.all(
cb.utils.densify(self.ds.exprs) == cb.utils.densify(ds.exprs)))
def test_normalize(self):
ds = self.ds.normalize(target=100)
exprs_norm = np.array([
[40, 60, 0],
[20, 0, 80],
[0, 0, 100]
])
self.assertTrue((
cb.utils.densify(ds.exprs) != exprs_norm
).sum() == 0)
def test_select_genes(self): # Smoke test
self.ds.find_variable_genes(num_bin=2, grouping="column")
def test_slicing(self):
exprs_ok = np.array([
[2, 0, 0],
[0, 0, 5]
])
var_ok = pd.DataFrame({"column": [1, np.nan, 3]}, index=["a", "g", "c"])
obs_ok = pd.DataFrame({"column": [1, 1]}, index=["d", "f"])
ds_ok = cb.data.ExprDataSet(
exprs=exprs_ok, obs=obs_ok, var=var_ok, uns=self.ds.uns)
self._compare_datasets(self.ds[["d", "f"], ["a", "g", "c"]], ds_ok)
exprs_ok = np.array([
[2, 0],
[0, 5]
])
var_ok = pd.DataFrame({"column": [1, 3]}, index=["a", "c"])
obs_ok = pd.DataFrame({"column": [1, 1]}, index=["d", "f"])
import hydra
import pandas as pd
import torch
import wandb
from omegaconf import DictConfig
from sklearn import metrics
from sklearn.model_selection import StratifiedKFold
from torch.utils.data import DataLoader
from transformers import (AdamW, AutoModelForSequenceClassification,
get_linear_schedule_with_warmup)
from ruatd.dataset import RuARDDataset
from ruatd.engine import eval_fn, train_fn, predict_fn
def run_fold(config, fold_num, df_train, df_valid, df_test):
wandb.init(
config=config,
project=config["project"],
name=f"{config['classification']}_{config['model']}_fold{fold_num}",
)
train_dataset = RuARDDataset(
text=df_train.Text.values, target=df_train.Class.values, config=config
)
train_data_loader = DataLoader(
train_dataset,
batch_size=config.batch_size,
num_workers=config.num_workers,
pin_memory=config.pin_memory,
drop_last=True,
)
valid_dataset = RuARDDataset(
text=df_valid.Text.values, target=df_valid.Class.values, config=config
)
valid_data_loader = DataLoader(
valid_dataset,
batch_size=config.batch_size,
num_workers=config.num_workers,
pin_memory=config.pin_memory,
drop_last=True,
)
test_dataset = RuARDDataset(
text=df_test.Text.values,
target=None,
config=config,
is_test=True,
)
test_data_loader = DataLoader(
test_dataset,
batch_size=config.batch_size,
num_workers=config.num_workers,
pin_memory=config.pin_memory,
)
device = torch.device(config.device)
model = AutoModelForSequenceClassification.from_pretrained(
config.model, num_labels=config.num_classes, ignore_mismatched_sizes=True
)
model.to(device)
model = torch.nn.DataParallel(model, device_ids=config["device_ids"])
param_optimizer = list(model.named_parameters())
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_parameters = [
{
"params": [
p for n, p in param_optimizer if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.001,
},
{
"params": [
p for n, p in param_optimizer if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
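# Standard transformer fine-tuning setup: weight decay is applied to all parameters except biases and LayerNorm terms.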
num_train_steps = int(len(df_train) / config.batch_size * config.epochs)
optimizer = AdamW(optimizer_parameters, lr=config.lr)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=0, num_training_steps=num_train_steps
)
best_loss = float("inf")
for _ in range(config.epochs):
train_loss = train_fn(train_data_loader, model, optimizer, device, scheduler)
val_loss, outputs, targets = eval_fn(valid_data_loader, model, device)
# roc_auc = metrics.roc_auc_score(targets, outputs)
# outputs = np.array(outputs) >= 0.5
accuracy = metrics.accuracy_score(targets, outputs)
print(f"Accuracy Score = {accuracy}")
if val_loss < best_loss:
print("Model saved!")
torch.save(
model.module.state_dict(),
f"{config.checkpoint}/{config.classification}/{config.model.split('/')[-1]}_fold{fold_num}.pt",
)
best_loss = val_loss
wandb.log(
{
"train_loss": train_loss,
"val_loss": val_loss,
# "val_roc_auc": roc_auc,
"val_accuracy": accuracy,
}
)
model = AutoModelForSequenceClassification.from_pretrained(
config.model, num_labels=config.num_classes, ignore_mismatched_sizes=True
)
model.load_state_dict(
torch.load(
f"{config.checkpoint}/{config.classification}/{config.model.split('/')[-1]}_fold{fold_num}.pt",
)
)
model.to(device)
model.eval()
valid_dataset = RuARDDataset(
text=df_valid.Text.values, target=None, config=config, is_test=True
)
valid_data_loader = DataLoader(
valid_dataset,
batch_size=config.batch_size,
num_workers=config.num_workers,
pin_memory=config.pin_memory,
)
prob_valid, df_valid["Class"] = predict_fn(valid_data_loader, model, config)
prob_test, df_test["Class"] = predict_fn(test_data_loader, model, config)
if config.classification == "multiclass":
class_dict = {
0: "ruGPT3-Small",
1: "ruGPT3-Medium",
2: "OPUS-MT",
3: "M2M-100",
4: "ruT5-Base-Multitask",
5: "Human",
6: "M-BART50",
7: "ruGPT3-Large",
8: "ruGPT2-Large",
9: "M-BART",
10: "ruT5-Large",
11: "ruT5-Base",
12: "mT5-Large",
13: "mT5-Small",
}
else:
class_dict = {0: "H", 1: "M"}
pd.concat(
[df_valid["Id"].reset_index(), pd.DataFrame(prob_valid).rename(columns=class_dict)], axis=1
).to_csv(
f"{config.submission}/{config.classification}/prob_valid_{config.model.split('/')[-1]}_fold{fold_num}.csv",
index=None,
)
pd.concat(
[df_test["Id"], pd.DataFrame(prob_test).rename(columns=class_dict)], axis=1
).to_csv(
f"{config.submission}/{config.classification}/prob_test_{config.model.split('/')[-1]}_fold{fold_num}.csv",
index=None,
)
df_test.Class = df_test.Class.map(class_dict)
df_test[["Id", "Class"]].to_csv(
f"{config.submission}/{config.classification}/class_{config.model.split('/')[-1]}_fold{fold_num}.csv",
index=None,
)
wandb.finish()
@hydra.main(config_path="config", config_name="binary")
def main(config: DictConfig):
df_train = pd.read_csv(config.data.train)
df_valid = pd.read_csv(config.data.val)
df_train = pd.concat([df_train, df_valid])
df_test = pd.read_csv(config.data.test)
import requests
import pandas as pd
class NSEIndia:
# All the market segments and indices
pre_market_keys = ['NIFTY', 'BANKNIFTY', 'SME', 'FO', 'OTHERS', 'ALL']
live_market_keys = ['NIFTY 50', 'NIFTY NEXT 50', 'NIFTY MIDCAP 50', 'NIFTY MIDCAP 100', 'NIFTY MIDCAP 150',
'NIFTY SMALLCAP 50', 'NIFTY SMALLCAP 100', 'NIFTY SMALLCAP 250', 'NIFTY MIDSMALLCAP 400',
'NIFTY 100', 'NIFTY 200', 'NIFTY500 MULTICAP 50:25:25', 'NIFTY LARGEMIDCAP 250', 'NIFTY AUTO',
'NIFTY BANK', 'NIFTY ENERGY', 'NIFTY FINANCIAL SERVICES', 'NIFTY FINANCIAL SERVICES 25/50',
'NIFTY FMCG', 'NIFTY IT', 'NIFTY MEDIA', 'NIFTY METAL', 'NIFTY PHARMA', 'NIFTY PSU BANK', 'NIFTY REALTY',
'NIFTY PRIVATE BANK', 'NIFTY HEALTHCARE INDEX', 'NIFTY CONSUMER DURABLES', 'NIFTY OIL & GAS',
'NIFTY COMMODITIES', 'NIFTY INDIA CONSUMPTION', 'NIFTY CPSE', 'NIFTY INFRASTRUCTURE', 'NIFTY MNC',
'NIFTY GROWTH SECTORS 15', 'NIFTY PSE', 'NIFTY SERVICES SECTOR', 'NIFTY100 LIQUID 15', 'NIFTY MIDCAP LIQUID 15',
'NIFTY DIVIDEND OPPORTUNITIES 50', 'NIFTY50 VALUE 20', 'NIFTY100 QUALITY 30', 'NIFTY50 EQUAL WEIGHT',
'NIFTY100 EQUAL WEIGHT', 'NIFTY100 LOW VOLATILITY 30', 'NIFTY ALPHA 50', 'NIFTY200 QUALITY 30',
'NIFTY ALPHA LOW-VOLATILITY 30', 'NIFTY200 MOMENTUM 30', 'Securities in F&O', 'Permitted to Trade']
holiday_keys = ['clearing', 'trading']
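# The key lists above mirror the values accepted by the NSE endpoints used below: the pre-open "key", the equity-stockIndices "index", and the holiday-master "type" query parameters.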
def __init__(self):
self.headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36'}
self.session = requests.Session()
self.session.get('http://nseindia.com', headers=self.headers)
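# The initial homepage request primes the session with the cookies that NSE's JSON API endpoints expect; calls without them are typically rejected.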
# NSE Pre-market data API section
def NsePreMarketData(self, key):
try:
data = self.session.get(f"https://www.nseindia.com/api/market-data-pre-open?key={key}",
headers=self.headers).json()['data']
except Exception:
    # On request failure, return an empty frame rather than leaving `data` undefined.
    return pd.DataFrame()
new_data = []
for i in data:
new_data.append(i['metadata'])
df = pd.DataFrame(new_data)
return df
# NSE Live-market data API section
def NseLiveMarketData(self, key, symbol_list):
try:
data = self.session.get(f"https://www.nseindia.com/api/equity-stockIndices?index={key.upper().replace(' ','%20').replace('&','%26')}",
headers=self.headers).json()['data']
# Use of "replace(' ','%20').replace('&','%26')" -> In live market space is replaced by %20, & is replaced by %26
df = pd.DataFrame(data)
df = df.drop(['meta'], axis=1)
if symbol_list:
return list(df['symbol'])
else:
return df
except:
pass
# NSE market holiday API section
def NseHoliday(self, key):
try:
data = self.session.get(f'https://www.nseindia.com/api/holiday-master?type={key}', headers = self.headers).json()
except Exception:
    # On request failure, return an empty frame rather than leaving `data` undefined.
    return pd.DataFrame()
df = pd.DataFrame(list(data.values())[0])
return df
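# Illustrative usage sketch (assumes live network access and that NSE has not
# changed or blocked these endpoints):
#     nse = NSEIndia()
#     pre_df = nse.NsePreMarketData('NIFTY')
#     live_df = nse.NseLiveMarketData('NIFTY 50', symbol_list=False)
#     holidays = nse.NseHoliday('trading')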
class NSEIndia2:
def __init__(self):
try:
self.headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36'}
self.session = requests.Session()
self.session.get('http://nseindia.com', headers=self.headers)
except:
pass
# NSE Option-chain data API section
def GetOptionChainData(self, symbol, indices=False):
# Choose the equities or indices option-chain endpoint for the given symbol.
if not indices:
    url = 'https://www.nseindia.com/api/option-chain-equities?symbol=' + symbol
else:
    url = 'https://www.nseindia.com/api/option-chain-indices?symbol=' + symbol
data = self.session.get(url, headers=self.headers).json()["records"]["data"]
df = []
for i in data:
for keys, values in i.items():
if keys == 'CE' or keys == 'PE':
info = values
info['instrumentType'] = keys
df.append(info)
df1 = pd.DataFrame(df)
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
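# Exercise every available compression library at every compression level (0-9).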
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
# this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
# see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (strings are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like an invalid
# combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
# store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert getattr(
    store.get_storer(key).table.description, name
).itemsize == size
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
            # fully named
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
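            # the MultiIndex levels ("date", "id") are stored as queryable columns, so "id == 1" works below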
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
            # actually create them, though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block......
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
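            # 2000-01-01 01:00 US/Eastern == 06:00 UTC == 946706400 s == 946706400000000000 ns since the epoch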
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
import datetime # noqa
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_series(self, setup_path):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@td.xfail_non_writeable
def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(
l, r, check_dtype=True, check_index_type=True, check_series_type=True
)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
self._check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(self, setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failer on some windows platforms")
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
self._check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
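        # the integer values are interpreted as nanoseconds since the epoch (~2009-02-13 UTC)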
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = pd.Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(self, compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
# check that can store Series of all of these types
self._check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self, setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
def test_select(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
                # all columns as data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
dict(
A=np.random.rand(20),
B=np.random.rand(20),
index=np.arange(20, dtype="f8"),
)
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
            # a NaN not in the first position: float with NaN is ok too
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
dict(
ts=bdate_range("2012-01-01", periods=300),
A=np.random.randn(300),
B=range(300),
users=["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ ["a{i:03d}".format(i=i) for i in range(100)],
)
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
"df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']"
)
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + ["a{i:03d}".format(i=i) for i in range(60)]
result = store.select(
"df", "ts>=Timestamp('2012-02-01') and users=selector"
)
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(self, setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '{end_dt}'".format(end_dt=end_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = "index > '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(self, setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be 1, is 10
assert len(results) == 1
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom I suggest this should
# return [] e.g. `for e in []: print True` never prints
# True.
where = "index <= '{beg_dt}' & index >= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes(self, setup_path):
# GH 3499, losing frequency info on index recreation
df = DataFrame(
dict(A=Series(range(3), index=date_range("2000-1-1", periods=3, freq="H")))
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "data")
store.put("data", df, format="table")
result = store.get("data")
tm.assert_frame_equal(df, result)
for attr in ["freq", "tz", "name"]:
for idx in ["index", "columns"]:
assert getattr(getattr(df, idx), attr, None) == getattr(
getattr(result, idx), attr, None
)
# try to append a table with a different frequency
with catch_warnings(record=True):
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("data", df2)
assert store.get_storer("data").info["index"]["freq"] is None
# this is ok
_maybe_remove(store, "df2")
df2 = DataFrame(
dict(
A=Series(
range(3),
index=[
Timestamp("20010101"),
Timestamp("20010102"),
Timestamp("20020101"),
],
)
)
)
store.append("df2", df2)
df3 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("df2", df3)
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes2(self, setup_path):
with ensure_clean_path(setup_path) as path:
with catch_warnings(record=True):
df = DataFrame(
dict(
A=Series(
range(3), index=date_range("2000-1-1", periods=3, freq="H")
)
)
)
df.to_hdf(path, "data", mode="w", append=True)
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
df2.to_hdf(path, "data", append=True)
idx = date_range("2000-1-1", periods=3, freq="H")
idx.name = "foo"
df = DataFrame(dict(A=Series(range(3), index=idx)))
df.to_hdf(path, "data", mode="w", append=True)
assert read_hdf(path, "data").index.name == "foo"
with catch_warnings(record=True):
idx2 = date_range("2001-1-1", periods=3, freq="H")
idx2.name = "bar"
df2 = DataFrame(dict(A=Series(range(3), index=idx2)))
df2.to_hdf(path, "data", append=True)
assert read_hdf(path, "data").index.name is None
def test_frame_select(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
result = store.select("frame", [crit3])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append("df_time", df)
with pytest.raises(ValueError):
store.select("df_time", "index>0")
# can't select if not written as table
# store['frame'] = df
# with pytest.raises(ValueError):
# store.select('frame', [crit1, crit2])
def test_frame_select_complex(self, setup_path):
# select via complex criteria
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", data_columns=["string"])
# empty
result = store.select("df", 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select("df", 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")]
tm.assert_frame_equal(result, expected)
# or
result = store.select("df", 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select(
"df", "(index>df.index[3] & " 'index<=df.index[6]) | string="bar"'
)
expected = df.loc[
((df.index > df.index[3]) & (df.index <= df.index[6]))
| (df.string == "bar")
]
tm.assert_frame_equal(result, expected)
# invert
result = store.select("df", 'string!="bar"')
expected = df.loc[df.string != "bar"]
            tm.assert_frame_equal(result, expected)
import pandas as pd
from bokeh.layouts import column, row, gridplot, layout
from bokeh.plotting import figure, curdoc, output_file
from bokeh.models import Div, ColumnDataSource, HoverTool, SingleIntervalTicker, LinearAxis, DatePicker
from bokeh.io import show
from bokeh.palettes import GnBu3,Category20c
from bokeh.transform import cumsum
def get_data(date_select,data_source,feature):
data_got = data_source[data_source["date"] == date_select].groupby(by = ["race"])[feature].agg("sum").reset_index(name='value')
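    # wedge angle per race: its share of the summed value scaled to a full circle (2*pi, approximated below as 2*3.14)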
data_got['angle'] = data_got['value'] / data_got['value'].sum() * 2*3.14
data_got['color'] = Category20c[len(data_got.values)]
data_got['percentage'] = (data_got['value'] / data_got['value'].sum())*100
data_got['num'] = data_got['value']
data_got['date'] = date_select
return data_got
def get_data2():
new_data = {}
date = pd.to_datetime(date_picker.value)
new_data["x"] = [date]
new_data["y"] = [data_total[data_total["date"] == date]["confirmed_cases"]]
return new_data
# read data
data_total = pd.read_csv("https://raw.githubusercontent.com/datadesk/california-coronavirus-data/master/cdph-positive-test-rate.csv")
import pandas as pd
import numpy as np
import os
import yfinance as yf
# tags (part of statement to keep)
tags = ['AssetsCurrent', 'CashAndCashEquivalentsAtCarryingValue', 'LiabilitiesCurrent', 'Liabilities',
'IncomeTaxesPaid', 'IncomeTaxesPaidNet', 'DepreciationDepletionAndAmortization',
'OperatingIncomeLoss', 'Assets', 'StockholdersEquity', 'WeightedAverageNumberOfSharesOutstandingBasic',
'NetCashProvidedByUsedInOperatingActivities', 'OtherLiabilitiesNoncurrent',
'RevenueFromContractWithCustomerExcludingAssessedTax', 'CostOfGoodsAndServicesSold', 'CostOfRevenue',
'EarningsPerShareBasic', 'Revenues', 'ResearchAndDevelopmentExpense', 'SellingGeneralAndAdministrativeExpense',
'PaymentsToAcquirePropertyPlantAndEquipment']
# the quarters the final dataframe should contain
quarters = ['2017Q4', '2018Q1', '2018Q2', '2018Q3', '2018Q4', '2019Q1', '2019Q2', '2019Q3', '2019Q4',
'2020Q1', '2020Q2', '2020Q3', '2020Q4', '2021Q1', '2021Q2', '2021Q3', '2021Q4']
# year of last annual statement
year = 2020
def create_quarterly_data(quarters, tags):
"""
:param quarters: quarters for which financial statement should be considered
:param tags: parts of financial statement which should be considered
:return: returns quarterly data for all tags and quarters
"""
# final DataFrame
financial_statement = pd.DataFrame()
# get ticker data
ticker = pd.read_json('./data/ticker.txt').T
# transform ticker
ticker = ticker.drop(['title'], axis=1)
ticker.columns = ['cik', 'ticker']
ticker['cik'] = ticker['cik'].astype(str)
# some cik's have more than one ticker
ticker = ticker.drop_duplicates(subset='cik')
# iterate though all the folders in data
for folder in os.listdir('./data'):
if folder.startswith("20"):
print(folder)
# import data
sub = pd.read_csv(f"./data/{folder}/sub.txt", sep="\t", dtype={"cik": str})
num = pd.read_csv(f"./data/{folder}/num.txt", sep="\t")
# transform sub data
# filter for needed columns
cols = ['adsh', 'cik', 'name', 'sic', 'form', 'filed', 'period', 'accepted', 'fy', 'fp']
sub = sub[cols]
# change to datetype
sub["accepted"] = pd.to_datetime(sub["accepted"])
sub["period"] = pd.to_datetime(sub["period"], format="%Y%m%d")
sub["filed"] = pd.to_datetime(sub["filed"], format="%Y%m%d")
# filter for quarterly and annual financial data
sub = sub[sub['form'].isin(['10-K', '10-Q'])]
# delete duplicates --> company handed in same file in same period --> only keep newest
sub = sub.loc[sub.sort_values(by=["filed", "accepted"], ascending=False).groupby(["cik", "period"]).cumcount() == 0]
# drop not needed columns
sub = sub.drop(['filed', 'period', 'accepted', 'fy', 'fp'], axis=1)
# merge ticker and sub data
sub = sub.merge(ticker)
# transform num data
# change to datetype
num["ddate"] = pd.to_datetime(num["ddate"], format="%Y%m%d")
# filter for needed columns
cols_num = ['adsh', 'tag', 'ddate', 'qtrs', 'value']
num = num[cols_num]
# only select current date and quarter
num = num.loc[
num.sort_values(by=["ddate", "qtrs"], ascending=(False, True)).groupby(["adsh", "tag"]).cumcount() == 0]
# create quarter and year column
num['quarter'] = num['ddate'].dt.quarter
num['year'] = num['ddate'].dt.year
# merge num and sub data
num = num.merge(sub)
# append to financial statement
financial_statement = financial_statement.append(num)
# filter for needed tags
financial_statement = financial_statement[financial_statement.loc[:, 'tag'].isin(tags)]
financial_statement = financial_statement.sort_values(by='ddate')
# create Q4 data
for idx, row in financial_statement.iterrows():
# when form is 10-K --> annual report --> change to quarterly
if row['form'] == '10-K':
# some companies only deliver full year numbers (qtrs = 4)
if row['qtrs'] == 4:
# filter for company and tag, select index of last 3 quarters
idx_list = financial_statement[
(financial_statement.loc[:, 'ticker'] == row['ticker']) &
(financial_statement.loc[:, 'tag'] == row['tag'])].index.values.tolist()
idx_position = idx_list.index(idx)
idx_list = idx_list[idx_position - 3:idx_position]
# subtract sum of all quarters from full year number
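                # e.g. Q4 value = full-year value - (Q1 + Q2 + Q3) for the same company and tag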
financial_statement.at[idx, 'value'] = financial_statement.at[idx, 'value'] - \
financial_statement.loc[idx_list, 'value'].sum()
# reset index
financial_statement = financial_statement.reset_index()
# only keep last 16 quarters
financial_statement['year-quarter'] = financial_statement['year'].astype(str) + 'Q' + financial_statement['quarter'].astype(str)
financial_statement = financial_statement.loc[financial_statement['year-quarter'].isin(quarters)]
financial_statement = financial_statement.drop(['index', 'adsh', 'ddate', 'qtrs', 'form'], axis=1)
# save as gzip file
financial_statement.to_parquet('./data/financial_statements.parquet.gzip', compression='gzip')
return financial_statement
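# Hypothetical usage sketch (assumes the ./data/<year+quarter>/sub.txt and num.txt SEC dumps
# plus the ./data/ticker.txt mapping are laid out as the loop above expects):
# quarterly_df = create_quarterly_data(quarters, tags)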
def create_annual_data(tags):
"""
:param tags: parts of financial statement which should be considered
:return: returns annual data for all tags
"""
# final DataFrame
financial_statement = pd.DataFrame()
# get ticker data
ticker = pd.read_json('./data/ticker.txt').T
# transform ticker
ticker = ticker.drop(['title'], axis=1)
ticker.columns = ['cik', 'ticker']
ticker['cik'] = ticker['cik'].astype(str)
# drop companies with several tickers
ticker = ticker.drop_duplicates(subset='cik')
# iterate though all the folders in data
for folder in os.listdir('./data'):
if folder.startswith("20"):
print(folder)
# import data
            sub = pd.read_csv(f"./data/{folder}/sub.txt", sep="\t", dtype={"cik": str})
import pandas as pd
import numpy as np
import yfinance as yf
from datetime import date
from sklearn.cluster import KMeans
from financeLib import monte_carlo, VaR, forecasting
#input
years = 12
today = date.today()
start = today.replace(year=today.year - years)
composite = '^IXIC' #Nasdaq composite
csv = pd.read_csv('stock.csv') #Nasdaq stocks
stockList = csv['Stock'].tolist()
stockPrice = yf.download(stockList, start=start, end=today, interval='1d')['Adj Close']
stockPrice['Composite'] = yf.download(composite, start=start, end=today, interval='1d')['Adj Close']
stockPrice = stockPrice.fillna(method='ffill')
stockPrice = stockPrice.fillna(0)
returns = stockPrice.pct_change()
reu = returns.fillna(0)
reu = reu.replace(np.inf, 0)
reu = reu.mean()
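# market beta per stock: Cov(stock returns, composite returns) / Var(composite returns)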
beta = (returns.cov()['Composite'])/(returns['Composite'].var())
stockSummary = pd.concat([beta, reu], axis=1)
stockSummary = stockSummary.rename(columns={'Composite': 'Beta',0: 'Returns'})
composite_summary = stockSummary.loc['Composite']
stockSummary = stockSummary.drop(['Composite'], axis=0)
stockSummary = stockSummary.dropna(axis=0)
z = 8
wcss = []
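# elbow method: record within-cluster sum of squares (inertia) for k = 1..z-1 to gauge a good cluster count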
for i in range(1, z):
kmeans = KMeans(n_clusters=i, max_iter=300)
kmeans.fit(stockSummary)
wcss.append(kmeans.inertia_)
clustering = KMeans(n_clusters=z, max_iter=300)
clustering.fit(stockSummary)
stockSummary['Clusters'] = clustering.labels_
print(stockSummary['Clusters'].value_counts())
print(stockSummary.groupby('Clusters').mean())
higherReturn = ((stockSummary.groupby('Clusters').mean())['Returns'].sort_values()[1:]).index[0]
selectedCluster = stockSummary[stockSummary['Clusters'] == higherReturn]
confidenceInterval = 0.3
portfolio = selectedCluster[selectedCluster['Returns'] > (selectedCluster.mean()['Returns'] - (selectedCluster.std()['Returns']))]
portfolioTicker = (portfolio.index).tolist()
portfolioTicker = np.random.choice(portfolioTicker, 5, replace=False)
print(portfolioTicker)
portfolioReturn = (stockPrice[portfolioTicker].pct_change()).dropna()
portfolioReturn = portfolioReturn.replace(np.inf, 0)
lastPrice = stockPrice[portfolioTicker].iloc[-1]
monteCarlo = monte_carlo(portfolioReturn, portfolioTicker)
VaRReturns = monteCarlo[-1,:]
forec = []
for i in range(0, len(portfolioTicker)):
print(portfolioTicker[i:i+1][0])
vars()[portfolioTicker[i:i+1][0]] = forecasting(stockPrice[portfolioTicker[i:i+1]], portfolioTicker[i:i+1][0])
forec.append(vars()[portfolioTicker[i:i+1][0]])
forecast = pd.concat(forec, axis=1)
data = stockPrice[portfolioTicker]
dataLength = round(len(data) * .8)  # 80% of the data
originalData = data[:dataLength].loc[:, ::-1]
forecastedData = forecast.loc[:, ::-2]
originalData.columns = forecastedData.columns
forecastedReturn = pd.concat([originalData, forecastedData])
"""
"""
import random
import sys
import os
import glob
from typing import Union, Optional, Tuple, Dict
import cv2
import numpy as np
import pandas as pd
from PIL import Image
from easydict import EasyDict as ED
import xml.etree.ElementTree as ET
import torch
from torchvision.transforms import functional as F
from torch.utils.data.dataset import Dataset
from dataset import image_data_augmentation, Yolo_dataset
from cfg_acne04 import Cfg
Cfg.dataset_dir = '/mnt/wenhao71/data/acne04/filtered_images/'
label_map_dict = ED({
'acne': 0,
})
class ACNE04(Yolo_dataset):
"""
"""
def __init__(self, label_path:str, cfg:ED, train:bool=True):
"""
unlike in Yolo_dataset where the labels are stored in a txt file,
with each line xmin,ymin,xmax,ymax,id ...
annotations of ACNE04 are already converted into a csv file
"""
if cfg.mixup == 2:
raise ValueError("cutmix=1 - isn't supported for Detector")
elif cfg.mixup == 2 and cfg.letter_box:
raise ValueError("Combination: letter_box=1 & mosaic=1 - isn't supported, use only 1 of these parameters")
self.cfg = cfg
self.train = train
df_ann = pd.read_csv(label_path)
df_ann = df_ann[df_ann['class'].isin(label_map_dict.keys())].reset_index(drop=True)
# NOTE that the annotations used in this project are NOT in Yolo format, but in VOC format
# ref Use_yolov4_to_train_your_own_data.md
# df_ann['xcen'] = df_ann.apply(lambda row: (row['xmax']+row['xmin'])/2/row['width'], axis=1)
# df_ann['ycen'] = df_ann.apply(lambda row: (row['ymax']+row['ymin'])/2/row['height'], axis=1)
# df_ann['box_width'] = df_ann['box_width'] / df_ann['width']
# df_ann['box_height'] = df_ann['box_height'] / df_ann['height']
df_ann['class_index'] = df_ann['class'].apply(lambda c: label_map_dict[c])
# each item of `truth` is of the form
# key: filename of the image
# value: list of annotations in the format [xmin, ymin, xmax, ymax, class_index]
# truth = {k: [] for k in df_ann['filename'].tolist()}
truth = {}
for fn, df in df_ann.groupby("filename"):
truth[fn] = df[['xmin', 'ymin', 'xmax', 'ymax', 'class_index']].values.astype(int).tolist()
# for _, row in df_ann.iterrows():
# truth[row['filename']].append(row[['xmin', 'ymin', 'xmax', 'ymax', 'class_index']].tolist())
# f = open(label_path, 'r', encoding='utf-8')
# for line in f.readlines():
# data = line.split(" ")
# truth[data[0]] = []
# for i in data[1:]:
# truth[data[0]].append([int(j) for j in i.split(',')])
self.truth = truth
self.imgs = list(self.truth.keys())
def __len__(self):
return super().__len__()
def __getitem__(self, index):
if self.train:
return super().__getitem__(index)
else:
return self._get_val_item(index)
def _get_val_item(self, index):
"""
"""
img_path = self.imgs[index]
bboxes_with_cls_id = np.array(self.truth.get(img_path), dtype=np.float)
img = cv2.imread(os.path.join(self.cfg.dataset_dir, img_path))
# img_height, img_width = img.shape[:2]
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# img = cv2.resize(img, (self.cfg.w, self.cfg.h))
# img = torch.from_numpy(img.transpose(2, 0, 1)).float().div(255.0).unsqueeze(0)
num_objs = len(bboxes_with_cls_id)
target = {}
# boxes to coco format
boxes = bboxes_with_cls_id[...,:4]
boxes[..., 2:] = boxes[..., 2:] - boxes[..., :2] # box width, box height
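        # boxes are now [xmin, ymin, width, height] (COCO-style) after subtracting the top-left corner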
target['boxes'] = torch.as_tensor(boxes, dtype=torch.float32)
target['labels'] = torch.as_tensor(bboxes_with_cls_id[...,-1].flatten(), dtype=torch.int64)
target['image_id'] = torch.tensor([get_image_id(img_path)])
target['area'] = (target['boxes'][:,3])*(target['boxes'][:,2])
target['iscrowd'] = torch.zeros((num_objs,), dtype=torch.int64)
return img, target
def train_val_test_split(df:pd.DataFrame, train_ratio:Union[int,float]=70, val_ratio:Union[int,float]=15, test_ratio:Union[int,float]=15) -> Tuple[pd.DataFrame,pd.DataFrame,pd.DataFrame]:
"""
"""
from random import shuffle
from functools import reduce
if isinstance(train_ratio, int):
train_ratio = train_ratio / 100
val_ratio = val_ratio / 100
test_ratio = test_ratio / 100
assert train_ratio+val_ratio+test_ratio == 1.0
all_files_by_level = {f"lv{i}": [] for i in range(4)}
for fn in df['filename'].unique():
all_files_by_level[f"lv{fn.split('_')[0][-1]}"].append(fn)
train, val, test = ({f"lv{i}": [] for i in range(4)} for _ in range(3))
for i in range(4):
shuffle(all_files_by_level[f"lv{i}"])
lv_nb = len(all_files_by_level[f"lv{i}"])
train[f"lv{i}"] = all_files_by_level[f"lv{i}"][:int(train_ratio*lv_nb)]
val[f"lv{i}"] = all_files_by_level[f"lv{i}"][int(train_ratio*lv_nb): int((train_ratio+val_ratio)*lv_nb)]
test[f"lv{i}"] = all_files_by_level[f"lv{i}"][int((train_ratio+val_ratio)*lv_nb):]
train = reduce(lambda a,b: a+b, [v for _,v in train.items()])
val = reduce(lambda a,b: a+b, [v for _,v in val.items()])
test = reduce(lambda a,b: a+b, [v for _,v in test.items()])
for i in range(4):
print(f"lv{i} ----- ", len(all_files_by_level[f"lv{i}"]))
df_train = df[df['filename'].isin(train)].reset_index(drop=True)
df_val = df[df['filename'].isin(val)].reset_index(drop=True)
df_test = df[df['filename'].isin(test)].reset_index(drop=True)
return df_train, df_val, df_test
def get_image_id(filename:str) -> int:
"""Convert a string to a integer."""
lv, no = os.path.splitext(os.path.basename(filename))[0].split("_")
lv = lv.replace("levle", "")
no = f"{int(no):04d}"
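    # e.g. "levle1_3.jpg" -> lv "1", no "0003" -> image id 10003 (level digit + zero-padded file number)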
return int(lv+no)
def voc_to_df(img_dir:str, ann_dir:str, save_path:Optional[str]=None, class_map:Optional[Dict[str,str]]=None, **kwargs) -> pd.DataFrame:
""" finished, checked,
pascal voc annotations (in xml format) to one DataFrame (csv file)
Parameters:
-----------
img_dir: str,
directory of the image files
ann_dir: str,
directory of the bounding box annotation xml files
save_path: str, optional,
path to store the csv file
class_map: dict, optional,
label map, from class names of the annotations to the class names for training
Returns:
--------
bbox_df: DataFrame,
annotations in one DataFrame
"""
xml_list = []
img_dir_filenames = os.listdir(img_dir)
for xml_file in glob.glob(os.path.join(ann_dir, '*.xml')):
tree = ET.parse(xml_file)
img_file = os.path.splitext(os.path.basename(xml_file))[0]
img_file = [os.path.join(img_dir, item) for item in img_dir_filenames if item.startswith(img_file)]
if len(img_file) != 1:
print(f"number of images corresponding to {os.path.basename(xml_file)} is {len(img_file)}")
continue
img_file = img_file[0]
root = tree.getroot()
if len(root.findall('object')) == 0:
print('{} has no bounding box annotation'.format(xml_file))
for member in root.findall('object'):
fw = int(root.find('size').find('width').text)
fh = int(root.find('size').find('height').text)
# or obtain fw, fh from image read from `img_file`
subcls_name = member.find('name').text
xmin = int(member.find('bndbox').find('xmin').text)
ymin = int(member.find('bndbox').find('ymin').text)
xmax = int(member.find('bndbox').find('xmax').text)
ymax = int(member.find('bndbox').find('ymax').text)
box_width = xmax-xmin
box_height = ymax-ymin
box_area = box_width*box_height
if box_area <= 0:
continue
values = {
'filename': root.find('filename').text if root.find('filename') is not None else '',
'width': fw,
'height': fh,
'segmented': root.find('segmented').text if root.find('segmented') is not None else '',
'subclass': subcls_name,
'pose': member.find('pose').text if member.find('pose') is not None else '',
'truncated': member.find('truncated').text if member.find('truncated') is not None else '',
'difficult': member.find('difficult').text if member.find('difficult') is not None else '',
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
'box_width': box_width,
'box_height': box_height,
'box_area': box_area,
}
xml_list.append(values)
column_names = ['filename', 'width', 'height', 'segmented', 'pose', 'truncated', 'difficult', 'xmin', 'ymin', 'xmax', 'ymax', 'box_width', 'box_height', 'subclass', 'box_area']
    bbox_df = pd.DataFrame(xml_list, columns=column_names)
    # Reconstructed tail (an assumption, not the original code): honour the optional
    # `class_map` and `save_path` arguments described in the docstring.
    if class_map is not None:
        bbox_df['subclass'] = bbox_df['subclass'].map(lambda c: class_map.get(c, c))
    if save_path is not None:
        bbox_df.to_csv(save_path, index=False)
    return bbox_df
import netifaces
import pyshark
import os
import csv
import multiprocessing
import pandas as pd
import subprocess
from scipy.io import arff
import threading
import time
import random
import logging
import mysql.connector
from shutil import copyfile, rmtree
from sklearn.model_selection import train_test_split
from keras import utils as U
import tensorflow as tf
from tensorflow import keras
import MLP
import Utils as utils
import RetrainModels as rtm
import DbSetUp as dtb
import datetime
import numpy
import matplotlib.pyplot as plt
import BlockIps as bl
from pymouse import PyMouse
from datetime import date
from collections import Counter
logging._warn_preinit_stderr = 0
logging.basicConfig(filename='log/app.log', filemode='w+', format='%(process)d - %(thread)s - %(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger()
utils.setLogger(logger)
#root directory
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
#false positive Counter
falsePackets = 0
falseFlows = 0
#decisions Counter
flowsT = 0
flowsA = 0
packA = 0
packT = 0
#databases indicators
flowWorkingDb = 1
trafficWorkingDb = 1
#those correspond to packet model
trainingTimeout = 360
fullTrainingTimeout = 7200
#those correspond to flow models
fullFTrainingTimeout = 100000
trainingFTimeout = 3600
start = check = end = time.time()
startF = checkF = endF = time.time()
numeric_types = [int, float, complex]
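# Two MLP classifiers from the local MLP module; the arguments are presumably the
# hidden-layer sizes and the number of input features (147 one-hot-encoded flow
# features vs. the 8 packet features extracted in get_packet_information below).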
modelFlow = MLP.MLP([100,100], 147)
modelPacket = MLP.MLP([10,10], 8, optimizer='rms')
trainingLock = threading.Lock()
flowLock = threading.Lock()
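# Shared TensorFlow session used by both models: soft placement on a single GPU,
# memory growth enabled and capped at ~60% of GPU memory so that live inference
# and background retraining can run side by side.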
config = tf.ConfigProto(
device_count={'GPU': 1},
intra_op_parallelism_threads=1,
allow_soft_placement=True
)
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.6
session = tf.Session(config=config)
keras.backend.set_session(session)
'''
Get the highest_layer, transport_layer, source_Ip, destination_Ip,
Source_Port, Destination_Port, Packet_length, Packet/Time information
about a packet
'''
def get_packet_information(packet, time, count):
global logger
try:
if packet.highest_layer != 'ARP':
ip= utils.get_ip(packet)
packets_time = 0
if float(time) != 0:
packets_time = count / float(time)
try:
#'Source Port', 'Dest Port', 'Source IP', 'Dest IP', 'Packet Length','Packets/Time', 'Packet Type',
data = [ip.src, ip.dst, int(packet[packet.transport_layer].srcport),
int(packet[packet.transport_layer].dstport), int(packet.length),
packets_time, packet.highest_layer, packet.transport_layer]
return data
except AttributeError:
data = [ip.src, ip.dst, 0, 0, int(packet.length), packets_time,
packet.highest_layer, packet.transport_layer]
return data
except (UnboundLocalError, AttributeError):
ip= utils.get_ip(packet)
if ip is None:
logger.info("The packet "+ str(count) + " wasn't identified as either IPv4 or IPv6\n")
logger.info(packet)
else:
logger.info("An unknown error has occurred with packet "+ str(count) +"\n")
logger.info(packet)
return None
'''
Handle the threatening attack by blocking the source of the traffic
'''
def handleDDoS(ip, flowip, port, origin):
utils.blockIp(ip, flowip, port, origin)
'''
Check whether the packet is dangerous or not by computing the prediction
that it is a ddos attack or not
packet- packet to be analyzed
count- the count of the packet that was reached
timeRecorded - the time at which the packet was Recorded
arriveT - time at which the packet actually arrived at
db - the currently used db
'''
def check_packet(packet, count, timeRecorded, arriveT, db):
global modelPacket, logger, falsePackets, session, packT, packA
packT += 1
try:
datat = get_packet_information(packet, arriveT, count)
if datat == None:
pass
else:
protocol = utils.get_ipvx(packet)
data, nonNumeric = utils.labelEncoder(datat, 'LiveCapture')
data = pd.DataFrame(data, columns=utils.getPacketNames())
flowId = utils.getFlowId(nonNumeric[1], nonNumeric[0], int(data['Dest Port']), int(data['Source Port']), protocol)
#once done remove the first and uncomment the second
#prediction = 0
try:
with session.as_default():
with session.graph.as_default():
modelPacket.model._make_predict_function()
prediction = modelPacket.model.predict(data)
prediction = numpy.argmax(prediction[0])
packA += prediction
print()
print("This is packet "+ str(datat) )
print("This is prediction " + str(prediction))
print("Recorded "+ str(packT) +" packs ")
print("From those "+ str(packA) + " were attacks")
print()
predictedTime = time.time() - timeRecorded
#check the percentage that this packet is part of an attack
flows = pd.DataFrame()
aux = dtb.getFlowIdFCols('finalFlow',['Flow_ID','Label'],flowId,arriveT)
if aux != []:
flows = flows.append(aux, ignore_index=True)
aux = dtb.getFlowIdFCols('newFlow0',['Flow_ID','Label'],flowId,arriveT)
if aux != []:
flows = flows.append(aux, ignore_index=True)
aux = dtb.getFlowIdFCols('newFlow1',['Flow_ID','Label'],flowId,arriveT)
if aux != []:
flows = flows.append(aux, ignore_index=True)
aux = dtb.getFlowIdFCols('newFlow2',['Flow_ID','Label'],flowId,arriveT)
if aux != []:
flows = flows.append(aux, ignore_index=True)
packets = pd.DataFrame()
aux = dtb.getFlowIdPCols('finalPackets',['Flow_ID', 'Predict'],flowId,arriveT)
if aux != []:
packets = packets.append(aux, ignore_index = True)
aux = dtb.getFlowIdPCols(db,['Flow_ID', 'Predict'],flowId,arriveT)
if aux != []:
packets = packets.append(aux, ignore_index = True)
pred = 0
if flows.empty and packets.empty:
pred = prediction
else:
packets = packets.append(pd.DataFrame([[flowId,prediction]]), ignore_index = True)
if not flows.empty:
flows.columns = ['Flow_ID', 'Label']
else:
flows = pd.DataFrame(columns = ['Flow_ID', 'Label'])
if not packets.empty:
packets.columns = ['Flow_ID', 'Predict']
else:
packets = pd.DataFrame(columns = ['Flow_ID', 'Predict'])
pred = utils.getTargets(packets, flows)[0]
if pred != prediction:
logger.info("Found a possible false positive in packets check")
falsePackets += 1
if pred == 0 :
print("Packet Not attack")
print()
print()
insert = threading.Thread(name="check_packet"+str(count), target = dtb.insert_packet, args=(data, nonNumeric, protocol, prediction, arriveT, predictedTime, predictedTime, db))
insert.start()
insert.join()
elif pred == 1:
print("Packet Attack")
print()
print()
handleAttack = threading.Thread(target = handleDDoS, args=(nonNumeric[1], flowId,data['Source Port'], 'Packet'), daemon=True)
handleAttack.start()
handleAttack.join()
handledTime = time.time() - timeRecorded
insert = threading.Thread(name="check_packet"+str(count), target = dtb.insert_packet, args=(data, nonNumeric, protocol, prediction, arriveT, predictedTime, handledTime, db))
insert.start()
insert.join()
else:
logger.warning("There is an unexpected prediction answer "+ str(prediction))
except Exception as e:
logging.error(e)
except KeyboardInterrupt as e :
global logger
logger.info("Program interrupted")
return
'''
There exists an issue with the CICFlowMeter's conversion from captured packets
to flows; as such, only the live recording of flows is allowed (as this is not
a main part of the topic it is not to be dealt with)
'''
def check_flow(time, count):
global logger
logger.info("Flow is checked somewhere else")
'''
This function checks when a training was ended so that the used model can change
'''
def changeUsedPacketModel():
global logger
try:
training = False
global trainingLock, modelPacket
while(True):
if trainingLock.locked():
training = True
elif training == True:
modelPacket.model.load_weights('trafficModels/currentModel.h5')
print("Model was changed")
training = False
else:
pass
except KeyboardInterrupt as e:
print("Program was stopped")
return
except Exception as e:
logger.error("Exception occurred", exc_info=True)
'''
this function checks if it is the time for one of the packet db to be trained
by checking if there exists any other training currently in progress and if
the time for training was reached.
'''
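# Three packet databases (newPackets0/1/2) are rotated: new traffic is written to the
# current "working" database while a previously filled one is used to refit the model,
# and finalPackets accumulates everything for the periodic full retrain.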
def checkTrainingTimesP(count):
global start, check, end, logger, fullTrainingTimeout, ROOT_DIR, trainingLock, trainingTimeout, trafficWorkingDb
if end - start >= fullTrainingTimeout:
start = check = time.time()
nameTraining = ''
#stop any active refiting
for t in multiprocessing.active_children():
if t.name in ['TrainingPacket1', 'TrainingPacket2', 'TrainingPacket0']:
nameTraining = str(t.name).replace('TrainingPacket','newPackets')
t.stop()
t.join()
#try to remove the epoch folders
if os.path.exists(ROOT_DIR + "/filePacketTrained"):
try:
rmtree(ROOT_DIR + "/filePacketTrained")
except OSError as e:
print ("Error: %s - %s." % (e.filename, e.strerror))
dtb.insertPTable(nameTraining)
logger.info("Fully retrain at count "+ str(count))
print(count)
#move all existent data in the main db
dtb.insertPTable('newPackets'+str(trafficWorkingDb))
fullTraining = rtm.retrainPacketModel(args=('finalPackets', trainingLock),
name="finalPacketsTraining", daemon=True)
logger.info("Started training a completely new model for checking packets")
fullTraining.start()
#use a new db for storing
trafficWorkingDb = (trafficWorkingDb + 1) % 3
logger.info("Changed to new packet database "+ str(trafficWorkingDb))
    #if the training time is reached, check if no training is occurring
    #if another training is occurring, keep on storing information
elif end - check >= trainingTimeout:
check = time.time()
logger.info("Finished working with packet "+ str(trafficWorkingDb))
#check if any database is in training
#change working database to the nontraining one
changedProcess = False
for t in multiprocessing.active_children():
if t.name == 'finalPacketsTraining':
logger.info("Currently a completely new packet model is being trained")
trafficWorkingDb = (trafficWorkingDb + 1) % 3
changedProcess = True
break
elif t.name not in ['TrainingPacket0', 'TrainingPacket1', 'TrainingPacket2']:
pass
elif t.name == ("TrainingPacket" + str((trafficWorkingDb + 1) % 3)):
trafficWorkingDb = (trafficWorkingDb + 2) % 3
changedProcess = True
break
elif t.name == ("TrainingPacket" + str((trafficWorkingDb + 2) % 3)):
trafficWorkingDb = (trafficWorkingDb + 1) % 3
changedProcess = True
break
elif t.name == ("TrainingPacket" + str(trafficWorkingDb)) :
logger.error("Error: Program has been writing in the training packet database")
trafficWorkingDb = (trafficWorkingDb + 1) % 3
changedProcess = True
break
else:
pass
#if no database is training refit the current one
if changedProcess == False:
logger.info("Partial retraining at count "+ str(count))
print("Partial at "+ str(count))
nameProcess = "TrainingPacket" + str(trafficWorkingDb)
if os.path.exists(ROOT_DIR + "/filePacketTrained"):
try:
rmtree(ROOT_DIR + "/filePacketTrained")
except OSError as e:
print ("Error: %s - %s." % (e.filename, e.strerror))
training = rtm.retrainPacketModel(args=('newPackets'+str(trafficWorkingDb), trainingLock),
name=nameProcess, daemon=True)
logger.info("Started training packet "+ str(trafficWorkingDb))
training.start()
trafficWorkingDb = (trafficWorkingDb + 1) % 3
logger.info("Changed to new packet database "+ str(trafficWorkingDb))
return
'''
capture live-traffic from selected interface into the respective
thread pcap file
The function then passes the packet onto a checker and onto a time checker, meant to
determine if the time for refitting or retraining was reached
'''
def capture_interface(iface):
global trafficWorkingDb, logger, start, check, end, falseFlows, falsePackets
#save all traffic for checking for false positives and missed values
if iface == "all":
cap = pyshark.LiveCapture(output_file="traffic.pcap")
else:
cap = pyshark.LiveCapture(interface=iface, output_file="traffic.pcap")
cap.set_debug()
packet_iterator = cap.sniff_continuously
changeUsedModel = threading.Thread(name="changeUsedPacketModel", target=changeUsedPacketModel, args=())
changeUsedModel.start()
try:
start = check = time.time()
count = 0
#for each read packet
for packet in packet_iterator():
count += 1
end = time.time()
#check if packet is a threat
arriveT = packet.frame_info.time_relative
check_packet(packet, count, end, arriveT, 'newPackets' + str(trafficWorkingDb))
#check if it is time for retraining
training = threading.Thread(name = "checkTrainingTimesP", target= checkTrainingTimesP, args=(count,))
training.start()
except Exception as e:
print(e)
except KeyboardInterrupt:
print("The number of false packets were "+ str(falsePackets))
print("The number of false flows were "+ str(falseFlows))
utils.closeVers()
cap.close()
time.sleep(1)
main_thread = threading.currentThread()
for t in threading.enumerate():
            if t is main_thread:
                continue
            t.join()
for t in multiprocessing.active_children():
t.stop()
t.join()
exit()
#get_flow_spec()
cap.close()
'''
this function checks if a new model was created and changes the current used one to that one
'''
def changeUsedFlowModel():
global logger
try:
training = False
global flowLock, modelFlow
while(True):
if flowLock.locked():
training = True
elif training == True:
modelFlow.model.load_weights('flowModels/currentModel.h5')
print("Model was changed")
training = False
else:
pass
except KeyboardInterrupt as e :
print("Program was stopped")
return
except Exception as e:
logger.error("Exception occurred", exc_info=True)
'''
this function checks if it is the time for one of the flow db to be trained
by checking whether there is another training in execution and by checking that
the training time was reached
count - marks the number of flows reached
'''
def checkTrainingTimesF(count):
global startF, checkF, endF, fullFTrainingTimeout, logger, ROOT_DIR, trainingFTimeout, flowWorkingDb, flowLock
if endF - startF >= fullFTrainingTimeout:
startF = checkF = time.time()
nameTraining =''
#stop any refitting
        for t in multiprocessing.active_children():
if t.name in ['TrainingFlow1','TrainingFlow2','TrainingFlow0']:
nameTraining = str(t.name).replace("TrainingFlow", 'newFlow')
t.stop()
t.join()
#Try to remove epoch folders
if os.path.exists(ROOT_DIR + "/fileFlowTrained"):
try:
                rmtree(ROOT_DIR + "/fileFlowTrained")
except OSError as e:
print ("Error: %s - %s." %(e.filename, e.strerror))
dtb.insertFTable(nameTraining)
logger.info("Fully retrain at count" + str(count))
print(count)
#move all existent data in the main db
dtb.insertFTable('newFlow' + str(flowWorkingDb))
fullTraining = rtm.retrainFlowModel(args=('finalFlow',flowLock), name ='finalFlowsTraining', daemon=True)
logger.info("Started training a completely new model for checking flows")
fullTraining.start()
#change db for storing
flowWorkingDb = (flowWorkingDb + 1) % 3
logger.info("Changed to new flow database" + str(flowWorkingDb))
    #if the training time is reached check if no other training is occurring
    #if another is happening, keep on storing information
elif endF - checkF >= trainingFTimeout:
checkF = time.time()
logger.info("Fininshed working with flow "+ str(flowWorkingDb))
#check if any db in trainingFile
#change working db to nontraining one
changedProcess = False
for t in multiprocessing.active_children():
if t.name == 'finalFlowsTraining':
logger.info("Currently a completely new flow model is being trained")
flowWorkingDb = (flowWorkingDb + 1) % 3
changedProcess = True
break
elif t.name not in ['TrainingFlow0', 'TrainingFlow1', 'TrainingFlow2']:
pass
elif t.name == ("TrainingFlow" + str((flowWorkingDb + 1) % 3)):
flowWorkingDb = (flowWorkingDb + 2) % 3
changedProcess = True
break
elif t.name == ("TrainingFlow" + str((flowWorkingDb + 2) % 3)):
flowWorkingDb = (flowWorkingDb + 1) % 3
changedProcess = True
break
elif t.name == ("TrainingFlow" + str(flowWorkingDb)) :
logger.error("Error: Program has been writing in the training packet database")
flowWorkingDb = (flowWorkingDb + 1) % 3
changedProcess = True
break
else:
pass
#if no database is in training refit the current one
if changedProcess == False:
logger.info("Partial retraining at count "+ str(count))
print("Partial at "+ str(count))
nameProcess = "Training Flow" + str(flowWorkingDb)
if os.path.exists(ROOT_DIR + "/fileFlowTrained"):
try:
rmtree(ROOT_DIR + "/fileFlowTrained")
except OSError as e:
print ("Error: %s - %s." % (e.filename, e.strerror))
            training = rtm.retrainFlowModel(args=('newFlow'+str(flowWorkingDb), flowLock),
                                            name=nameProcess, daemon=True)
            logger.info("Started training flow "+ str(flowWorkingDb))
            training.start()
            flowWorkingDb = (flowWorkingDb + 1) % 3
            logger.info("Changed to new flow database "+ str(flowWorkingDb))
'''
flow- flow to be analyzed
timeRecorded - the time the flow was read as
arriveT - the time the flow was recorded (not started to analyze) at
db - the currently used db
count- the flow reached to analyze
The function obtains the converted data and it tests it against a predictive
model. If traffic is attack then the flow gets sent to mitigation and then
gets saved
otherwise, it gets saved
'''
def flowCheck(flow, timeRecorded, arriveT, db, count):
global modelFlow, logger, session, falseFlows, flowsT, flowsA
flowsT += 1
try:
remove = ["Src IP", "Dst IP", "Label\n", "Timestamp", "Flow ID"]
df = utils.to_one_hot_encoding(flow).drop(remove, axis=1)
dat = df.drop(['Flow Byts/s', 'Flow Pkts/s'], axis=1)
df_num = dat.apply(pd.to_numeric)
df_num = df_num.select_dtypes(['number'])
dataset = df_num.to_numpy()
prediction = 0
#session.run(tf.global_variables_initializer())
try:
with session.as_default():
with session.graph.as_default():
modelFlow.model._make_predict_function()
prediction = modelFlow.model.predict(dataset)
prediction = numpy.argmax(prediction[0])
flowsA += prediction
print("Recorded "+ str(flowsT) +" flows ")
print("From those "+ str(flowsA) + " were attacks")
print()
print("This is flow check")
print(flow)
print("The prediction is: "+ str(prediction))
print()
predictedTime = time.time() - timeRecorded
flowId = flow['Flow ID']
#check if overall this flow belongs to an attack
flows = pd.DataFrame()
aux = dtb.getFlowIdFCols(db,['Flow_ID','Label'],flowId[0],arriveT)
if aux != []:
flows = flows.append(aux, ignore_index=True)
aux = dtb.getFlowIdFCols('finalFlow',['Flow_ID','Label'],flowId[0],arriveT)
if aux != []:
flows = flows.append(aux, ignore_index=True)
packets = pd.DataFrame()
aux = dtb.getFlowIdPCols('finalPackets',['Flow_ID', 'Predict'],flowId[0],arriveT)
if aux != []:
packets = packets.append(aux, ignore_index = True)
aux = dtb.getFlowIdPCols('newPackets0',['Flow_ID', 'Predict'],flowId[0],arriveT)
if aux != []:
packets = packets.append(aux, ignore_index = True)
aux = dtb.getFlowIdPCols('newPackets1',['Flow_ID', 'Predict'],flowId[0],arriveT)
if aux != []:
packets = packets.append(aux, ignore_index = True)
aux = dtb.getFlowIdPCols('newPackets2',['Flow_ID', 'Predict'],flowId[0],arriveT)
if aux != []:
packets = packets.append(aux, ignore_index = True)
pred = 0
if flows.empty and packets.empty:
pred = prediction
else:
flows = flows.append(pd.DataFrame([[flow['Flow ID'], prediction]]), ignore_index = True)
if not flows.empty:
flows.columns = ['Flow_ID', 'Label']
else:
flows = pd.DataFrame(columns = ['Flow_ID', 'Label'])
if not packets.empty:
packets.columns = ['Flow_ID', 'Predict']
else:
packets = pd.DataFrame(columns = ['Flow_ID', 'Predict'])
pred = utils.getTargetsF(flows, packets)[0]
if pred != prediction:
logger.info("Found a possible false positive in flows check")
falseFlows += 1
if pred == 0 :
print("Flow Not attack")
print()
print()
insert = threading.Thread(name="check_Flow"+str(count), target = dtb.insertFlow, args=(flow.drop('Label\n', axis=1), prediction, arriveT, predictedTime, predictedTime, db))
insert.start()
insert.join()
elif pred == 1:
print("Flow Attack")
print()
print()
handleAttack = threading.Thread(target = handleDDoS, args=(str(flow['Src IP']),str(flow['Flow ID']),int(flow['Src Port']),'Flow'), daemon=True)
handleAttack.start()
handleAttack.join()
handledTime = time.time() - timeRecorded
insert = threading.Thread(name="check_Flow"+str(count), target = dtb.insertFlow, args=(flow.drop('Label\n', axis=1), prediction, arriveT, predictedTime, handledTime, db))
insert.start()
insert.join()
else:
logger.warning("There is an unexpected prediction answer "+ str(prediction))
except Exception as e:
print(e)
except KeyboardInterrupt as e :
global logger
logger.info("Program interrupted")
return
def follow(thefile):
thefile.seek(0,2)
while True:
line = thefile.readline()
if not line:
#time.sleep(0.1)
continue
yield line
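# `follow` tails an open file object like `tail -f`, yielding lines as they are
# appended, e.g.:  for line in follow(open("data/daily/<day>_Flow.csv")): ...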
'''
count- the number of flow that we have reached to read (starts from 0)
the function reads flows as they get saved inside the daily flow.csv file,
which then get sent for checking and for retraining if necessary
'''
def watch_flow(count):
global flowWorkingDb, startF, endF, checkF, logger, flowsT, flowsA, packA, packT
day = date.today()
flowPacketName = "data/daily/" + str(day) + "_Flow.csv"
changeUsedModel = threading.Thread(name="changeUsedFlowModel", target=changeUsedFlowModel, args=())
changeUsedModel.start()
try:
logfile = open(flowPacketName, "r")
cols = logfile.readline()
utils.setFlowNames(cols.split(","))
loglines = follow(logfile)
startF = checkF = time.time()
lines = logfile.readlines()
#for line in loglines:
while True:
lines = logfile.readlines()
#print(lines)
#line = logfile.readline()
for line in lines:
count += 1
endF = time.time()
flow = pd.DataFrame([line.split(",")], columns = utils.getFlowNames())
print(flow)
arriveT = flow['Timestamp']
#check if flow is a threat
flowCheck(flow, endF, arriveT, 'newFlow' + str(flowWorkingDb), count)
training = threading.Thread(name="checkTrainingTimesF", target=checkTrainingTimesF, args=(count,))
training.start()
except KeyboardInterrupt:
return
except Exception as e:
print(e)
time.sleep(2)
watch_flow(count)
'''
Starts 1 of 3 CICFlowMeter instances.
This is necessary because an error in the programmatic side of the external app
only allows flows to be read correctly during live capture.
If the application does not start reading traffic on the "any" interface by
itself, the capture needs to be started by hand.
'''
def run_CICFlowMeter():
m = PyMouse()
x, y = m.position()
cmd = ['sudo ./CICFlowMeter']
#open 3 app instances
p = subprocess.Popen(cmd,shell=True, stdout=subprocess.PIPE)
#p = subprocess.Popen(cmd,shell=True, stdout=subprocess.PIPE)
#p = subprocess.Popen(cmd,shell=True, stdout=subprocess.PIPE)
#separate the instances on the screen
time.sleep(5)
m.press(650,300,1) #move 1
m.release(60,20)
time.sleep(0.5)
m.move(740,300)
time.sleep(0.5)
'''m.press(740,300,1) #move 2
m.release(100,500)
m.move(750,300)
time.sleep(0.5)
m.press(750,300,1) #move 3
m.release(800,20)
time.sleep(0.5)'''
m.click(60,370) #set load1
time.sleep(2)
'''m.click(750,370) #set load2
time.sleep(0.5)
m.click(60,850) #set load3
time.sleep(0.5)'''
m.click(300,490)
m.click(300,480) #set any
time.sleep(0.25)
'''m.click(790,490)
time.sleep(0.25)
m.click(790,480) #set any
time.sleep(0.25)
m.click(300,870)
time.sleep(0.25)
m.click(300,960) #set any
time.sleep(0.25)'''
s = time.time()
m.click(60,410) #start 1
time.sleep(0.25)
'''m.click(740,400) #start 2
time.sleep(0.5)
m.click(30,878) #start 3'''
'''inst1 = threading.Thread(target = run1, args=(m,s))
inst2 = threading.Thread(target = run2, args=(m,s))
inst3 = threading.Thread(target = run3, args=(m,s))'''
'''inst1.start()
inst2.start()
inst3.start()'''
p.wait()
'''
choose an interface
and then capture the traffic and the respective flow
any to sniff all interfaces
timeout- capture time
'''
def read_and_analize_traffic():
print(-1, "any ")
interfaces = utils.interface_names()
for i, value in enumerate(interfaces):
print(i, value)
print('\n')
iface = input("Please select interface by name: ")
flowReader = threading.Thread(target=run_CICFlowMeter, args=())
flowReader.start()
packet = threading.Thread(target=capture_interface, args=(iface,))
flow = threading.Thread(target=watch_flow, args=(0,))
packet.start()
flow.start()
packet.join()
flow.join()
'''
run3 stops the 3rd instance every 180 seconds and saves the recorded flows
m - mouse controller
t - current time
'''
def run3(m, t):
try:
pas = False
while pas == False:
e = time.time()
if e - t >= 180:
m.click(30,920) #stop 3
time.sleep(0.5)
m.click(30,920)
time.sleep(0.5)
m.click(400,780) #save in time 3
time.sleep(0.25)
m.click(30,878) #start 3
t = e
pas = True
run3(m, t)
except KeyboardInterrupt as e:
return
'''
run2 stops the 2nd instance every 120 seconds and saves the recorded flows
m - mouse controller
t - current time
'''
def run2(m, t):
try:
pas = False
while pas == False:
e = time.time()
if e - t >= 120:
m.click(750,450) #stop 2
time.sleep(0.25)
m.click(750,450)
time.sleep(0.5)
m.click(990,310) #save in time 2
time.sleep(0.25)
m.click(740,400) #start 2
t = e
pas = True
run2(m, t)
except KeyboardInterrupt as e:
return
'''
run 1 stops the 1st instance every 60 seconds and saves the recorded flows
m - mouse controller
t - current time
'''
def run1(m, t):
try:
pas = False
while pas == False:
e = time.time()
if e - t >= 60:
m.click(60,450) #stop 1
time.sleep(0.25)
m.click(60,450)
time.sleep(0.5)
m.click(390,310) #save in time 1
time.sleep(0.25)
m.click(60,400) #start 1
t = e
pas = True
run1(m, t)
except KeyboardInterrupt as e:
return
'''
Lets you choose a model out of the existent ones to become
the currently used one
'''
def choose_model(models, name):
print("Choose model to be used as the current model for "+ name)
for i in range(0,len(models)):
print(str(i+1)+ ". " + str(models[i]))
modelInd = input("\nWhich model (index)?\n")
if int(modelInd)-1 in range(0,len(models)):
return str(models[int(modelInd)-1])
else:
print("Choose an index\n")
        return choose_model(models, name)
'''
model - model to be retrained
data - data used for retraining
THis function completely retrains a model from a data file
'''
def retrain_model(model, data):
global session, logger
encodedData = []
targetsF = []
if 'Time to Live' in data:
target= pd.DataFrame(data[['target']].applymap(lambda x: utils.get_Target(x)))
targetsF = U.to_categorical(target, num_classes=2)
print(data.columns)
keep = ['Source IP', 'Dest IP', 'Source Port', 'Dest Port', 'Byte Size', 'Packet Length', 'Time to Live', 'Packets/Time']
encodedData = data[keep]
encodedData['Packet Type'] = data[['Packet Type']].applymap(lambda x: utils.get_Type_Coding(x))
print(encodedData.columns)
try:
retrainingInfo = train_test_split(encodedData, targetsF, test_size=0.2, random_state=42)
model.load_data(retrainingInfo.copy())
print("loaded")
session.run(tf.initialize_all_variables())
stats = model.train(20, 6, 9, 'filePacketTrained', patience=50)
print("trained")
try:
with session.as_default():
with session.graph.as_default():
score = model.evaluate()
model.save_model('trafficModels')
print('Test loss: ' + str(round(score[0], 3)))
print('Test accuracy ' + str(round(score[1], 3)) + " (+/-" + str(numpy.std(round(score[1], 3))) + ")")
plt.plot(stats['train_loss'])
plt.plot(stats['val_loss'])
plt.title('model loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
print(stats)
except Exception as ex:
            logger.error("Error %s at line %s", ex, ex.__traceback__.tb_lineno)
except Exception as e:
logger.error("Exception occurred", exc_info=True)
else:
encodedData = pd.DataFrame(columns=utils.getPacketNames())
encodedData = data.apply(lambda x: utils.labelEncoder(x, 'Training')[0], axis=1)
encodedData.columns = utils.getPacketNames()
targets = data['target']
targetsF = U.to_categorical(targets.copy(), num_classes=2)
try:
retrainingInfo = train_test_split(encodedData, targetsF, test_size=0.2, random_state=42)
model.load_data(retrainingInfo.copy())
print("loaded")
session.run(tf.initialize_all_variables())
stats = model.train(20, 6, 8, 'filePacketTrained', patience=20)
print("trained")
try:
with session.as_default():
with session.graph.as_default():
score = model.evaluate()
model.save_model('trafficModels')
print('Test loss: ' + str(round(score[0], 3)))
print('Test accuracy ' + str(round(score[1], 3)) + " (+/-" + str(numpy.std(round(score[1], 3))) + ")")
plt.plot(stats['train_loss'])
plt.plot(stats['val_loss'])
plt.title('model loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
print(stats)
except Exception as ex:
                logger.error("Error %s at line %s", ex, ex.__traceback__.tb_lineno)
except Exception as e:
logger.error("Exception occurred", exc_info=True)
'''
name- name of the training data type (i.e packets or flows)
Function lets you choose the data to be used for retraining
'''
def get_training_data(name):
global ROOT_DIR
name = name.replace('Models','Data')
print(name)
dataFiles = os.listdir(os.path.join(ROOT_DIR, name))
chosenFile = None
while chosenFile not in dataFiles:
print("Choose a data file index to be used for training:\n")
for i in range(len(dataFiles)):
print(str(i)+". "+str(dataFiles[i]))
chosenFile = input("\n Data:\n")
if int(chosenFile) not in range(len(dataFiles)):
print("Please choose an index")
else:
chosenFile = dataFiles[int(chosenFile)]
print(chosenFile)
if name == 'trafficData' and chosenFile != "final dataset.arff":
        data = pd.read_csv(name+"/"+chosenFile, delimiter=',')
#Utility functions for Gym
import pandas as pd
import numpy as np
import math
import json
import logging
import gym
import gym_solventx
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from gym import logger
from gym_solventx.envs import templates
class SolventXEnvUtilities:
"""SolventX environment."""
def get_config_dict(self,config_file):
"""Read config file create confi dict."""
assert 'json' in config_file, 'Config file must be a json file!'
config_keys = templates.config_keys
design_config = read_config(config_file)
config_dict = {}
for key in config_keys:
if key in design_config.keys():
config_dict.update({key:design_config[key]})
else:
raise ValueError(f'{key} not found in config JSON file!')
return config_dict
def get_logscale(self,design_variable_config):
"""Calculate logscale."""
logscale_min = min([design_variable_config['H+ Extraction']['lower'], design_variable_config['H+ Scrub']['lower'], design_variable_config['H+ Strip']['lower']])
logscale_max = max([design_variable_config['H+ Extraction']['upper'], design_variable_config['H+ Scrub']['upper'], design_variable_config['H+ Strip']['upper']])
#log scaled list ranging from lower to upper bounds of h+, including an out of bounds value for invalid actions consistency
logscale = np.array(sorted(list(np.logspace(math.log10(logscale_min), math.log10(logscale_max), base=10, num=50))\
+[logscale_min-1]+[logscale_max+1]))
return {'logscale':logscale}
def get_manipulated_variables(self,combined_var_space,environment_config):
"""Create a dictionary of continuous actions."""
"""{0:{},1:{'type':'(HA)2(org)','index':0},2:{'type':'H+ Scrub','index':1}}"""
manipulated_variables = combined_var_space.copy()
logger.info(f'{self.name}:Following variables were found:{[j.strip("-012") for j in manipulated_variables.keys()]}')
for variable in combined_var_space:
if variable.strip('-012') not in environment_config['action_variables']: #Only keep user specified manipulated variables
logger.info(f'Removing {variable} since it is not in action variables list.')
del manipulated_variables[variable]
if variable.strip('-012') in templates.constant_variables and variable.strip('-012') in manipulated_variables: #Remove constant variables
logger.info(f'Removing {variable} since it is in constant variable list.')
del manipulated_variables[variable]
return manipulated_variables
def create_continuous_action_dict(self,manipulated_variables,variable_config,environment_config):
"""Create a dictionary of continuous actions."""
"""{0:{},1:{'type':'(HA)2(org)','index':0},2:{'type':'H+ Scrub','index':1}}"""
logger.info(f'{self.name}:Creating continuous action dictionary...')
continuous_action_dict = {}
i=0
for variable,index in manipulated_variables.items():
action = i
action_variable = variable.strip('-012') #Remove module numbers from variables list
continuous_action_dict.update({action:{'type':action_variable,'index':index,
'min':variable_config[action_variable]['lower'],
'max':variable_config[action_variable]['upper']}})
logger.debug(f'{self.name}:Converted {variable} into action {action}')
i = i + 1
return continuous_action_dict
def create_discrete_action_dict(self,manipulated_variables,variable_config,environment_config):
"""Create a dictionary of discrete actions."""
"""{0:{},1:{'(HA)2(org)':0.05},2:{'(HA)2(org)':-0.05}}"""
n_increment_actions = environment_config['increment_actions_per_variable']
n_decrement_actions = environment_config['decrement_actions_per_variable']
total_increment_actions = n_increment_actions*len(manipulated_variables)
total_decrement_actions = n_decrement_actions*len(manipulated_variables)
logger.info(f'Total increment actions:{total_increment_actions},Total decrement actions:{total_decrement_actions}')
logger.info(f'{self.name}:Creating discrete action dictionary...')
action_dict = {}
action_dict.update({0:{}})
i = 1
for variable,index in manipulated_variables.items():
if n_increment_actions>0:
for j in range(1,n_increment_actions+1):
action_variable = variable.strip('-012') #Remove module numbers from variables list
if variable_config[action_variable]['scale'] == 'linear':
delta_value = j*variable_config[action_variable]['delta']
elif variable_config[action_variable]['scale'] == 'discrete':
delta_value = int(j*variable_config[action_variable]['delta'])
elif variable_config[action_variable]['scale'] == 'log':
delta_value = 10**(j*variable_config[action_variable]['delta']) #Convert log to actual number
elif variable_config[action_variable]['scale'] == 'pH':
delta_value = 10**(-j*variable_config[action_variable]['delta']) #Convert pH to actual number
else:
raise ValueError(f'{variable_config[action_variable]["scale"]} is an invalid scale for {action_variable} in increment action!')
action_dict.update({i:{'type':action_variable,'delta':delta_value,'index':index}})
                    logger.debug(f'{self.name}:Converted increment {action_dict[i]["delta"]:.2f} ({variable_config[action_variable]["scale"]} scale) for variable {action_variable} into action {i}')
i = i+1
if n_decrement_actions>0:
for k in range(1,n_decrement_actions+1):
if variable_config[action_variable]['scale'] == 'linear':
delta_value = -k*variable_config[action_variable]['delta']
elif variable_config[action_variable]['scale'] == 'discrete':
delta_value = int(-k*variable_config[action_variable]['delta'])
elif variable_config[action_variable]['scale'] == 'log':
delta_value = -10**(k*variable_config[action_variable]['delta']) #Convert log to actual number
elif variable_config[action_variable]['scale'] == 'pH':
delta_value = -10**(-k*variable_config[action_variable]['delta']) #Convert pH to actual number
else:
raise ValueError(f'{variable_config[action_variable]["scale"]} is an invalid scale for {action_variable} in decrement action!')
action_dict.update({i:{'type':action_variable,'delta':delta_value,'index':index}})
                    logger.debug(f'{self.name}:Converted decrement {action_dict[i]["delta"]:.2f} ({variable_config[action_variable]["scale"]} scale) for variable {action_variable} into action {i}')
i = i+1
return action_dict
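    # Worked example of the scale handling above (illustrative): with delta=1 the j-th
    # increment action changes a 'linear' variable by j, a 'log' variable by 10**j,
    # and a 'pH' variable by 10**(-j) (0.1, 0.01, ...); decrements use the negated values.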
def create_observation_dict(self,combined_var_space,input_compositions,observed_variables):
"""Create a list of all design variables in every stage."""
observed_var_space = {}
observed_var_space.update({variable:index for variable,index in combined_var_space.items() if variable.strip('-012') in observed_variables})
observed_var_space.update({component:index for index,component in enumerate(input_compositions) if component in observed_variables})
#for variable in templates.constant_variables: #Remove constant variables
# if variable in observed_var_space:
# del observed_var_space[variable]
logger.info(f'Following observation variables were found:{list(observed_var_space.keys())}')
return observed_var_space
def check_reward_config(self):
"""Check reward dictionary."""
reward_weights = []
for goal in self.environment_config['goals']:
min_level = next(iter(self.reward_config['metrics'][goal]['thresholds']))
min_threshold = self.reward_config['metrics'][goal]['thresholds'][min_level]['threshold']
logger.debug(f'Minimum threshold {min_level} for {goal} is:{min_threshold}')
for _,metric_config in self.reward_config['metrics'][goal]['thresholds'].items():
if min_threshold > metric_config['threshold']:
                    raise ValueError(f'Threshold for {goal}:{metric_config["threshold"]} should be greater than minimum threshold:{min_threshold}')
reward_weights.append(self.reward_config['metrics'][goal]['weight'])
if not math.isclose(np.mean(reward_weights), 1.0,abs_tol=0.001):
            raise ValueError(f'Mean of the reward weights is {np.mean(reward_weights):.3f} which is not equal to 1.0!')
def get_simple_metrics(self):
"""Return the solvent extraction design."""
recovery = {'agent-recovery-'+key:value[0] for key, value in self.sx_design.recovery.items() if key.startswith("Strip")}
purity = {'agent-purity-'+key:value for key, value in self.sx_design.purity.items() if key.startswith("Strip")}
recority = {'agent-recority-'+key:value[0] for key, value in self.sx_design.recority.items() if key.startswith("Strip")}
return recovery,purity,recority
def collect_initial_metrics(self):
"""Collect inital metric values."""
logger.info(f'{self.name}:Collecting metrics at beginning of episode {self.episode_count}')
recovery,purity,recority = self.get_simple_metrics()
self.collect_initial_recovery(recovery)
self.collect_initial_purity(purity)
self.collect_initial_recority(recority)
def collect_final_metrics(self):
"""Check reward dictionary."""
logger.info(f'{self.name}:Collecting metrics at end of episode {self.episode_count}')
recovery,purity,recority = self.get_simple_metrics()
self.collect_final_recovery(recovery)
self.collect_final_purity(purity)
self.collect_final_recority(recority)
def collect_initial_purity(self,purity):
"""Collect purity in a dataframe."""
logger.debug(f'{self.name}:Collecting purity at beginning of {self.episode_count}')
self.initial_purity_df = self.initial_purity_df.append(purity, ignore_index=True)
def collect_initial_recovery(self,recovery):
"""Collect recovery in a dataframe."""
logger.debug(f'{self.name}:Collecting recovery at beginning of {self.episode_count}')
self.initial_recovery_df = self.initial_recovery_df.append(recovery, ignore_index=True)
def collect_initial_recority(self,recority):
"""Collect recority in a dataframe."""
logger.debug(f'{self.name}:Collecting recority at beginning of {self.episode_count}')
self.initial_recority_df = self.initial_recority_df.append(recority, ignore_index=True)
def collect_final_purity(self,purity):
"""Collect purity in a dataframe."""
logger.debug(f'{self.name}:Collecting purity at end of episode {self.episode_count}')
self.final_purity_df = self.final_purity_df.append(purity, ignore_index=True)
def collect_final_recovery(self,recovery):
"""Collect recovery in a dataframe."""
logger.debug(f'{self.name}:Collecting recovery at end of episode {self.episode_count}')
self.final_recovery_df = self.final_recovery_df.append(recovery, ignore_index=True)
def collect_final_recority(self,recority):
"""Collect recority in a dataframe."""
logger.debug(f'{self.name}:Collecting recority at end of episode {self.episode_count}')
self.final_recority_df = self.final_recority_df.append(recority, ignore_index=True)
def get_design(self):
"""Return the solvent extraction design."""
design_dict = {key:self.sx_design.x[index] for key, index in self.sx_design.combined_var_space.items() if key.strip('-012') in self.design_variable_config}
design_dict.update({composition:self.sx_design.ree_mass[index] for index, composition in enumerate(self.sx_design.ree) if composition in self.composition_variable_config})
return design_dict
def collect_initial_design(self):
"""Collect the solvent extraction design at end of episode."""
design_dict = self.get_design()
logger.debug(f'{self.name}:Collecting design {design_dict} at start of episode {self.episode_count}')
self.initial_design_df = self.initial_design_df.append(design_dict, ignore_index=True,sort=True)
def collect_final_design(self):
"""Collect the solvent extraction design at end of episode."""
design_dict = self.get_design()
logger.debug(f'{self.name}:Collecting design {design_dict} at end of episode {self.episode_count}')
self.final_design_df = self.final_design_df.append(design_dict, ignore_index=True,sort=True)
def show_all_initial_metrics(self):
"""Show metric statistics for all episodes"""
print(f'Initial Recovery,Purity, and recority over {self.episode_count} episodes:')
print(pd.concat([self.initial_recovery_df,self.initial_purity_df,self.initial_recority_df], axis=1))
def show_all_final_metrics(self):
"""Show metric statistics for all episodes"""
print(f'Final Recovery,Purity, and recority over {self.episode_count} episodes:')
print(pd.concat([self.final_recovery_df,self.final_purity_df,self.final_recority_df], axis=1))
def show_initial_metric_statistics(self):
"""Show metric statistics."""
print(f'Initial Recovery statistics after {self.episode_count} episodes:')
print(self.initial_recovery_df.describe())
print(f'Initial Purity statistics after {self.episode_count} episodes:')
print(self.initial_purity_df.describe())
print(f'Initial Recority statistics after {self.episode_count} episodes:')
print(self.initial_recority_df.describe())
def show_final_metric_statistics(self):
"""Show final metric statistics."""
print(f'Final Recovery statistics after {self.episode_count} episodes:')
print(self.final_recovery_df.describe())
print(f'Final Purity statistics after {self.episode_count} episodes:')
print(self.final_purity_df.describe())
print(f'Final Recority statistics after {self.episode_count} episodes:')
print(self.final_recority_df.describe())
def show_initial_design(self):
"""Show initial design statistics."""
print(f'Initial solvent design over {self.episode_count} episodes:')
print(self.initial_design_df)
print(f'Initial solvent design statistics over {self.episode_count} episodes:')
print(self.initial_design_df.describe())
def show_final_design(self):
"""Show final design statistics."""
print(f'Final solvent design over {self.episode_count} episodes:')
print(self.final_design_df)
print(f'Final solvent design statistics over {self.episode_count} episodes:')
print(self.final_design_df.describe())
def save_metrics(self):
"""Save metrics."""
logger.info(f'{self.name}:Saving metrics dataframe for {self.episode_count} episodes')
pd.concat([self.initial_recovery_df,self.initial_purity_df,self.initial_recority_df], axis=1).to_csv(self.name+'_initial_metrics.csv')
        # final-metrics filename below is assumed by analogy with the initial-metrics line above
        pd.concat([self.final_recovery_df,self.final_purity_df,self.final_recority_df], axis=1).to_csv(self.name+'_final_metrics.csv')
import numpy as np
import pandas as pd
import pickle
from embdata.misc import *
from embdata.models import CellsInfo, Expression
from PGCAltas.utils.statUniversal import split_arr
# def g_select(gpool_max):
# patched_func = None
# g_pool = pool.Pool(gpool_max)
# g = list()
# queue = list()
#
# def inner(instance, *args, **kwargs):
# setattr(instance, 'g', g)
# setattr(instance, 'queue', queue)
# patched_func(instance, *args, **kwargs)
# for task, arg in g:
# g_pool.apply_async(task, args=arg)
# g_pool.join()
# print(len(queue))
# return queue
#
# def g_patch(func):
# nonlocal patched_func
# patched_func = func
# return inner
#
# return g_patch
class DataGuider(object):
flags = ('pos', 'neg')
whole_genes = 29452
PKL_DIR = 'pickles'
CSV_DIR = 'texts'
    def __init__(self, file, db_partition=None):
# file_path
self.file = os.path.join(ROOT_, file)
self.pkl_file = os.path.join(ROOT_, self.PKL_DIR, file).rsplit('.', 1)[0]
self.csv_file = os.path.join(ROOT_, self.CSV_DIR, file).rsplit('.', 1)[0]
# built_df
self.datframe = None
self._numpy = None
self._index = None
self._header = None
        self.db_partition = db_partition or settings.EXPR_DB_PARTITION
self.cell_guider = None
self._expr_dict = dict()
def build_datframe(self, datframe=None, **kwtable):
if isinstance(datframe, pd.DataFrame):
self.datframe = datframe.copy()
else:
self.datframe = pd.read_table(self.file, **kwtable)
temp = [self.datframe.loc[self.datframe.flag == i]
.iloc[:, 0:-1] for i in self.flags]
self._numpy = [i.to_numpy(dtype=np.int32) for i in temp]
self._index = [i.index.to_numpy(dtype=np.int32) for i in temp]
self._header = temp[0].columns.to_numpy(dtype=np.int32)
return self
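    # melt_guider flattens the (cell-type x stage) count matrix of one flag into rows
    # of [type_id, stage_id, count], dropping zero-count combinations.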
def melt_guider(self, flag):
_numpy = self._numpy[flag]
indices = self._index[flag]
columns = self._header
row, col = _numpy.shape
cell_guider = np.zeros((row * col, 3), dtype=np.int32)
idx = 0
for r in range(row):
x = indices[r]
for c in range(col):
y = columns[c]
z = _numpy[r, c]
cell_guider[idx, :] = [x, y, z]
idx += 1
return cell_guider[cell_guider[:, 2] > 0]
@property
def _guiders(self):
return {k: self.melt_guider(i) for i, k in enumerate(self.flags)}
# @g_select(5)
def select_cell_from_guiders(self):
"""
:return: {
'neg': {
"type1": [cid, ...],
},
'pos': {"type1": [cid, ...],
}
}
"""
ret = {k: dict() for k in self.flags}
for k, v in self._guiders.items():
for x, y, z in v:
# TODO: Python3.7 choice can't used to queryset generator, plz use first qs
# qs = np.random.choice(list(CellsInfo.query.filter(type_id=x, stage_id=y)), size=z, replace=False)
qs = np.random.choice(CellsInfo.query.filter(type_id=x, stage_id=y), size=z, replace=False)
# db partition
if ret[k].get(x, None):
ret[k][x].extend([q.id for q in qs])
else:
ret[k][x] = [q.id for q in qs]
self._2pickle(ret, 'select_dict')
self.cell_guider = ret
def build_expr_set(self, pkl_name='select_dict', fold=10):
if self.cell_guider is None:
self._load_from_pickle(pkl_name, 'cell_guider')
n = sum([len(d) for d in self.cell_guider.values()])
print("Total Queried Types: %d" % n)
for k, v in self.cell_guider.items():
self._expr_dict[k] = list()
for ctype, cids in v.items():
print("CellTypeId: %s\tSelected: %s" % (ctype, len(cids)))
arrs = split_arr(cids, fold)
for idx, splited_cids in enumerate(arrs):
print("|---Batch: %s / %s" % (idx+1, len(arrs)))
mtx_df = self._build_part_expr(ctype, splited_cids)
self._expr_dict[k].append(mtx_df)
self._2pickle(self._expr_dict, 'expr_dict')
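    # Note: _build_part_expr fills the matrix assuming the queryset returns rows grouped
    # by cell id (cid); each change of cid starts a new matrix row.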
def _build_part_expr(self, ctype, cids):
mtx = np.zeros([len(cids), self.whole_genes])
qs = Expression.query.filter(ctype=ctype, cid_id__in=cids)
row_names = list()
cur_c, row = -1, -1
for q in qs:
c, g, e = q.cid_id, q.gid_id-1, q.expr
if c != cur_c:
row_names.append(c)
row += 1
cur_c = c
mtx[row][g] = e
return pd.DataFrame(mtx, index=row_names)
@property
def expr(self):
if hasattr(self, '__expr'):
return getattr(self, '__expr')
if not self._expr_dict:
self._load_from_pickle('expr_dict', '_expr_dict')
ret = dict()
for k, v in self._expr_dict.items():
if len(v) == 0:
continue
if len(v) == 1:
temp = v[0]
else:
                temp = pd.concat(v)
            # Reconstructed tail (an assumption, not the original code): cache the
            # per-flag expression frame and return the dict, mirroring the
            # getattr('__expr') check at the top of this property.
            ret[k] = temp
        setattr(self, '__expr', ret)
        return ret
import os
import pandas as pd
import csv
from os.path import isfile, join, abspath, dirname
import warnings
def consolidate_csvs(energy_data_path, fcas_data_path, output_folder, output_prefix, price_column_name = "RRP", region="SA1"):
"""
    Takes folders with MMS csvs and creates new csvs
    with just the demand and price data.
Assumes that all csvs in the directory are
to be used and follow the MMS format.
Warns the user if there are missing datetimes.
Args:
data_path: string
Absolute path to directory containing csvs with MMS data.
output_folder: string
Absolute path to directory where outputted csvs will be created.
output_prefix: string
Prefix for the filename of the outputted csvs.
region: string
RegionID for the desired region data.
Returns:
None
"""
five_min_df = pd.DataFrame(
columns=["Timestamp", "Region", "Energy_Price", "Energy_Demand", "5min_Raise_Demand", "5min_Lower_Demand"])
thirty_min_df = pd.DataFrame(
columns=["Timestamp", "Region", "Energy_Price", "Energy_Demand", "5min_Raise_Demand", "5min_Lower_Demand"])
fcas_df = pd.DataFrame(
columns = ["Timestamp", "5min_Raise_Price", "5min_Lower_Price"])
# grab csvs from the specified energy data folder
onlycsvs = [
join(energy_data_path, f) for f in os.listdir(energy_data_path)
if isfile(join(energy_data_path, f)) and f.lower().endswith(".csv")
]
for csv_name in onlycsvs:
print("Reading {}".format(csv_name.split("/")[-1]))
with open(csv_name) as csvfile:
reader = csv.reader(csvfile)
demand_index = None
price_index = None
raise_demand_index = None
lower_demand_index = None
timestamp_index = None
region_index = None
freq = None
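            # MMS csv rows are tagged by their first column: "C" = comment/log rows,
            # "I" = header rows (the header can repeat mid-file), "D" = data rows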
for row in reader:
if row[0] == "C":
# logging rows are useless
pass
elif row[0] == "I":
# header row (sometimes the format of the csv changes
# in the middle so there can be multiple header rows)
demand_index = row.index("TOTALDEMAND")
raise_demand_index = row.index("RAISE5MINLOCALDISPATCH")
lower_demand_index = row.index("LOWER5MINLOCALDISPATCH")
price_index = row.index(price_column_name)
timestamp_index = row.index("SETTLEMENTDATE")
region_index = row.index("REGIONID")
if row[1] == "DREGION":
freq = 5
elif row[1] == "TREGION":
freq = 30
else:
freq = None
elif row[0] == "D":
# data row
data = {}
data["Timestamp"] = | pd.to_datetime(row[timestamp_index]) | pandas.to_datetime |
import os
import unittest
import pandas as pd
from sklearn import datasets
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, Imputer, LabelEncoder, LabelBinarizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.svm import SVC, SVR, LinearSVC, LinearSVR, OneClassSVM
from sklearn.decomposition import PCA
from sklearn.naive_bayes import GaussianNB
from sklearn_pandas import DataFrameMapper
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor, RandomForestClassifier, RandomForestRegressor
from sklearn.linear_model import LinearRegression, LogisticRegression, RidgeClassifier, SGDClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from nyoka import skl_to_pmml
class TestMethods(unittest.TestCase):
def test_sklearn_01(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data,columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
('svm',SVC())
])
pipeline_obj.fit(irisd[features],irisd[target])
skl_to_pmml(pipeline_obj,features,target,"svc_pmml.pmml")
self.assertEqual(os.path.isfile("svc_pmml.pmml"),True)
def test_sklearn_02(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data,columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
('scaling',StandardScaler()),
('knn',KNeighborsClassifier(n_neighbors = 5))
])
pipeline_obj.fit(irisd[features],irisd[target])
skl_to_pmml(pipeline_obj,features,target,"knn_pmml.pmml")
self.assertEqual(os.path.isfile("knn_pmml.pmml"),True)
def test_sklearn_03(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("mapping", DataFrameMapper([
(['sepal length (cm)', 'sepal width (cm)'], StandardScaler()) ,
(['petal length (cm)', 'petal width (cm)'], Imputer())
])),
("rfc", RandomForestClassifier(n_estimators = 100))
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "rf_pmml.pmml")
self.assertEqual(os.path.isfile("rf_pmml.pmml"),True)
def test_sklearn_04(self):
titanic = pd.read_csv("nyoka/tests/titanic_train.csv")
titanic['Embarked'] = titanic['Embarked'].fillna('S')
features = list(titanic.columns.drop(['PassengerId','Name','Ticket','Cabin','Survived']))
target = 'Survived'
pipeline_obj = Pipeline([
("mapping", DataFrameMapper([
(['Sex'], LabelEncoder()),
(['Embarked'], LabelEncoder())
])),
("imp", Imputer(strategy="median")),
("gbc", GradientBoostingClassifier(n_estimators = 10))
])
pipeline_obj.fit(titanic[features],titanic[target])
skl_to_pmml(pipeline_obj, features, target, "gb_pmml.pmml")
self.assertEqual(os.path.isfile("gb_pmml.pmml"),True)
def test_sklearn_05(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg'],axis=1)
y = df['mpg']
features = [name for name in df.columns if name not in ('mpg')]
target = 'mpg'
pipeline_obj = Pipeline([
('mapper', DataFrameMapper([
('car name', TfidfVectorizer())
])),
('model',DecisionTreeRegressor())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"dtr_pmml.pmml")
self.assertEqual(os.path.isfile("dtr_pmml.pmml"),True)
def test_sklearn_06(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
pipeline_obj = Pipeline([
('model',LinearRegression())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"linearregression_pmml.pmml")
self.assertEqual(os.path.isfile("linearregression_pmml.pmml"),True)
def test_sklearn_07(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("mapping", DataFrameMapper([
(['sepal length (cm)', 'sepal width (cm)'], StandardScaler()) ,
(['petal length (cm)', 'petal width (cm)'], Imputer())
])),
("lr", LogisticRegression())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "logisticregression_pmml.pmml")
self.assertEqual(os.path.isfile("logisticregression_pmml.pmml"),True)
def test_sklearn_08(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
('pca',PCA(2)),
('mod',LogisticRegression())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "logisticregression_pca_pmml.pmml")
self.assertEqual(os.path.isfile("logisticregression_pca_pmml.pmml"),True)
def test_sklearn_09(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("SGD", SGDClassifier())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "sgdclassifier_pmml.pmml")
self.assertEqual(os.path.isfile("sgdclassifier_pmml.pmml"),True)
def test_sklearn_10(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("lsvc", LinearSVC())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "linearsvc_pmml.pmml")
self.assertEqual(os.path.isfile("linearsvc_pmml.pmml"),True)
def test_sklearn_11(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
pipeline_obj = Pipeline([
('model',LinearSVR())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"linearsvr_pmml.pmml")
self.assertEqual(os.path.isfile("linearsvr_pmml.pmml"),True)
def test_sklearn_12(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
pipeline_obj = Pipeline([
('model',GradientBoostingRegressor())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"gbr.pmml")
self.assertEqual(os.path.isfile("gbr.pmml"),True)
def test_sklearn_13(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("SGD", DecisionTreeClassifier())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "dtr_clf.pmml")
self.assertEqual(os.path.isfile("dtr_clf.pmml"),True)
def test_sklearn_14(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
pipeline_obj = Pipeline([
('model',RandomForestRegressor())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"rfr.pmml")
self.assertEqual(os.path.isfile("rfr.pmml"),True)
def test_sklearn_15(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
pipeline_obj = Pipeline([
('model',KNeighborsRegressor())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"knnr.pmml")
self.assertEqual(os.path.isfile("knnr.pmml"),True)
def test_sklearn_16(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
pipeline_obj = Pipeline([
('model',SVR())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"svr.pmml")
self.assertEqual(os.path.isfile("svr.pmml"),True)
def test_sklearn_17(self):
irisdata = datasets.load_iris()
        iris = pd.DataFrame(irisdata.data,columns=irisdata.feature_names)