| prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars) |
---|---|---|
# pylint: disable-msg=E1101,E1103
# pylint: disable-msg=W0212,W0703,W0231,W0622
from cStringIO import StringIO
import sys
from numpy import NaN
import numpy as np
from pandas.core.common import (_pickle_array, _unpickle_array)
from pandas.core.frame import DataFrame, _try_sort, _extract_index
from pandas.core.index import Index, NULL_INDEX
from pandas.core.series import Series
import pandas.core.common as common
import pandas.core.datetools as datetools
import pandas.lib.tseries as tseries
#-------------------------------------------------------------------------------
# DataMatrix class
class DataMatrix(DataFrame):
"""
Matrix version of DataFrame, optimized for cross-section operations,
numerical computation, and other operations that do not require the
frame to change size.
Parameters
----------
data : numpy ndarray or dict of sequence-like objects
Dict can contain Series, arrays, or list-like objects
Constructor can understand various kinds of inputs
index : Index or array-like
Index to use for resulting frame (optional if provided dict of Series)
columns : Index or array-like
Required if data is ndarray
dtype : dtype, default None (infer)
Data type to force
Notes
-----
Transposing and cross-section access (getXS) are much faster with a DataMatrix
than with a DataFrame, so take note of this.
"""
objects = None
def __init__(self, data=None, index=None, columns=None, dtype=None,
objects=None):
if isinstance(data, dict) and len(data) > 0:
(index, columns,
values, objects) = self._initDict(data, index, columns, objects,
dtype)
elif isinstance(data, (np.ndarray, list)):
(index, columns, values) = self._initMatrix(data, index,
columns, dtype)
if objects is not None:
if isinstance(objects, DataMatrix):
if not objects.index.equals(index):
objects = objects.reindex(index)
else:
objects = DataMatrix(objects, index=index)
elif isinstance(data, DataFrame):
if not isinstance(data, DataMatrix):
data = data.toDataMatrix()
values = data.values
index = data.index
columns = data.columns
objects = data.objects
elif data is None or len(data) == 0:
# this is a touch convoluted...
if objects is not None:
if isinstance(objects, DataMatrix):
if index is not None and objects.index is not index:
objects = objects.reindex(index)
else:
objects = DataMatrix(objects, index=index)
index = objects.index
if index is None:
N = 0
index = NULL_INDEX
else:
N = len(index)
if columns is None:
K = 0
columns = NULL_INDEX
else:
K = len(columns)
values = np.empty((N, K), dtype=dtype)
values[:] = NaN
else:
raise Exception('DataMatrix constructor not properly called!')
self.values = values
self.index = index
self.columns = columns
self.objects = objects
def _initDict(self, data, index, columns, objects, dtype):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
Somehow this got outrageously complicated
"""
# pre-filter out columns if we passed it
if columns is not None:
colset = set(columns)
data = dict((k, v) for k, v in data.iteritems() if k in colset)
index = _extract_index(data, index)
objectDict = {}
if objects is not None and isinstance(objects, dict):
objectDict.update(objects)
valueDict = {}
for k, v in data.iteritems():
if isinstance(v, Series):
if v.index is not index:
# Forces alignment. No need to copy data since we
# are putting it into an ndarray later
v = v.reindex(index)
else:
if isinstance(v, dict):
v = [v.get(i, NaN) for i in index]
else:
assert(len(v) == len(index))
try:
v = Series(v, dtype=dtype, index=index)
except Exception:
v = Series(v, index=index)
if issubclass(v.dtype.type, (np.bool_, float, int)):
valueDict[k] = v
else:
objectDict[k] = v
if columns is None:
columns = Index(_try_sort(valueDict))
objectColumns = Index(_try_sort(objectDict))
else:
objectColumns = Index([c for c in columns if c in objectDict])
columns = Index([c for c in columns if c not in objectDict])
if len(valueDict) == 0:
dtype = np.object_
valueDict = objectDict
columns = objectColumns
else:
dtypes = set(v.dtype for v in valueDict.values())
if len(dtypes) > 1:
dtype = np.float_
else:
dtype = list(dtypes)[0]
if len(objectDict) > 0:
new_objects = DataMatrix(objectDict,
dtype=np.object_,
index=index,
columns=objectColumns)
if isinstance(objects, DataMatrix):
objects = objects.join(new_objects, how='left')
else:
objects = new_objects
values = np.empty((len(index), len(columns)), dtype=dtype)
for i, col in enumerate(columns):
if col in valueDict:
values[:, i] = valueDict[col]
else:
values[:, i] = np.NaN
return index, columns, values, objects
def _initMatrix(self, values, index, columns, dtype):
if not isinstance(values, np.ndarray):
arr = np.array(values)
if issubclass(arr.dtype.type, basestring):
arr = np.array(values, dtype=object, copy=True)
values = arr
if values.ndim == 1:
N = values.shape[0]
if N == 0:
values = values.reshape((values.shape[0], 0))
else:
values = values.reshape((values.shape[0], 1))
if dtype is not None:
try:
values = values.astype(dtype)
except Exception:
pass
N, K = values.shape
if index is None:
if N == 0:
index = NULL_INDEX
else:
index = np.arange(N)
if columns is None:
if K == 0:
columns = NULL_INDEX
else:
columns = np.arange(K)
return index, columns, values
@property
def _constructor(self):
return DataMatrix
# Because of DataFrame property
values = None
def __array__(self):
return self.values
def __array_wrap__(self, result):
return DataMatrix(result, index=self.index, columns=self.columns)
#-------------------------------------------------------------------------------
# DataMatrix-specific implementation of private API
def _join_on(self, other, on):
if len(other.index) == 0:
return self
if on not in self:
raise Exception('%s column not contained in this frame!' % on)
fillVec, mask = tseries.getMergeVec(self[on],
other.index.indexMap)
tmpMatrix = other.values.take(fillVec, axis=0)
tmpMatrix[-mask] = NaN
seriesDict = dict((col, tmpMatrix[:, j])
for j, col in enumerate(other.columns))
if getattr(other, 'objects'):
objects = other.objects
tmpMat = objects.values.take(fillVec, axis=0)
tmpMat[-mask] = NaN
objDict = dict((col, tmpMat[:, j])
for j, col in enumerate(objects.columns))
seriesDict.update(objDict)
filledFrame = DataFrame(data=seriesDict, index=self.index)
return self.join(filledFrame, how='left')
def _reindex_index(self, index, method):
if index is self.index:
return self.copy()
if not isinstance(index, Index):
index = Index(index)
if len(self.index) == 0:
return DataMatrix(index=index, columns=self.columns)
indexer, mask = common.get_indexer(self.index, index, method)
mat = self.values.take(indexer, axis=0)
notmask = -mask
if len(index) > 0:
if notmask.any():
if issubclass(mat.dtype.type, np.int_):
mat = mat.astype(float)
elif issubclass(mat.dtype.type, np.bool_):
mat = mat.astype(float)
common.null_out_axis(mat, notmask, 0)
if self.objects is not None and len(self.objects.columns) > 0:
newObjects = self.objects.reindex(index)
else:
newObjects = None
return DataMatrix(mat, index=index, columns=self.columns,
objects=newObjects)
def _reindex_columns(self, columns):
if len(columns) == 0:
return DataMatrix(index=self.index)
if not isinstance(columns, Index):
columns = Index(columns)
if self.objects is not None:
object_columns = columns.intersection(self.objects.columns)
columns = columns - object_columns
objects = self.objects._reindex_columns(object_columns)
else:
objects = None
if len(columns) > 0 and len(self.columns) == 0:
return DataMatrix(index=self.index, columns=columns,
objects=objects)
indexer, mask = common.get_indexer(self.columns, columns, None)
mat = self.values.take(indexer, axis=1)
notmask = -mask
if len(mask) > 0:
if notmask.any():
if issubclass(mat.dtype.type, np.int_):
mat = mat.astype(float)
elif issubclass(mat.dtype.type, np.bool_):
mat = mat.astype(float)
common.null_out_axis(mat, notmask, 1)
return DataMatrix(mat, index=self.index, columns=columns,
objects=objects)
def _rename_columns_inplace(self, mapper):
self.columns = [mapper(x) for x in self.columns]
if self.objects is not None:
self.objects._rename_columns_inplace(mapper)
def _combineFrame(self, other, func):
"""
Methodology, briefly
- Really concerned here about speed, space
- Get new index
- Reindex to new index
- Determine newColumns and commonColumns
- Add common columns over all (new) indices
- Fill to new set of columns
Could probably deal with some Cython action in here at some point
"""
need_reindex = False
if self.index.equals(other.index):
newIndex = self.index
else:
newIndex = self.index.union(other.index)
need_reindex = True
if not self and not other:
return DataMatrix(index=newIndex)
elif not self:
return other * NaN
elif not other:
return self * NaN
if self.columns.equals(other.columns):
newColumns = self.columns
else:
newColumns = self.columns.union(other.columns)
need_reindex = True
if need_reindex:
myReindex = self.reindex(index=newIndex,
columns=newColumns)
hisReindex = other.reindex(index=newIndex,
columns=newColumns)
else:
myReindex = self
hisReindex = other
myValues = myReindex.values
hisValues = hisReindex.values
return DataMatrix(func(myValues, hisValues),
index=newIndex, columns=newColumns)
def _combineSeries(self, other, func):
newIndex = self.index
newCols = self.columns
if len(self) == 0:
# Ambiguous case
return DataMatrix(index=self.index, columns=self.columns,
objects=self.objects)
if self.index._allDates and other.index._allDates:
# Operate row-wise
if self.index.equals(other.index):
newIndex = self.index
other_vals = other.values
values = self.values
else:
newIndex = self.index + other.index
if other.index.equals(newIndex):
other_vals = other.values
else:
other_vals = other.reindex(newIndex).values
if self.index.equals(newIndex):
values = self.values
else:
values = self.reindex(newIndex).values
resultMatrix = func(values.T, other_vals).T
else:
if len(other) == 0:
return self * NaN
newCols = self.columns.union(other.index)
# Operate column-wise
this = self.reindex(columns=newCols)
other = other.reindex(newCols).values
resultMatrix = func(this.values, other)
# TODO: deal with objects
return DataMatrix(resultMatrix, index=newIndex, columns=newCols)
def _combineFunc(self, other, func):
"""
Combine DataMatrix objects with other Series- or DataFrame-like objects
This is the core method used for the overloaded arithmetic methods
Result hierarchy
----------------
DataMatrix + DataFrame --> DataMatrix
DataMatrix + DataMatrix --> DataMatrix
DataMatrix + Series --> DataMatrix
DataMatrix + constant --> DataMatrix
The reason for 'upcasting' the result is that if addition succeeds,
we can assume that the input DataFrame was homogeneous.
"""
newIndex = self.index
if isinstance(other, DataFrame):
return self._combineFrame(other, func)
elif isinstance(other, Series):
return self._combineSeries(other, func)
else:
if not self:
return self
# Constant of some kind
newCols = self.columns
resultMatrix = func(self.values, other)
# TODO: deal with objects
return DataMatrix(resultMatrix, index=newIndex, columns=newCols)
#-------------------------------------------------------------------------------
# Properties for index and columns
_columns = None
def _get_columns(self):
return self._columns
def _set_columns(self, cols):
if len(cols) != self.values.shape[1]:
raise Exception('Columns length %d did not match values %d!' %
(len(cols), self.values.shape[1]))
if not isinstance(cols, Index):
cols = Index(cols)
self._columns = cols
columns = property(fget=_get_columns, fset=_set_columns)
def _set_index(self, index):
if len(index) > 0:
if len(index) != self.values.shape[0]:
raise Exception('Index length %d did not match values %d!' %
(len(index), self.values.shape[0]))
if not isinstance(index, Index):
index = Index(index)
self._index = index
if self.objects is not None:
self.objects._index = index
def _get_index(self):
return self._index
index = property(fget=_get_index, fset=_set_index)
#-------------------------------------------------------------------------------
# "Magic methods"
def __getstate__(self):
if self.objects is not None:
objects = self.objects._matrix_state(pickle_index=False)
else:
objects = None
state = self._matrix_state()
return (state, objects)
def _matrix_state(self, pickle_index=True):
columns = _pickle_array(self.columns)
if pickle_index:
index = _pickle_array(self.index)
else:
index = None
return self.values, index, columns
def __setstate__(self, state):
(vals, idx, cols), object_state = state
self.values = vals
self.index = _unpickle_array(idx)
self.columns = _unpickle_array(cols)
if object_state:
ovals, _, ocols = object_state
self.objects = DataMatrix(ovals,
index=self.index,
columns=_unpickle_array(ocols))
else:
self.objects = None
def __nonzero__(self):
N, K = self.values.shape
if N == 0 or K == 0:
if self.objects is None:
return False
else:
return self.objects.__nonzero__()
else:
return True
def __neg__(self):
mycopy = self.copy()
mycopy.values = -mycopy.values
return mycopy
def __repr__(self):
"""Return a string representation for a particular DataMatrix"""
buffer = StringIO()
if len(self.cols()) == 0:
buffer.write('Empty DataMatrix\nIndex: %s' % repr(self.index))
elif 0 < len(self.index) < 500 and self.values.shape[1] < 10:
self.toString(buffer=buffer)
else:
print >> buffer, str(self.__class__)
self.info(buffer=buffer)
return buffer.getvalue()
def __getitem__(self, item):
"""
Retrieve column, slice, or subset from DataMatrix.
Possible inputs
---------------
single value : retrieve a column as a Series
slice : reindex to indices specified by slice
boolean vector : like slice but more general, reindex to indices
where the input vector is True
Examples
--------
column = dm['A']
dmSlice = dm[:20] # First 20 rows
dmSelect = dm[dm.count(axis=1) > 10]
Notes
-----
This is a magic method. Do NOT call explicitly.
"""
if isinstance(item, slice):
indexRange = self.index[item]
return self.reindex(indexRange)
elif isinstance(item, np.ndarray):
if len(item) != len(self.index):
raise Exception('Item wrong length %d instead of %d!' %
(len(item), len(self.index)))
newIndex = self.index[item]
return self.reindex(newIndex)
else:
if self.objects is not None and item in self.objects:
return self.objects[item]
else:
return self._getSeries(item)
_dataTypes = [np.float_, np.bool_, np.int_]
def __setitem__(self, key, value):
"""
Add series to DataMatrix in specified column.
If series is a numpy-array (not a Series/TimeSeries), it must be the
same length as the DataMatrix's index or an error will be thrown.
Series/TimeSeries will be conformed to the DataMatrix's index to
ensure homogeneity.
"""
if hasattr(value, '__iter__'):
if isinstance(value, Series):
if value.index.equals(self.index):
# no need to copy
value = value.values
else:
value = value.reindex(self.index).values
else:
assert(len(value) == len(self.index))
if not isinstance(value, np.ndarray):
value = np.array(value)
if value.dtype.type == np.str_:
value = np.array(value, dtype=object)
else:
value = np.repeat(value, len(self.index))
if self.values.dtype == np.object_:
self._insert_object_dtype(key, value)
else:
self._insert_float_dtype(key, value)
def _insert_float_dtype(self, key, value):
isObject = value.dtype not in self._dataTypes
if key in self.columns:
loc = self.columns.indexMap[key]
self.values[:, loc] = value
elif isObject:
if self.objects is None:
self.objects = DataMatrix({key : value},
index=self.index)
else:
self.objects[key] = value
elif len(self.columns) == 0:
self.values = value.reshape((len(value), 1)).astype(np.float)
self.columns = Index([key])
else:
try:
loc = self.columns.searchsorted(key)
except TypeError:
loc = len(self.columns)
if loc == self.values.shape[1]:
newValues = np.c_[self.values, value]
newColumns = Index(np.concatenate((self.columns, [key])))
elif loc == 0:
newValues = np.c_[value, self.values]
newColumns = Index(np.concatenate(([key], self.columns)))
else:
newValues = np.c_[self.values[:, :loc], value,
self.values[:, loc:]]
toConcat = (self.columns[:loc], [key], self.columns[loc:])
newColumns = Index(np.concatenate(toConcat))
self.values = newValues
self.columns = newColumns
def _insert_object_dtype(self, key, value):
if key in self.columns:
loc = self.columns.indexMap[key]
self.values[:, loc] = value
elif len(self.columns) == 0:
self.values = value.reshape((len(value), 1)).copy()
self.columns = Index([key])
else:
try:
loc = self.columns.searchsorted(key)
except TypeError:
loc = len(self.columns)
if loc == self.values.shape[1]:
newValues = np.c_[self.values, value]
newColumns = Index(np.concatenate((self.columns, [key])))
elif loc == 0:
newValues = np.c_[value, self.values]
newColumns = Index(np.concatenate(([key], self.columns)))
else:
newValues = np.c_[self.values[:, :loc], value,
self.values[:, loc:]]
toConcat = (self.columns[:loc], [key], self.columns[loc:])
newColumns = Index(np.concatenate(toConcat))
self.values = newValues
self.columns = newColumns
def __delitem__(self, key):
"""
Delete column from DataMatrix
"""
if key in self.columns:
loc = self.columns.indexMap[key]
if loc == self.values.shape[1] - 1:
newValues = self.values[:, :loc]
newColumns = self.columns[:loc]
else:
newValues = np.c_[self.values[:, :loc], self.values[:, loc+1:]]
newColumns = Index(np.concatenate((self.columns[:loc],
self.columns[loc+1:])))
self.values = newValues
self.columns = newColumns
else:
if self.objects is not None and key in self.objects:
del self.objects[key]
else:
raise KeyError('%s' % key)
def __iter__(self):
"""Iterate over columns of the frame."""
return iter(self.columns)
def __contains__(self, key):
"""True if DataMatrix has this column"""
hasCol = key in self.columns
if hasCol:
return True
else:
if self.objects is not None and key in self.objects:
return True
return False
def iteritems(self):
return self._series.iteritems()
#-------------------------------------------------------------------------------
# Helper methods
# For DataFrame compatibility
def _getSeries(self, item=None, loc=None):
if loc is None:
try:
loc = self.columns.indexMap[item]
except KeyError:
raise Exception('%s not here!' % item)
return Series(self.values[:, loc], index=self.index)
def _getSeriesDict(self):
series = {}
for i, col in enumerate(self.columns):
series[col] = self._getSeries(loc=i)
if self.objects is not None:
for i, col in enumerate(self.objects.columns):
series[col] = self.objects._getSeries(loc=i)
return series
_series = property(_getSeriesDict)
#-------------------------------------------------------------------------------
# Outputting
def toString(self, buffer=sys.stdout, columns=None, colSpace=15,
nanRep='NaN', formatters=None, float_format=None):
"""
Output a string version of this DataMatrix
"""
_pf = common._pfixed
formatters = formatters or {}
if columns is None:
columns = self.columns
values = self.values
if self.objects:
columns = list(columns) + list(self.objects.columns)
values = np.column_stack((values.astype(object),
self.objects.values))
else:
columns = [c for c in columns if c in self]
values = self.asMatrix(columns)
ident = lambda x: x
idxSpace = max([len(str(idx)) for idx in self.index]) + 4
if len(self.cols()) == 0:
buffer.write('DataMatrix is empty!\n')
buffer.write(repr(self.index))
else:
buffer.write(_pf('', idxSpace))
for h in columns:
buffer.write(_pf(h, colSpace))
buffer.write('\n')
for i, idx in enumerate(self.index):
buffer.write(_pf(idx, idxSpace))
for j, col in enumerate(columns):
formatter = formatters.get(col, ident)
buffer.write(_pf(formatter(values[i, j]), colSpace,
float_format=float_format,
nanRep=nanRep))
buffer.write('\n')
def info(self, buffer=sys.stdout):
"""
Concise summary of a DataMatrix, used in __repr__ when very large.
"""
print >> buffer, 'Index: %s entries' % len(self.index),
if len(self.index) > 0:
print >> buffer, ', %s to %s' % (self.index[0], self.index[-1])
else:
print >> buffer, ''
if len(self.columns) == 0:
print >> buffer, 'DataMatrix is empty!'
print >> buffer, repr(self.index)
return
print >> buffer, 'Data columns:'
space = max([len(str(k)) for k in self.cols()]) + 4
counts = self.count()
cols = self.cols()
assert(len(cols) == len(counts))
columns = []
for col, count in counts.iteritems():
columns.append('%s%d non-null values' %
( | common._pfixed(col, space) | pandas.core.common._pfixed |
import unittest
from pydre import project
from pydre import core
from pydre import filters
from pydre import metrics
import os
import glob
import contextlib
import io
from tests.sample_pydre import project as samplePD
from tests.sample_pydre import core as c
import pandas
import numpy as np
from datetime import timedelta
import logging
import sys
class WritableObject:
def __init__(self):
self.content = []
def write(self, string):
self.content.append(string)
# Test cases of following functions are not included:
# Reason: unmaintained
# in common.py:
# tbiReaction()
# tailgatingTime() & tailgatingPercentage()
# ecoCar()
# gazeNHTSA()
#
# Reason: incomplete
# in common.py:
# findFirstTimeOutside()
# brakeJerk()
class TestPydre(unittest.TestCase):
ac_diff = 0.000001
# the acceptable difference between expected & actual results when testing scipy functions
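# Illustrative use of this tolerance (not an actual assertion from this suite):
#   self.assertTrue(abs(actual - expected) < self.ac_diff)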
def setUp(self):
# attributes assigned here as self.<name> are accessible in every test method; setUp runs before each test
self.projectlist = ["honda.json"]
self.datalist = ["Speedbump_Sub_8_Drive_1.dat", "ColTest_Sub_10_Drive_1.dat"]
self.zero = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
funcName = ' [ ' + self._testMethodName + ' ] ' # the name of the test function that will be executed right after this setUp()
print(' ')
print (funcName.center(80,'#'))
print(' ')
def tearDown(self):
print(' ')
print('[ END ]'.center(80, '#'))
print(' ')
# ----- Helper Methods -----
def projectfileselect(self, index: int):
projectfile = self.projectlist[index]
fullpath = os.path.join("tests/test_projectfiles/", projectfile)
return fullpath
def datafileselect(self, index: int):
datafile = self.datalist[index]
fullpath = glob.glob(os.path.join(os.getcwd(), "tests/test_datfiles/", datafile))
return fullpath
def secs_to_timedelta(self, secs):
return timedelta(weeks=0, days=0, hours=0, minutes=0, seconds=secs)
def compare_cols(self, result_df, expected_df, cols):
result = True
for names in cols:
result = result and result_df[names].equals(expected_df[names])
if not result:
print(names)
print(result_df[names])
print("===")
print(expected_df[names])
return False
return result
# convert a drivedata object to a str
def dd_to_str(self, drivedata: core.DriveData):
output = ""
output += str(drivedata.PartID)
output += str(drivedata.DriveID)
output += str(drivedata.roi)
output += str(drivedata.data)
output += str(drivedata.sourcefilename)
return output
# ----- Test Cases -----
def test_datafile_exist(self):
datafiles = self.datafileselect(0)
self.assertFalse(0 == len(datafiles))
for f in datafiles:
self.assertTrue(os.path.isfile(f))
def test_reftest(self):
desiredproj = self.projectfileselect(0)
p = project.Project(desiredproj)
results = p.run(self.datafileselect(0))
results.Subject.astype('int64')
sample_p = samplePD.Project(desiredproj)
expected_results = (sample_p.run(self.datafileselect(0)))
self.assertTrue(self.compare_cols(results, expected_results, ['ROI', 'getTaskNum']))
def test_columnMatchException_excode(self):
f = io.StringIO()
with self.assertRaises(SystemExit) as cm:
desiredproj = self.projectfileselect(0)
p = project.Project(desiredproj)
result = p.run(self.datafileselect(1))
self.assertEqual(cm.exception.code, 1)
def test_columnMatchException_massage(self):
d3 = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184]}
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
handler = logging.FileHandler(filename='tests\\temp.log')
filters.logger.addHandler(handler)
with self.assertRaises(core.ColumnsMatchError):
result = filters.smoothGazeData(data_object)
expected_console_output = "Can't find needed columns {'FILTERED_GAZE_OBJ_NAME'} in data file ['test_file3.csv'] | function: smoothGazeData"
temp_log = open('tests\\temp.log')
msg_list = temp_log.readlines()
msg = ' '.join(msg_list)
filters.logger.removeHandler(handler)
#self.assertIn(expected_console_output, msg)
# Isolated test case: there is no longer a sliceByTime function in pydre.core
def test_core_sliceByTime_1(self):
d = {'col1': [1, 2, 3, 4, 5, 6], 'col2': [7, 8, 9, 10, 11, 12]}
df = pandas.DataFrame(data=d)
result = (c.sliceByTime(1, 3, "col1", df).to_string()).lstrip()
expected_result = "col1 col2\n0 1 7\n1 2 8\n2 3 9"
self.assertEqual(result, expected_result)
# Isolated test case: there is no longer a sliceByTime function in pydre.core
def test_core_sliceByTime_2(self):
d = {'col1': [1, 1.1, 3, 4, 5, 6], 'col2': [7, 8, 9, 10, 11, 12]}
df = pandas.DataFrame(data=d)
result = (c.sliceByTime(1, 2, "col1", df).to_string()).lstrip()
expected_result = "col1 col2\n0 1.0 7\n1 1.1 8"
self.assertEqual(result, expected_result)
def test_core_mergeBySpace(self):
d1 = {'SimTime': [1, 2], 'XPos': [1, 3], 'YPos': [4, 3]}
df1 = pandas.DataFrame(data=d1)
d2 = {'SimTime': [3, 4], 'XPos': [10, 12], 'YPos': [15, 16]}
df2 = pandas.DataFrame(data=d2)
data_object1 = core.DriveData.initV2(PartID=0,DriveID=1, data=df1, sourcefilename="test_file.csv")
data_object2 = core.DriveData.initV2(PartID=0, DriveID=2, data=df2, sourcefilename="test_file.csv")
param = []
param.append(data_object1)
param.append(data_object2)
result = self.dd_to_str(core.mergeBySpace(param))
expected_result = "01None SimTime XPos YPos\n0 1 1 4\n1 2 3 3\n0 2 10 15\n1 3 12 16test_file.csv"
self.assertEqual(result, expected_result)
def test_filter_numberSwitchBlocks_1(self):
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object3 = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.numberSwitchBlocks(drivedata=data_object3)
expected = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'taskblocks': [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]}
expected_result_df = pandas.DataFrame(data=expected)
expected_result = core.DriveData( data=expected_result_df, sourcefilename="test_file3.csv")
print(result.data)
print(expected_result.data)
self.assertEqual(len(result.data), len(expected_result.data))
self.assertTrue((self.compare_cols(expected_result.data, result.data, ['DatTime', 'TaskStatus', 'taskblocks'])))
self.assertEqual(result.sourcefilename, expected_result.sourcefilename)
def test_filter_numberSwitchBlocks_2(self):
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} #input
df = pandas.DataFrame(data=d)
data_object3 = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.numberSwitchBlocks(drivedata=data_object3)
#print(result.to_string())
expected = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'taskblocks': [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]}
expected_result_df = pandas.DataFrame(data=expected)
expected_result = core.DriveData( data=expected_result_df, sourcefilename="test_file3.csv")
self.assertEqual(len(result.data), len(expected_result.data))
self.assertTrue((self.compare_cols(expected_result.data, result.data, ['DatTime', 'TaskStatus', 'taskblocks'])))
self.assertEqual(result.sourcefilename, expected_result.sourcefilename)
def test_filter_numberSwitchBlocks_3(self):
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object3 = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.numberSwitchBlocks(drivedata=data_object3)
#print(result.to_string())
expected = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0],
'taskblocks': [np.nan, np.nan, np.nan, np.nan, np.nan, 1.0, 1.0, 1.0, 1.0, np.nan, np.nan]}
expected_result_df = pandas.DataFrame(data=expected)
expected_result = core.DriveData( data=expected_result_df, sourcefilename="test_file3.csv")
self.assertEqual(len(result.data), len(expected_result.data))
self.assertTrue((self.compare_cols(expected_result.data, result.data, ['DatTime', 'TaskStatus', 'taskblocks'])))
self.assertEqual(result.sourcefilename, expected_result.sourcefilename)
def test_filter_smoothGazeData_1(self):
d3 = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'FILTERED_GAZE_OBJ_NAME': ['localCS.CSLowScreen', 'localCS.CSLowScreen', 'localCS.CSLowScreen',
'localCS.CSLowScreen', 'localCS.CSLowScreen', 'localCS.CSLowScreen',
'localCS.CSLowScreen', 'localCS.CSLowScreen', 'localCS.CSLowScreen',
'localCS.CSLowScreen', 'localCS.CSLowScreen']}
# the function should identify this invalid input and return None after printing
# "Bad gaze data, not enough variety. Aborting"
print("expected console output: Bad gaze data, not enough variety. Aborting")
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.smoothGazeData(data_object)
#print(result.to_string())
self.assertEqual(None, result)
def test_filter_smoothGazeData_2(self):
d3 = {'DatTime': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
1.9, 2.0, 2.1, 2.2, 2.3, 2.4],
'FILTERED_GAZE_OBJ_NAME': ['localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane']}
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.smoothGazeData(data_object, latencyShift=0)
dat_time_col = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
1.9, 2.0, 2.1, 2.2, 2.3, 2.4]
timedelta_col = []
for t in dat_time_col:
timedelta_col.append(self.secs_to_timedelta(t))
expected = {'timedelta': timedelta_col, 'DatTime': dat_time_col,
'FILTERED_GAZE_OBJ_NAME': ['localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane'],
'gaze': ["offroad", "offroad", "offroad", "offroad", "onroad", "onroad", "onroad", "onroad", "onroad", "onroad", "onroad",
"onroad", "onroad", "onroad", "onroad", "offroad", "offroad", "offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad"],
'gazenum': np.array([1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3], dtype=np.int32)}
expected_result_df = pandas.DataFrame(data=expected)
self.assertTrue(expected_result_df.equals(result.data));
#self.assertTrue(self.compare_cols(result.data[0], expected_result_df, ['DatTime', 'FILTERED_GAZE_OBJ_NAME', 'gaze', 'gazenum']))
def test_filter_smoothGazeData_3(self):
# --- Construct input ---
dat_time_col = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
1.9, 2.0, 2.1, 2.2, 2.3, 2.4]
gaze_col = ['localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.dashPlane',
'localCS.WindScreen', 'localCS.dashPlane', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.dashPlane',
'localCS.WindScreen', 'localCS.dashPlane', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane']
d3 = {'DatTime': dat_time_col, 'FILTERED_GAZE_OBJ_NAME': gaze_col}
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# ----------------------
result = filters.smoothGazeData(data_object, latencyShift=0)
print(result.data)
timedelta_col = []
for t in dat_time_col:
timedelta_col.append(self.secs_to_timedelta(t))
expected = {'timedelta': timedelta_col, 'DatTime': dat_time_col,
'FILTERED_GAZE_OBJ_NAME': gaze_col,
'gaze': ["offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad", "offroad", "offroad", "offroad", "offroad", "offroad"],
'gazenum': np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=np.int32)}
expected_result_df = | pandas.DataFrame(data=expected) | pandas.DataFrame |
import calendar
from datetime import date, datetime, time
import locale
import unicodedata
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.timezones import maybe_get_tz
from pandas.core.dtypes.common import is_integer_dtype, is_list_like
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, Index, PeriodIndex, Series, TimedeltaIndex,
bdate_range, date_range, period_range, timedelta_range)
from pandas.core.arrays import PeriodArray
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
class TestSeriesDatetimeValues:
def test_dt_namespace_accessor(self):
# GH 7207, 11128
# test .dt namespace accessor
ok_for_period = PeriodArray._datetimelike_ops
ok_for_period_methods = ['strftime', 'to_timestamp', 'asfreq']
ok_for_dt = DatetimeIndex._datetimelike_ops
ok_for_dt_methods = ['to_period', 'to_pydatetime', 'tz_localize',
'tz_convert', 'normalize', 'strftime', 'round',
'floor', 'ceil', 'day_name', 'month_name']
ok_for_td = TimedeltaIndex._datetimelike_ops
ok_for_td_methods = ['components', 'to_pytimedelta', 'total_seconds',
'round', 'floor', 'ceil']
def get_expected(s, name):
result = getattr(Index(s._values), prop)
if isinstance(result, np.ndarray):
if | is_integer_dtype(result) | pandas.core.dtypes.common.is_integer_dtype |
import pytest
import pandas as pd
import numpy as np
@pytest.fixture(scope="function")
def set_helpers(request):
rand = np.random.RandomState(1337)
request.cls.ser_length = 120
request.cls.window = 12
request.cls.returns = pd.Series(
rand.randn(1, 120)[0] / 100.0,
index=pd.date_range("2000-1-30", periods=120, freq="M"),
)
request.cls.factor_returns = pd.Series(
rand.randn(1, 120)[0] / 100.0,
index=pd.date_range("2000-1-30", periods=120, freq="M"),
)
@pytest.fixture(scope="session")
def input_data():
simple_benchmark = pd.Series(
np.array([0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0]) / 100,
index=pd.date_range("2000-1-30", periods=9, freq="D"),
)
rand = np.random.RandomState(1337)
noise = pd.Series(
rand.normal(0, 0.001, 1000),
index=pd.date_range("2000-1-30", periods=1000, freq="D", tz="UTC"),
)
inv_noise = noise.multiply(-1)
noise_uniform = pd.Series(
rand.uniform(-0.01, 0.01, 1000),
index=pd.date_range("2000-1-30", periods=1000, freq="D", tz="UTC"),
)
random_100k = pd.Series(rand.randn(100_000))
mixed_returns = pd.Series(
np.array([np.nan, 1.0, 10.0, -4.0, 2.0, 3.0, 2.0, 1.0, -10.0]) / 100,
index=pd.date_range("2000-1-30", periods=9, freq="D"),
)
one = [
-0.00171614,
0.01322056,
0.03063862,
-0.01422057,
-0.00489779,
0.01268925,
-0.03357711,
0.01797036,
]
two = [
0.01846232,
0.00793951,
-0.01448395,
0.00422537,
-0.00339611,
0.03756813,
0.0151531,
0.03549769,
]
# Sparse noise, same as noise but with np.nan sprinkled in
replace_nan = rand.choice(noise.index.tolist(), rand.randint(1, 10))
sparse_noise = noise.replace(replace_nan, np.nan)
# Flat line tz
flat_line_1_tz = pd.Series(
np.linspace(0.01, 0.01, num=1000),
index=pd.date_range("2000-1-30", periods=1000, freq="D", tz="UTC"),
)
# Sparse flat line at 0.01
# replace_nan = rand.choice(noise.index.tolist(), rand.randint(1, 10))
sparse_flat_line_1_tz = flat_line_1_tz.replace(replace_nan, np.nan)
df_index_simple = pd.date_range("2000-1-30", periods=8, freq="D")
df_index_week = pd.date_range("2000-1-30", periods=8, freq="W")
df_index_month = pd.date_range("2000-1-30", periods=8, freq="M")
df_week = pd.DataFrame(
{
"one": | pd.Series(one, index=df_index_week) | pandas.Series |
""" Model for output of general/metadata data, useful for a batch """
import logging
from pathlib import Path
from typing import List, Optional, Union
import pandas as pd
from pydantic import BaseModel, Field, validator
from nowcasting_dataset.consts import SPATIAL_AND_TEMPORAL_LOCATIONS_OF_EACH_EXAMPLE_FILENAME
from nowcasting_dataset.filesystem.utils import check_path_exists
from nowcasting_dataset.utils import get_start_and_end_example_index
logger = logging.getLogger(__name__)
class SpaceTimeLocation(BaseModel):
"""Location of the example"""
t0_datetime_utc: pd.Timestamp = Field(
...,
description="The t0 of one example ",
)
x_center_osgb: float = Field(
...,
description="The x center of one example in OSGB coordinates",
)
y_center_osgb: float = Field(
...,
description="The y center of one example in OSGB coordinates",
)
id: Optional[int] = Field(
None,
description="The id of the GSP or the PV system. This is optional so can be None",
)
id_type: Optional[str] = Field(
None,
description="The type of the id. Should be either None, 'gsp' or 'pv_system'",
)
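# Hypothetical instance (coordinates and id are made-up values):
#   >>> loc = SpaceTimeLocation(t0_datetime_utc="2021-06-01 12:00",
#   ...                         x_center_osgb=123456.0, y_center_osgb=654321.0,
#   ...                         id=42, id_type="gsp")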
@validator("t0_datetime_utc")
def v_t0_datetime_utc(cls, t0_datetime_utc):
"""Make sure t0_datetime_utc is pandas Timestamp"""
return pd.Timestamp(t0_datetime_utc)
@validator("id_type")
def v_id_type(cls, id_type):
"""Make sure id_type is either None, 'gsp' or 'pv_system'"""
if id_type == "None":
id_type = None
assert id_type in [
None,
"gsp",
"pv_system",
], f"{id_type=} should be None, 'gsp' or 'pv_system'"
return id_type
class Metadata(BaseModel):
"""Class to store metadata data"""
batch_size: int = Field(
...,
ge=0,
description="The size of this batch. If the batch size is 0, "
"then this item stores one data item",
)
space_time_locations: List[SpaceTimeLocation]
@property
def t0_datetimes_utc(self) -> list:
"""Return all the t0"""
return [location.t0_datetime_utc for location in self.space_time_locations]
@property
def x_centers_osgb(self) -> List[float]:
"""List of all the x centers from all the locations"""
return [location.x_center_osgb for location in self.space_time_locations]
@property
def y_centers_osgb(self) -> List[float]:
"""List of all the x centers from all the locations"""
return [location.y_center_osgb for location in self.space_time_locations]
@property
def ids(self) -> List[float]:
"""List of all the ids from all the locations"""
return [location.id for location in self.space_time_locations]
def save_to_csv(self, path):
"""
Save metadata to a csv file
Args:
path: the path where the file should be saved
"""
filename = f"{path}/{SPATIAL_AND_TEMPORAL_LOCATIONS_OF_EACH_EXAMPLE_FILENAME}"
metadata_dict = [location.dict() for location in self.space_time_locations]
# metadata_dict.pop("batch_size")
# if file exists, add to it
try:
check_path_exists(filename)
except FileNotFoundError:
metadata_df = | pd.DataFrame(metadata_dict) | pandas.DataFrame |
import io
import pkgutil
import re
import subprocess as sp
import tempfile
import bs4
import dmf_chip as dc
import dropbot as db
import dropbot.chip
import dropbot.proxy
import dropbot.self_test
import jinja2
import json_tricks
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import path_helpers as ph
import qrcode
def draw_results(chip_info, events, axes=None):
if axes is None:
fig, axes = plt.subplots(1, 2, figsize=(20, 20))
# For colors, see: https://gist.github.com/cfobel/fd939073cf13a309d7a9
dark_green = '#059748'
light_green = '#90cd97'
light_blue = '#88bde6'
dark_orange = '#df5c24'
dark_red = '#cb2027'
chip_info_mm = dc.to_unit(chip_info, 'mm')
electrode_channels = {e['id']: e['channels'][0]
for e in chip_info['electrodes']}
for i, ax in enumerate(axes):
result = dc.draw(chip_info, ax=ax, unit='mm', labels=(i == 0))
for id_i, p in result['patches'].items():
p.set_edgecolor('none')
p.set_facecolor(light_blue)
labels = {t.get_text(): t for t in result['axis'].texts}
for id_i, label_i in labels.items():
label_i.set_text(electrode_channels[id_i])
result['patches']
x_coords = [p[0] for e in chip_info_mm['electrodes'] for p in e['points']]
y_coords = [p[1] for e in chip_info_mm['electrodes'] for p in e['points']]
for ax in axes:
ax.set_xlim(min(x_coords), max(x_coords))
ax.set_ylim(max(y_coords), min(y_coords))
ax.set_axis_off()
ax.set_frame_on(False)
# Draw QC test route
# ------------------
# Find center of electrode associated with each DropBot channel.
df_electrode_centers = pd.DataFrame([e['pole_of_accessibility']
for e in chip_info_mm['electrodes']],
index=[e['id'] for e in
chip_info_mm['electrodes']])
df_electrode_centers.index.name = 'id'
s_electrode_channels = pd.Series(electrode_channels)
df_channel_centers = df_electrode_centers.loc[s_electrode_channels.index]
df_channel_centers.index = s_electrode_channels.values
df_channel_centers.sort_index(inplace=True)
df_channel_centers.index.name = 'channel'
axis = result['axis']
patches = result['patches']
channel_patches = pd.Series(patches.values(),
index=s_electrode_channels[patches.keys()])
df_events = | pd.DataFrame(events) | pandas.DataFrame |
'''
Models all possible sensor combinations using an LSTM RNN
'''
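# Hypothetical entry point (sensor name strings are placeholders):
#   >>> model = lstm(['accel', 'gyro'])
# Instantiating the class immediately imports the data, builds the 3-D frames and
# runs the k-fold cross-validation.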
import pandas as pd
import os
import glob
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from itertools import combinations
from sklearn.model_selection import StratifiedKFold
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Bidirectional
from keras.layers import Dense
from keras.layers import Dropout
from keras.utils import np_utils
class lstm:
def __init__(self, sensors):
self.sensors = sensors
self.path = os.getcwd()
self.import_data()
self.build_3d_frames()
self.kfold_cv()
def import_data(self):
os.chdir(self.path+'/non_decomp')
non_files = glob.glob('*.csv')
self.non_frames = {}
non_size=0
for n in non_files:
df = pd.read_csv(n)
for c in df.columns:
check = any(x in c for x in self.sensors)
if check == False:
df = df.drop(c, axis=1)
non_size += df.shape[0]
self.non_frames[n[:2]] = df
os.chdir(self.path+'/nas_decomp')
nas_files = glob.glob('*.csv')
self.nas_frames = {}
nas_size=0
for n in nas_files:
df = | pd.read_csv(n) | pandas.read_csv |
import pandas as pd
from rake_nltk import Rake
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import CountVectorizer
pd.set_option('display.max_columns', 100)
df = | pd.read_csv('movie_metadata.csv') | pandas.read_csv |
"""
This module implements an f* coordinate and diagram generator.
For information about f* diagrams, see the following publications:
<NAME>. et al. Extending the limits of powder diffraction analysis: diffraction parameter space, occupancy defects, and
atomic form factors. Rev. Sci. Instrum. 89, 093002 (2018). 10.1063/1.5044555
<NAME>. et al.Thermodynamics of Antisite Defects in Layered NMC Cathodes: Systematic Insights from High-Precision Powder
Diffraction Analyses Chem. Mater 2020 32 (3), 1002-1010. 10.1021/acs.chemmater.9b03646
"""
import os
import numpy as np
import pandas as pd
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.io.cif import CifParser, str2float
import plotly.express as px
# Load in the form factors
with open(os.path.join(os.path.dirname(__file__),
"xray_factors_2.csv")) as f:
X_RAY_SCATTER_DF = pd.read_csv(f)
# from https://it.iucr.org/Cb/ch6o1v0001/ table 6.1.1.4
with open(os.path.join(os.path.dirname(__file__),
"neutron_factors.csv")) as f:
NEUTRON_SCATTER_DF = pd.read_csv(f)
# from http://www.ccp14.ac.uk/ccp/web-mirrors/neutrons/n-scatter/n-lengths/LIST~1.HTM
class FStarDiagram:
"""
Take a list of structure objects and/or cifs and use them to generate an f* phase diagram.
"""
def __init__(self, structure_objects, scattering_type='X-ray_simple', custom_scatter=None):
"""
Initialize the f* diagram generator with the list of structures and scattering type.
Args:
structure_objects(list): List of structure objects to use in the diagram.
scattering_type(str): Type of scattering to use in the f* calculation. Defaults to 'X-ray_simple'
which uses the atomic number as the scattering factor. 'X-ray' and 'Neutron' are built in scattering
types which use X-ray and neutron scattering factors, respectively. 'Custom' allows the user to
supplement their own calculation with any set of scattering factors.
custom_scatter(function): when using custom scattering, set this to the custom
scattering function (stored in a globally accessible variable).
"""
self._structures = structure_objects
self._scatter = scattering_type
self._custscat = custom_scatter
self._symstructs = [SpacegroupAnalyzer(structure).get_symmetrized_structure() for structure in
self._structures]
self._equiv_inds = [struct.equivalent_indices for struct in self._symstructs]
self.site_labels = self.get_site_labels()
self.coords = self.get_fstar_coords()
self.plot = px.scatter_ternary(data_frame=self.coords, a=self.site_labels[0], b=self.site_labels[1],
c=self.site_labels[2])
print("The labels for this structure's unique sites are")
print(self.site_labels)
def edit_fstar_diagram(self, combine_list=False, plot_list=False, **kwargs):
"""
Edit the plot of the f* diagram using plotly express.
Args:
combine_list(list): This is a list of lists which indicates what unique sites need to be combined to make
the plot ternary.
plot_list(list): This is a list that indicates what unique sites or combined sites to plot and what order to
plot them in.
kwargs: use this to add any other arguments from scatter_ternary .
"""
if combine_list:
for combo in combine_list:
self.coords[str(combo)] = sum([self.coords[site] for site in combo])
if str(combo) not in self.site_labels:
self.site_labels.append(str(combo))
if plot_list:
self.plot = px.scatter_ternary(data_frame=self.coords, a=plot_list[0], b=plot_list[1], c=plot_list[2],
**kwargs)
else:
self.plot = px.scatter_ternary(data_frame=self.coords, a=self.site_labels[0], b=self.site_labels[1],
c=self.site_labels[2], **kwargs)
def get_site_labels(self):
"""
Generates unique site labels based on composition, order, and symetry equivalence in the structure object.
Ex:
Structure Summary
Lattice
abc : 2.851 2.851 14.275
angles : 90.0 90.0 119.99999999999999
volume : 100.48498759501827
A : 2.851 0.0 1.745734012184552e-16
B : -1.4254999999999998 2.4690384261894347 1.745734012184552e-16
C : 0.0 0.0 14.275
PeriodicSite: Li:0.990, Ni:0.010 (0.0000, 0.0000, 0.0000) [0.0000, 0.0000, 0.0000]
PeriodicSite: Li:0.990, Ni:0.010 (1.4255, 0.8230, 4.7583) [0.6667, 0.3333, 0.3333]
PeriodicSite: Li:0.990, Ni:0.010 (-0.0000, 1.6460, 9.5167) [0.3333, 0.6667, 0.6667]
PeriodicSite: Li:0.010, Mn:0.333, Co:0.333, Ni:0.323 (0.0000, 0.0000, 7.1375) [0.0000, 0.0000, 0.5000]
PeriodicSite: Li:0.010, Mn:0.333, Co:0.333, Ni:0.323 (1.4255, 0.8230, 11.8958) [0.6667, 0.3333, 0.8333]
PeriodicSite: Li:0.010, Mn:0.333, Co:0.333, Ni:0.323 (-0.0000, 1.6460, 2.3792) [0.3333, 0.6667, 0.1667]
PeriodicSite: O (0.0000, 0.0000, 3.5688) [0.0000, 0.0000, 0.2500]
PeriodicSite: O (0.0000, 0.0000, 10.7063) [0.0000, 0.0000, 0.7500]
PeriodicSite: O (1.4255, 0.8230, 8.3270) [0.6667, 0.3333, 0.5833]
PeriodicSite: O (1.4255, 0.8230, 1.1895) [0.6667, 0.3333, 0.0833]
PeriodicSite: O (-0.0000, 1.6460, 13.0855) [0.3333, 0.6667, 0.9167]
PeriodicSite: O (-0.0000, 1.6460, 5.9480) [0.3333, 0.6667, 0.4167]
results in
labels - ['[0. 0. 0.]Li', '[0. 0. 0.5]Co', '[0. 0. 0.25]O']
'[0. 0. 0.]Li' - PeriodicSite: Li:0.990, Ni:0.010 (0.0000, 0.0000, 0.0000) [0.0000, 0.0000, 0.0000]
'[0. 0. 0.5]Co' - PeriodicSite: Li:0.010, Mn:0.333, Co:0.333, Ni:0.323
(0.0000, 0.0000, 7.1375) [0.0000, 0.0000, 0.5000]
'[0. 0. 0.25]O' - PeriodicSite: O (0.0000, 0.0000, 3.5688) [0.0000, 0.0000, 0.2500]
"""
site_labels_fin = []
for ind1, struct in enumerate(self._equiv_inds):
site_labels = []
for ind2, site in enumerate(struct):
label = str(self._structures[ind1][site[0]].frac_coords) + \
[str(sp) for sp, occ in self._structures[ind1][site[0]].species_and_occu.items()][0]
if label not in site_labels:
site_labels.append(label)
if len(site_labels) > len(site_labels_fin):
site_labels_fin = site_labels
return site_labels_fin
def get_fstar_coords(self):
"""
Calculate the f* coordinates for the list of structures.
"""
fstar_df_full = | pd.DataFrame(columns=self.site_labels) | pandas.DataFrame |
import pandas as pd
import numpy as np
import sys
import logging
import os
from pathlib import Path
class MSRawData:
"""
A class to describe raw data obtained from the MS machine
Args:
filePath (str): file path of the input MRM transition name file
logger (object): logger object created by start_logger in MSOrganiser
ingui (bool): if True, print analysis status to screen
"""
def __init__(self, filepath, logger=None, ingui=True):
self.__logger = logger
self.__ingui = ingui
self.__filecheck(Path(filepath))
def __filecheck(self,filepath):
"""Check if filepath exists and is a file"""
if not filepath.exists():
if self.__logger:
self.__logger.error('Input file path ' + '\'' + str(filepath) + '\'' +
' could not be found. ' +
'Please check the input file path.')
if self.__ingui:
print('Input file path ' + '\'' + str(filepath) + '\'' +
' could not be found. ' +
'Please check the input file path.',
flush = True)
sys.exit(-1)
elif not filepath.is_file():
if self.__logger:
self.__logger.error('Input file path ' + '\'' + str(filepath) + '\'' +
' does not lead to a system file. ' +
'Please check if the input file path is a system file and not a folder.')
if self.__ingui:
print('Input file path ' + '\'' + str(filepath) + '\'' +
' does not lead to a system file. ' +
'Please check if the input file path is a system file and not a folder.',
flush = True)
sys.exit(-1)
def remove_whiteSpaces(self,df):
"""Strip the whitespaces for each string columns of a df
Args:
df (pandas DataFrame): A panda data frame
Returns:
df (pandas DataFrame): A panda data frame with white space removed
"""
df[df.select_dtypes(['object']).columns] = df.select_dtypes(['object']).apply(lambda x: x.str.strip())
return df
class AgilentMSRawData(MSRawData):
"""
To describe raw data obtained from the Agilent MS machine
Args:
filePath (str): file path of the input MRM transition name file
logger (object): logger object created by start_logger in MSOrganiser
ingui (bool): if True, print analysis status to screen
"""
def __init__(self, filepath, logger=None, ingui=True):
MSRawData.__init__(self,filepath, ingui = ingui,logger=logger)
self.__logger = logger
self.__ingui = ingui
self.__readfile(filepath)
self.__getdataform(filepath)
self.__filename = os.path.basename(filepath)
self.VALID_COMPOUND_RESULTS = ('Area','RT','FWHM','S/N','Symmetry')
self.VALID_COMPOUND_METHODS = ('Precursor Ion','Product Ion')
def get_table(self,column_name,is_numeric=True):
"""Function to get the table from MassHunter Raw Data
Args:
column_name (str): The name of the column given in the Output_Options.
Returns:
A data frame of sample as rows and transition names as columns with values from the chosen column name
"""
if self.DataForm == "WideTableForm":
return self.__get_table_wide(column_name,is_numeric)
elif self.DataForm == "CompoundTableForm":
return self.__get_table_compound(column_name,is_numeric)
#def get_data_file_name(self):
# """Function to get the list of sample names in a form of a dataframe
#
# Returns:
# A data frame of sample as rows and transition names as columns with values from the chosen column name
#
# """
# if self.DataForm == "WideTableForm":
# return self.__get_data_file_name_wide()
# elif self.DataForm == "CompoundTableForm":
# return self.__get_data_file_name_compound()
def __get_table_wide(self,column_name,is_numeric=True):
"""Function to get the table from MassHunter Raw Data in Wide Table form"""
# Get the data file name and give error when it cannot be found
DataFileName_df = self.__get_data_file_name_wide()
# Check if Column name comes from Results or Methods group
if column_name in self.VALID_COMPOUND_RESULTS:
column_group = "Results"
elif column_name in self.VALID_COMPOUND_METHODS:
column_group = "Method"
else:
if self.__logger:
self.__logger.error('Output option ' + column_name + ' ' +
'is not a valid column in MassHunter or not ' +
'available as a valid output for this program.')
if self.__ingui:
print('Output option ' + column_name + ' ' +
'is not a valid column in MassHunter or not ' +
'available as a valid output for this program.',
flush=True)
sys.exit(-1)
# Extract the data with the given column name and group
table_index = self.RawData.iloc[0,:].str.contains(column_group) & self.RawData.iloc[1,:].str.contains(column_name)
table_df = self.RawData.loc[:,table_index].copy()
if table_df.empty:
return table_df
# Remove the column group text and whitespaces
table_df.iloc[0,:] = table_df.iloc[0,:].str.replace(column_group, "").str.strip()
# Assign column name
colnames = table_df.iloc[0,:].astype('str').str.strip()
table_df.columns = colnames
# We remove the first and second row because the column names are given
table_df = table_df.iloc[2:]
# Reset the row index
table_df = table_df.reset_index(drop=True)
# Convert text numbers into numeric
if is_numeric:
table_df = table_df.apply(pd.to_numeric, errors='coerce')
table_df = pd.concat([DataFileName_df, table_df], axis=1)
# Strip the whitespaces for each string columns
table_df = self.remove_whiteSpaces(table_df)
return table_df
def __get_table_compound(self,column_name,is_numeric=True):
"""Function to get the table from MassHunter Raw Data in Compound Table form"""
# Get the data file name and give error when it cannot be found
DataFileName_df = self.__get_data_file_name_compound()
# Check if Column name comes from Results or Methods group
# TODO try to extract data from VALID_COMPOUND_METHODS
if column_name in self.VALID_COMPOUND_RESULTS:
column_group = "Results"
elif column_name in self.VALID_COMPOUND_METHODS:
column_group = "Method"
else:
if self.__logger:
self.__logger.error('Output option ' + column_name + ' ' +
'is not a valid column in MassHunter or not ' +
'available as a valid output for this program.')
if self.__ingui:
print('Output option ' + column_name + ' ' +
'is not a valid column in MassHunter or not ' +
'available as a valid output for this program.',
flush=True)
sys.exit(-1)
# Get the compound table df and give error when it cannot be found
# CompoundName_df = self.__get_compound_name_compound(column_name)
table_df = self.__get_compound_name_compound(column_name)
if table_df.empty:
return table_df
table_df = table_df.transpose()
# Assign column name
colnames = table_df.iloc[0,:].astype('str').str.strip()
table_df.columns = colnames
# We remove the first row because the column names are given
table_df = table_df.iloc[1:]
# If column name is a compound method, only the first row has data, we need to replicate data for all the rows
if column_name in self.VALID_COMPOUND_METHODS:
table_df = pd.concat([table_df]*DataFileName_df.shape[0], ignore_index=True)
# Reset the row index
table_df = table_df.reset_index(drop=True)
# Convert text numbers into numeric
if is_numeric:
table_df = table_df.apply(pd.to_numeric, errors='coerce')
table_df = pd.concat([DataFileName_df, table_df], axis=1)
# Strip the whitespaces for each string columns
table_df = self.remove_whiteSpaces(table_df)
return table_df
def __get_compound_name_compound(self,column_name):
"""Function to get the df Transition Name as Rows, Sample Name as Columns with values from the chosen column_name. E.g Area """
# Get the column index of where the Transition Names are. We know for sure that it is on the third row
Compound_Col = self.RawData.iloc[0,:].str.contains("Compound Method") & self.RawData.iloc[1,:].str.contains("Name")
Compound_Col_Index = Compound_Col.index[Compound_Col == True].tolist()
# Give an error if we can't get any transition name
if len(Compound_Col_Index) == 0 :
if self.__logger:
self.__logger.error('\'' + self.__filename + '\' ' +
'has no column containing \"Name\" in Compound Method Table. ' +
'Please check the input file.')
if self.__ingui:
print('\'' + self.__filename + '\' ' +
'has no column containing \"Name\" in Compound Method Table. ' +
'Please check the input file.',
flush=True)
sys.exit(-1)
# Find cols with Transition in second row and Qualifier Method in the first row
Qualifier_Method_Col = self.RawData.iloc[0,:].str.contains("Qualifier \d Method", regex=True) & self.RawData.iloc[1,:].str.contains("Transition")
# Get the column index where the group of Qualifier Method first appeared.
Qualifier_Method_Col_Index = Qualifier_Method_Col.index[Qualifier_Method_Col == True].tolist()
# Find cols with Data File in the second row
DataFileName_Col = self.RawData.iloc[1,:].str.contains("Data File")
# Find the number of Qualifiers each Transition is entitled to have
No_of_Qual_per_Transition = int((Qualifier_Method_Col.values == True).sum() / (DataFileName_Col.values == True).sum() )
# We start a new Compound_list
Compound_list = []
# We start on row three
self.RawData.iloc[2:,sorted(Compound_Col_Index + Qualifier_Method_Col_Index[0:No_of_Qual_per_Transition] )].apply(lambda x: AgilentMSRawData._get_Compound_List(x=x,
Compound_list=Compound_list),
axis=1)
CompoundName_df = pd.DataFrame(Compound_list)
CompoundName_df = self.remove_whiteSpaces(CompoundName_df)
        # All columns whose name is the chosen column_name (e.g. Area) or Transition
ColName_Col = self.RawData.iloc[1,:].str.contains(column_name) | self.RawData.iloc[1,:].str.contains("Transition")
ColName_Col_Index = ColName_Col.index[ColName_Col == True].tolist()
        # Transitions from the Compound Method (they should not be used to get the Qualifier Area)
CpdMethod_Transition_Col = self.RawData.iloc[0,:].str.contains("Compound Method") & self.RawData.iloc[1,:].str.contains("Transition")
CpdMethod_Transition_Col_Index = CpdMethod_Transition_Col.index[CpdMethod_Transition_Col == True].tolist()
        # Column Name (e.g. Area) found for the Qualifier
ColName_Qualifier_Col = self.RawData.iloc[0,:].str.contains("Qualifier \d Results", regex=True) & self.RawData.iloc[1,:].str.contains(column_name)
ColName_Qualifier_Col_Index = ColName_Qualifier_Col.index[ColName_Qualifier_Col == True].tolist()
        # Column Name (e.g. Area) found for the Transitions
ColName_Compound_Col_Index = [x for x in ColName_Col_Index if x not in sorted(CpdMethod_Transition_Col_Index + Qualifier_Method_Col_Index + ColName_Qualifier_Col_Index, key = int)]
table_list = []
# We start on row three, update the table list with the column_name
self.RawData.iloc[2:,sorted(ColName_Col_Index, key=int)].apply(lambda x: AgilentMSRawData._get_Compound_Data(x=x,
table_list=table_list,
ColName_Compound_Col_Index = ColName_Compound_Col_Index,
Qualifier_Method_Col_Index = Qualifier_Method_Col_Index,
ColName_Qualifier_Col_Index = ColName_Qualifier_Col_Index,
No_of_Qual_per_Transition = No_of_Qual_per_Transition)
,axis=1)
if pd.DataFrame(table_list).empty:
return(pd.DataFrame(table_list))
else:
# TODO
# Check if the number of rows in the table_list of values,
# matches the number of rows (Transition and Qualifier Names) in the CompoundName_df
# If not, give an error of a possible corrupted csv input.
#print(len(CompoundName_df.index))
#print(len(pd.DataFrame(table_list)))
return(pd.concat([CompoundName_df, pd.DataFrame(table_list) ], axis=1))
return(pd.DataFrame())
def _get_Compound_Data(x,table_list,ColName_Compound_Col_Index,Qualifier_Method_Col_Index,ColName_Qualifier_Col_Index,No_of_Qual_per_Transition):
"""Function to get the values from the chosen column_name. E.g(Area) from a given row from the raw MRM data from Agilent in Compound Table form. table_list will be updated"""
#Get Compound row
Compound_df = pd.DataFrame(x[x.index.isin(ColName_Compound_Col_Index)])
Compound_df = Compound_df.T.values.tolist()
#Append to table_list
table_list.extend(Compound_df)
        # Get the Qualifier rows
        for i in range(0, No_of_Qual_per_Transition):
            # Check if there is a transition for this qualifier
            Transition = x[x.index.isin([Qualifier_Method_Col_Index[i]])].values.tolist()[0]
            if pd.isna(Transition):
"""
Script containing utilities related to manual data cleaning, processing, and transformation.
"""
from pandas import read_csv, get_dummies, to_numeric
from pandas.api.types import is_numeric_dtype
import numpy as np
from scipy import stats
def unknown_to_nan(df, additional_regex_patterns=[], additional_strings=[]):
"""
Function to convert typical unknown & unspecified values in a data frame to NaN values
@param (pd.DataFrame) df: data frame with unknown values represented in various forms
@param (list) additional_regex_patterns: regex patterns, alongside 'default_regex_patterns',
that may be contained in unknown values in @df
@param (list) additional_strings: strings, alongside 'default_strings', that may represent an
unknown value in @df
"""
    default_regex_patterns = [r'^\s*$']  # empty or whitespace-only string
default_strings = ['n/a', 'N/A', 'NA', 'UNKNOWN', '-UNKNOWN-', '-unknown-', 'unknown']
# Replace UNKNOWN tokens with proper missing value annotations
for regex_pattern in default_regex_patterns + additional_regex_patterns:
df.replace(to_replace=regex_pattern, value=np.nan, inplace=True, regex=True)
for string in default_strings + additional_strings:
df.replace(to_replace=string, value=np.nan, inplace=True)
return df
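# Illustrative usage sketch (not part of the original module); the frame and the
# 'city' column below are invented purely for demonstration.
def _example_unknown_to_nan():
    import pandas as pd
    demo = pd.DataFrame({'city': ['Boston', 'unknown', '   ', 'N/A', 'Paris']})
    # 'unknown', the whitespace-only string, and 'N/A' all become NaN
    return unknown_to_nan(demo)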
def categorical_to_onehot_columns(df, target_column=None):
"""
Function to encode categorical columns (features) into one-hot encoded columns.
@param (pd.DataFrame) df: data frame with categorical variables as columns
@param (str) target_column: column name for the dependent target variable in @df (default: None)
"""
for column in df.columns.values:
if not is_numeric_dtype(df[column]) and not column == target_column:
# Convert to one-hot representation
onehot_column = get_dummies(data=df[column], prefix=column, prefix_sep='=')
# Replace all 0 rows (deriving from NaN values) with the mean of the one-hot column
onehot_column.loc[(onehot_column == 0).all(axis=1)] = onehot_column.mean().tolist()
# Join with the original data frame and remove the original categorical column
df = df.join(other=onehot_column).drop(column, axis=1)
return df
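# Illustrative usage sketch (not part of the original module); the 'color' and
# 'target' columns are invented purely for demonstration.
def _example_categorical_to_onehot_columns():
    import pandas as pd
    demo = pd.DataFrame({'color': ['red', 'blue', 'red'], 'target': [1, 0, 1]})
    # 'color' is replaced by 'color=blue' / 'color=red' indicator columns,
    # while the numeric 'target' column is left untouched
    return categorical_to_onehot_columns(demo, target_column='target')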
def groupby_and_gather(raw_df):
"""
Function to groupby by age-gender buckets and gather the population per country, included as
a problem-specific example rather than a generalized utility.
@param (pd.DataFrame) raw_df: data frame with age-gender buckets and corresponding information
"""
    raw_df['bucket_id'] = raw_df['age_bucket'].str.replace('+', '-200', regex=False) + raw_df['gender']
raw_df.drop(['year', 'age_bucket', 'gender'], axis=1, inplace=True)
for column in set(raw_df['country_destination'].values):
raw_df[column] = 0
for country in set(raw_df['country_destination'].values.tolist()):
raw_df.loc[raw_df['country_destination'] == country, country] = 1
raw_df.drop('country_destination', axis=1, inplace=True)
for column in raw_df.columns.values:
if column != 'bucket_id' and column != 'population_in_thousands':
raw_df[column] = raw_df[column] * raw_df['population_in_thousands']
sum_df = raw_df.groupby(['bucket_id']).sum()
sum_df.drop('population_in_thousands', axis=1, inplace=True)
sum_df.reset_index(inplace=True)
return sum_df
string_age_buckets = set(
    read_csv('(0)data/age_gender_bkts.csv')['age_bucket'].str.replace('+', '-200', regex=False).values.tolist()
)
range_to_string_mapping = {tuple([i for i in range(int(rep.split('-')[0]),
int(rep.split('-')[1]) + 1)]): rep
for rep in string_age_buckets}
def age_to_age_bucket(age):
"""Function to convert a given integer age value to the corresponding age bucket"""
for bucket in range_to_string_mapping.keys():
if int(age) in tuple(bucket):
return range_to_string_mapping[bucket]
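# Illustrative usage sketch (not part of the original module). The exact bucket
# labels depend on age_gender_bkts.csv, so the '25-29' label mentioned below is
# an assumption, not a guaranteed value.
def _example_age_to_age_bucket():
    # For an input of 27 this returns the bucket whose range contains 27
    # (e.g. '25-29' if such a bucket exists in the CSV), or None otherwise.
    return age_to_age_bucket(27)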
def mcar_test(df, significance_level=0.05):
"""
Function for performing Little's chi-square test (1988) for the assumption (null hypothesis) of
missing completely at random (MCAR). Data should be multivariate and quantitative, categorical
variables do not work. The null hypothesis is equivalent to saying that the missingness of the
data is independent of both the observed and unobserved data. Common imputation methods like
likelihood inference and listwise deletion are theorized to be valid (due to non-inclusion of
bias) only when the data is missing at random (MAR), which MCAR is a subset of.
@param (pd.DataFrame) df: data frame with missing values that are ideally all quantitative
@param (float) significance_level: alpha parameter of the chi-squared test (default: 0.05)
"""
test_df = df.copy()
# Check if data contains categorical variables and select only numerical (float) columns
    if False in [is_numeric_dtype(test_df[column]) for column in test_df.columns]:
import operator
from shutil import get_terminal_size
from typing import Dict, Hashable, List, Type, Union, cast
from warnings import warn
import numpy as np
from pandas._config import get_option
from pandas._libs import algos as libalgos, hashtable as htable
from pandas._typing import ArrayLike, Dtype, Ordered, Scalar
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
Substitution,
cache_readonly,
deprecate_kwarg,
doc,
)
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.dtypes.cast import (
coerce_indexer_dtype,
maybe_cast_to_extension_array,
maybe_infer_to_datetimelike,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
is_categorical_dtype,
is_datetime64_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
from pandas.core import ops
from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algorithms
from pandas.core.algorithms import _get_data_algo, factorize, take, take_1d, unique1d
from pandas.core.array_algos.transforms import shift
from pandas.core.arrays.base import ExtensionArray, _extension_array_shared_docs
from pandas.core.base import NoNewAttributesMixin, PandasObject, _shared_docs
import pandas.core.common as com
from pandas.core.construction import array, extract_array, sanitize_array
from pandas.core.indexers import check_array_indexer, deprecate_ndim_indexing
from pandas.core.missing import interpolate_2d
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.sorting import nargsort
from pandas.io.formats import console
def _cat_compare_op(op):
opname = f"__{op.__name__}__"
@unpack_zerodim_and_defer(opname)
def func(self, other):
if is_list_like(other) and len(other) != len(self):
# TODO: Could this fail if the categories are listlike objects?
raise ValueError("Lengths must match.")
if not self.ordered:
if opname in ["__lt__", "__gt__", "__le__", "__ge__"]:
raise TypeError(
"Unordered Categoricals can only compare equality or not"
)
if isinstance(other, Categorical):
            # Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = "Categoricals can only be compared if 'categories' are the same."
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif self.ordered and not (self.categories == other.categories).all():
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError(
"Categoricals can only be compared if 'ordered' is the same"
)
if not self.ordered and not self.categories.equals(other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
f = getattr(self._codes, opname)
ret = f(other_codes)
mask = (self._codes == -1) | (other_codes == -1)
if mask.any():
                # In other Series, this leads to False, so do that here too
if opname == "__ne__":
ret[(self._codes == -1) & (other_codes == -1)] = True
else:
ret[mask] = False
return ret
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
ret = getattr(self._codes, opname)(i)
if opname not in {"__eq__", "__ge__", "__gt__"}:
# check for NaN needed if we are not equal or larger
mask = self._codes == -1
ret[mask] = False
return ret
else:
if opname == "__eq__":
return np.zeros(len(self), dtype=bool)
elif opname == "__ne__":
return np.ones(len(self), dtype=bool)
else:
raise TypeError(
f"Cannot compare a Categorical for op {opname} with a "
"scalar, which is not a category."
)
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if opname in ["__eq__", "__ne__"]:
return getattr(np.array(self), opname)(np.array(other))
raise TypeError(
f"Cannot compare a Categorical for op {opname} with "
f"type {type(other)}.\nIf you want to compare values, "
"use 'np.asarray(cat) <op> other'."
)
func.__name__ = opname
return func
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :method:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
    cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except (KeyError, TypeError):
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
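# Illustrative sketch (not part of the original source): `contains` answers
# "does this key map to a code that is actually present in `container`?", e.g.
#
#   >>> cat = Categorical(['a'], categories=['a', 'b'])
#   >>> contains(cat, 'a', container=cat._codes)   # code 0 is in use
#   True
#   >>> contains(cat, 'b', container=cat._codes)   # 'b' is a category, but unused
#   False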
class Categorical(ExtensionArray, PandasObject):
"""
Represent a categorical variable in classic R / S-plus fashion.
    `Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : bool, default False
        Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical.
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : bool
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
See Also
--------
CategoricalDtype : Type for categorical data.
CategoricalIndex : An Index with an underlying ``Categorical``.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html>`_
for more.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
"""
# For comparisons, so that numpy uses our implementation if the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
# tolist is not actually deprecated, just suppressed in the __dir__
_deprecations = PandasObject._deprecations | frozenset(["tolist"])
_typ = "categorical"
def __init__(
self, values, categories=None, ordered=None, dtype=None, fastpath=False
):
dtype = CategoricalDtype._from_values_or_dtype(
values, categories, ordered, dtype
)
# At this point, dtype is always a CategoricalDtype, but
# we may have dtype.categories be None, and we need to
# infer categories in a factorization step further below
if fastpath:
self._codes = coerce_indexer_dtype(values, dtype.categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
# By convention, empty lists result in object dtype:
sanitize_dtype = np.dtype("O") if len(values) == 0 else None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError as err:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError(
"'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument."
) from err
except ValueError as err:
# FIXME
raise NotImplementedError(
"> 1 ndim Categorical are not supported at this time"
) from err
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values.dtype):
old_codes = (
values._values.codes if isinstance(values, ABCSeries) else values.codes
)
codes = recode_for_categories(
old_codes, values.dtype.categories, dtype.categories
)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = -np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""
The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
        Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
            number of new categories is unequal to the number of old categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if self.dtype.categories is not None and len(self.dtype.categories) != len(
new_dtype.categories
):
raise ValueError(
"new categories need to have the same number of "
"items as the old categories!"
)
self._dtype = new_dtype
@property
def ordered(self) -> Ordered:
"""
Whether the categories have an ordered relationship.
"""
return self.dtype.ordered
@property
def dtype(self) -> CategoricalDtype:
"""
The :class:`~pandas.api.types.CategoricalDtype` for this instance.
"""
return self._dtype
@property
def _constructor(self) -> Type["Categorical"]:
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def _formatter(self, boxed=False):
# Defer to CategoricalFormatter's formatter.
return None
def copy(self) -> "Categorical":
"""
Copy constructor.
"""
return self._constructor(
values=self._codes.copy(), dtype=self.dtype, fastpath=True
)
def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike:
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
"""
if is_categorical_dtype(dtype):
dtype = cast(Union[str, CategoricalDtype], dtype)
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
if is_extension_array_dtype(dtype):
return array(self, dtype=dtype, copy=copy) # type: ignore # GH 28770
if is_integer_dtype(dtype) and self.isna().any():
raise ValueError("Cannot convert float NaN to integer")
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def size(self) -> int:
"""
Return the len of myself.
"""
return self._codes.size
@cache_readonly
def itemsize(self) -> int:
"""
return the size of a single category
"""
return self.categories.itemsize
def tolist(self) -> List[Scalar]:
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
to_list = tolist
@classmethod
def _from_inferred_categories(
cls, inferred_categories, inferred_codes, dtype, true_values=None
):
"""
Construct a Categorical from inferred values.
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
true_values : list, optional
If none are provided, the default ones are
"True", "TRUE", and "true."
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (
isinstance(dtype, CategoricalDtype) and dtype.categories is not None
)
if known_categories:
# Convert to a specialized type with `dtype` if specified.
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors="coerce")
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors="coerce")
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors="coerce")
elif dtype.categories.is_boolean():
if true_values is None:
true_values = ["True", "TRUE", "true"]
cats = cats.isin(true_values)
if known_categories:
# Recode from observation order to dtype.categories order.
categories = dtype.categories
codes = recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# Sort categories and recode for unknown categories.
unsorted = cats.copy()
categories = cats.sort_values()
codes = recode_for_categories(inferred_codes, unsorted, categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories=None, ordered=None, dtype=None):
"""
Make a Categorical type from codes and categories or dtype.
This constructor is useful if you already have codes and
categories/dtype and so do not need the (computation intensive)
factorization step, which is usually done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like of int
An integer array, where each integer points to a category in
categories or dtype.categories, or else is -1 for NaN.
categories : index-like, optional
The categories for the categorical. Items need to be unique.
If the categories are not given here, then they must be provided
in `dtype`.
ordered : bool, optional
Whether or not this categorical is treated as an ordered
categorical. If not given here or in `dtype`, the resulting
categorical will be unordered.
dtype : CategoricalDtype or "category", optional
If :class:`CategoricalDtype`, cannot be used together with
`categories` or `ordered`.
.. versionadded:: 0.24.0
When `dtype` is provided, neither `categories` nor `ordered`
should be provided.
Returns
-------
Categorical
Examples
--------
>>> dtype = pd.CategoricalDtype(['a', 'b'], ordered=True)
>>> pd.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype)
[a, b, a, b]
Categories (2, object): [a < b]
"""
dtype = CategoricalDtype._from_values_or_dtype(
categories=categories, ordered=ordered, dtype=dtype
)
if dtype.categories is None:
msg = (
"The categories must be provided in 'categories' or "
"'dtype'. Both were None."
)
raise ValueError(msg)
if is_extension_array_dtype(codes) and is_integer_dtype(codes):
# Avoid the implicit conversion of Int to object
if isna(codes).any():
raise ValueError("codes cannot contain NA values")
codes = codes.to_numpy(dtype=np.int64)
else:
codes = np.asarray(codes)
if len(codes) and not is_integer_dtype(codes):
raise ValueError("codes need to be array-like integers")
if len(codes) and (codes.max() >= len(dtype.categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and len(categories)-1")
return cls(codes, dtype=dtype, fastpath=True)
@property
def codes(self) -> np.ndarray:
"""
The category codes of this categorical.
Codes are an array of integers which are the positions of the actual
values in the categories array.
There is no setter, use the other categorical methods and the normal item
setter to change values in the categorical.
Returns
-------
ndarray[int]
A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
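    # Illustrative sketch (not part of the original source): codes are positions
    # into `categories`, with -1 marking missing values, e.g.
    #
    #   >>> pd.Categorical(['a', 'b', np.nan, 'a']).codes
    #   array([ 0,  1, -1,  0], dtype=int8)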
def _set_categories(self, categories, fastpath=False):
"""
Sets new categories inplace
Parameters
----------
fastpath : bool, default False
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories, self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (
not fastpath
and self.dtype.categories is not None
and len(new_dtype.categories) != len(self.dtype.categories)
):
raise ValueError(
"new categories need to have the same number of "
"items than the old categories!"
)
self._dtype = new_dtype
def _set_dtype(self, dtype: CategoricalDtype) -> "Categorical":
"""
Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = recode_for_categories(self.codes, self.categories, dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Set the ordered attribute to the boolean value.
Parameters
----------
value : bool
Set whether this categorical is ordered (True) or not (False).
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to the value.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Set the Categorical to be ordered.
Parameters
----------
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to True.
Returns
-------
Categorical
Ordered Categorical.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Set the Categorical to be unordered.
Parameters
----------
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to False.
Returns
-------
Categorical
Unordered Categorical.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False, inplace=False):
"""
Set the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
        set to NaN). If `rename==True`, the categories will simply be renamed
        (fewer or more items than in old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
        On the other hand, this method does not do checks (e.g., whether the
        old categories are included in the new categories on a reorder), which
        can result in surprising changes, for example when using special string
        dtypes, which do not consider an S1 string equal to a single-char
        Python string.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, default False
        Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : bool, default False
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : bool, default False
Whether or not to reorder the categories in-place or return a copy
of this categorical with reordered categories.
Returns
-------
Categorical with reordered categories or None if inplace.
Raises
------
ValueError
If new_categories does not validate as categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if cat.dtype.categories is not None and len(new_dtype.categories) < len(
cat.dtype.categories
):
# remove all _codes which are larger and set to -1/NaN
cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
codes = recode_for_categories(
cat.codes, cat.categories, new_dtype.categories
)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
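    # Illustrative sketch (not part of the original source): without rename, the
    # values are re-coded against the new categories and anything not present in
    # `new_categories` becomes NaN, e.g.
    #
    #   >>> c = pd.Categorical(['a', 'b', 'c'])
    #   >>> c.set_categories(['a', 'b'])
    #   [a, b, NaN]
    #   Categories (2, object): [a, b]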
def rename_categories(self, new_categories, inplace=False):
"""
Rename categories.
Parameters
----------
new_categories : list-like, dict-like or callable
New categories which will replace old categories.
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0.
inplace : bool, default False
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
            items as the current categories or do not validate as categories
See Also
--------
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.copy()
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item) for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
"""
Reorder categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, optional
        Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : bool, default False
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
See Also
--------
rename_categories : Rename categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if set(self.dtype.categories) != set(new_categories):
raise ValueError(
"items in new_categories are not the same as in old categories"
)
return self.set_categories(new_categories, ordered=ordered, inplace=inplace)
def add_categories(self, new_categories, inplace=False):
"""
Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : bool, default False
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
raise ValueError(
f"new categories must not include old categories: {already_included}"
)
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
"""
Remove the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : bool, default False
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
Raises
------
ValueError
If the removals are not contained in the categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_list_like(removals):
removals = [removals]
removal_set = set(removals)
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = {x for x in not_included if notna(x)}
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
raise ValueError(f"removals must all be in old categories: {not_included}")
return self.set_categories(
new_categories, ordered=self.ordered, rename=False, inplace=inplace
)
def remove_unused_categories(self, inplace=False):
"""
Remove categories which are not used.
Parameters
----------
inplace : bool, default False
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(
new_categories, ordered=self.ordered
)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
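    # Illustrative sketch (not part of the original source): only the categories
    # that actually appear in the data survive, e.g.
    #
    #   >>> c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
    #   >>> c.remove_unused_categories()
    #   [a, b, a]
    #   Categories (2, object): [a, b]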
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned. NaN values are unaffected.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(
self._codes.copy(), categories=new_categories, ordered=self.ordered
)
except ValueError:
# NA values are represented in self._codes with -1
# np.take causes NA values to take final element in new_categories
if np.any(self._codes == -1):
new_categories = new_categories.insert(len(new_categories), np.nan)
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op(operator.eq)
__ne__ = _cat_compare_op(operator.ne)
__lt__ = _cat_compare_op(operator.lt)
__gt__ = _cat_compare_op(operator.gt)
__le__ = _cat_compare_op(operator.le)
__ge__ = _cat_compare_op(operator.ge)
# for Series/ndarray like compat
@property
def shape(self):
"""
Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods, fill_value=None):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
fill_value : object, optional
The scalar value to use for newly introduced missing values.
.. versionadded:: 0.24.0
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
fill_value = self._validate_fill_value(fill_value)
codes = shift(codes.copy(), periods, axis=0, fill_value=fill_value)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def _validate_fill_value(self, fill_value):
"""
Convert a user-facing fill_value to a representation to use with our
underlying ndarray, raising ValueError if this is not possible.
Parameters
----------
fill_value : object
Returns
-------
fill_value : int
Raises
------
ValueError
"""
if isna(fill_value):
fill_value = -1
elif fill_value in self.categories:
fill_value = self.categories.get_loc(fill_value)
else:
raise ValueError(
f"'fill_value={fill_value}' is not present "
"in this Categorical's categories"
)
return fill_value
def __array__(self, dtype=None) -> np.ndarray:
"""
The numpy array interface.
Returns
-------
numpy.array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype.
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
            # we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
# for binary ops, use our custom dunder methods
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
# for all other cases, raise for now (similarly as what happens in
# Series.__array_prepare__)
raise TypeError(
f"Object with dtype {self.dtype} cannot perform "
f"the numpy op {ufunc.__name__}"
)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception("invalid pickle state")
if "_dtype" not in state:
state["_dtype"] = CategoricalDtype(state["_categories"], state["_ordered"])
for k, v in state.items():
setattr(self, k, v)
@property
def T(self) -> "Categorical":
"""
Return transposed numpy array.
"""
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(deep=deep)
@doc(_shared_docs["searchsorted"], klass="Categorical")
def searchsorted(self, value, side="left", sorter=None):
# searchsorted is very performance sensitive. By converting codes
# to same dtype as self.codes, we get much faster performance.
if is_scalar(value):
codes = self.categories.get_loc(value)
codes = self.codes.dtype.type(codes)
else:
locs = [self.categories.get_loc(x) for x in value]
codes = np.array(locs, dtype=self.codes.dtype)
return self.codes.searchsorted(codes, side=side, sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See Also
--------
isna : Top-level isna.
isnull : Alias of isna.
Categorical.notna : Boolean inverse of Categorical.isna.
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See Also
--------
notna : Top-level notna.
notnull : Alias of notna.
Categorical.isna : Boolean inverse of Categorical.notna.
"""
return ~self.isna()
notnull = notna
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Return a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : bool, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = np.bincount(obs, minlength=ncat or 0)
else:
count = np.bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype, fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype="int64")
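    # Illustrative sketch (not part of the original source): every category gets
    # an entry, even those that never occur, e.g.
    #
    #   >>> pd.Categorical(['a', 'a', 'b'], categories=['a', 'b', 'c']).value_counts()
    #   a    2
    #   b    1
    #   c    0
    #   dtype: int64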
def _internal_get_values(self):
"""
Return the values.
For internal compatibility with pandas formatting.
Returns
-------
np.ndarray or Index
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods.
"""
# if we are a datetime and period index, return Index to keep metadata
if needs_i8_conversion(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
elif is_integer_dtype(self.categories) and -1 in self._codes:
return self.categories.astype("object").take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError(
f"Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n"
)
def _values_for_argsort(self):
return self._codes
def argsort(self, ascending=True, kind="quicksort", **kwargs):
"""
Return the indices that would sort the Categorical.
.. versionchanged:: 0.25.0
Changed to sort missing values at the end.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
**kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
numpy.array
See Also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
Missing values are placed at the end
>>> cat = pd.Categorical([2, None, 1])
>>> cat.argsort()
array([2, 0, 1])
"""
return super().argsort(ascending=ascending, kind=kind, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position="last"):
"""
Sort the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : bool, default False
Do operation in place.
ascending : bool, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2, 2, NaN, 5]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2, 2, 5, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2, 2, 5]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5, 2, 2]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if na_position not in ["last", "first"]:
raise ValueError(f"invalid na_position: {repr(na_position)}")
sorted_idx = nargsort(self, ascending=ascending, na_position=na_position)
if inplace:
self._codes = self._codes[sorted_idx]
else:
return self._constructor(
values=self._codes[sorted_idx], dtype=self.dtype, fastpath=True
)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy.array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype("float64")
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def view(self, dtype=None):
if dtype is not None:
raise NotImplementedError(dtype)
return self._constructor(values=self._codes, dtype=self.dtype, fastpath=True)
def to_dense(self):
"""
Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
warn(
"Categorical.to_dense is deprecated and will be removed in "
"a future version. Use np.asarray(cat) instead.",
FutureWarning,
stacklevel=2,
)
return np.asarray(self)
def fillna(self, value=None, method=None, limit=None):
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError(
"specifying a limit for fillna has not been implemented yet"
)
codes = self._codes
# pad / bfill
if method is not None:
# TODO: dispatch when self.categories is EA-dtype
values = np.asarray(self).reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None, value).astype(
self.categories.dtype
)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, (np.ndarray, Categorical, ABCSeries)):
# We get ndarray or Categorical if called via Series.fillna,
# where it will unwrap another aligned Series before getting here
mask = ~algorithms.isin(value, self.categories)
if not isna(value[mask]).all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(codes == -1)
codes = codes.copy()
codes[indexer] = values_codes[indexer]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError(
f"'value' parameter must be a scalar, dict "
f"or Series, but you passed a {type(value).__name__}"
)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
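    # Illustrative sketch (not part of the original source): the fill value has to
    # be an existing category (or NaN), e.g.
    #
    #   >>> c = pd.Categorical(['a', np.nan, 'b'])
    #   >>> c.fillna('a')
    #   [a, a, b]
    #   Categories (2, object): [a, b]
    #   >>> c.fillna('z')   # raises ValueError: fill value must be in categories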
def take(self, indexer, allow_fill: bool = False, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of int
The indices in `self` to take. The meaning of negative values in
`indexer` depends on the value of `allow_fill`.
allow_fill : bool, default False
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values.
These values are set to `fill_value`. Any other negative
values raise a ``ValueError``.
.. versionchanged:: 1.0.0
Default value changed from ``True`` to ``False``.
fill_value : object
The value to use for `indices` that are missing (-1), when
``allow_fill=True``. This should be the category, i.e. a value
in ``self.categories``, not a code.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
See Also
--------
Series.take : Similar method for Series.
numpy.ndarray.take : Similar method for NumPy arrays.
Examples
--------
>>> cat = pd.Categorical(['a', 'a', 'b'])
>>> cat
[a, a, b]
Categories (2, object): [a, b]
Specify ``allow_fill=False`` to have negative indices mean indexing
from the right.
>>> cat.take([0, -1, -2], allow_fill=False)
[a, b, a]
Categories (2, object): [a, b]
With ``allow_fill=True``, indices equal to ``-1`` mean "missing"
values that should be filled with the `fill_value`, which is
``np.nan`` by default.
>>> cat.take([0, -1, -1], allow_fill=True)
[a, NaN, NaN]
Categories (2, object): [a, b]
The fill value can be specified.
>>> cat.take([0, -1, -1], allow_fill=True, fill_value='a')
[a, a, a]
Categories (2, object): [a, b]
Specifying a fill value that's not in ``self.categories``
will raise a ``TypeError``.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill:
# convert user-provided `fill_value` to codes
fill_value = self._validate_fill_value(fill_value)
codes = take(self._codes, indexer, allow_fill=allow_fill, fill_value=fill_value)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill: bool = False, fill_value=None):
# GH#27745 deprecate alias that other EAs don't have
warn(
"Categorical.take_nd is deprecated, use Categorical.take instead",
FutureWarning,
stacklevel=2,
)
return self.take(indexer, allow_fill=allow_fill, fill_value=fill_value)
def __len__(self) -> int:
"""
The length of this Categorical.
"""
return len(self._codes)
def __iter__(self):
"""
Returns an Iterator over the values of this Categorical.
"""
return iter(self._internal_get_values().tolist())
def __contains__(self, key) -> bool:
"""
Returns True if `key` is in this Categorical.
"""
# if key is a NaN, check if any NaN is in self.
if is_scalar(key) and isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True) -> str:
"""
a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num) :]._get_repr(length=False, footer=False)
result = f"{head[:-1]}, ..., {tail[1:]}"
if footer:
result = f"{result}\n{self._repr_footer()}"
return str(result)
def _repr_categories(self):
"""
return the base repr for the categories
"""
max_categories = (
10
if get_option("display.max_categories") == 0
else get_option("display.max_categories")
)
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
# -*- coding:utf-8 -*-
import pandas as pd
import numpy as np
from imblearn.over_sampling import RandomOverSampler
def transform_jkq(x):
'''
Map J, Q and K to 11, 12 and 13
:param x:
:return:
'''
if x == 'J':
return 11
elif x == 'Q':
return 12
elif x == 'K':
return 13
else:
return x
def bincount2D_vectorized(a):
'''
Count the cards of each of the four suits and flag the presence of each of the 13 ranks
:param a:
:return:
'''
N = a.max() + 1
a_offs = a + np.arange(a.shape[0])[:, None] * N
return np.bincount(a_offs.ravel(), minlength=a.shape[0] * N).reshape(-1, N)
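# Illustrative example (not in the original file): each row of `a` is histogrammed
# independently, so for
#   a = np.array([[1, 2, 2],
#                 [3, 3, 1]])
# N is 4 and the result is
#   [[0, 1, 2, 0],
#    [0, 1, 0, 2]]
# i.e. one count vector per hand, which is how the suit counts and rank flags below are built.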
def preprocess_features(input_data_df):
# Map J, Q and K to 11, 12 and 13
input_data_df['C1'] = input_data_df['C1'].apply(transform_jkq)
input_data_df['C2'] = input_data_df['C2'].apply(transform_jkq)
input_data_df['C3'] = input_data_df['C3'].apply(transform_jkq)
input_data_df['C4'] = input_data_df['C4'].apply(transform_jkq)
input_data_df['C5'] = input_data_df['C5'].apply(transform_jkq)
# Map C, D, H and S to 1, 2, 3 and 4
encode_map = {'C': 1, 'D': 2, 'H': 3, 'S': 4}
input_data_df['S1'] = input_data_df['S1'].map(encode_map)
input_data_df['S2'] = input_data_df['S2'].map(encode_map)
input_data_df['S3'] = input_data_df['S3'].map(encode_map)
input_data_df['S4'] = input_data_df['S4'].map(encode_map)
input_data_df['S5'] = input_data_df['S5'].map(encode_map)
# Count the cards of each of the four suits
S_training = input_data_df.iloc[:, [0, 2, 4, 6, 8]].astype(int)
S_training = pd.DataFrame(bincount2D_vectorized(S_training.values), columns=['suitCount0', 'suitCount1', 'suitCount2', 'suitCount3', 'suitCount4'])
input_data_df = pd.merge(input_data_df, S_training, how='left', left_index=True, right_index=True).drop(['suitCount0'], axis=1)
# Flag the presence of each of the 13 ranks
R_training = input_data_df.iloc[:, np.arange(1, 10, 2)].astype(int)
cols = ['rank{}'.format(x) for x in range(0, 14, 1)]
R_training = pd.DataFrame(bincount2D_vectorized(R_training.values), columns=cols)
input_data_df = pd.merge(input_data_df, R_training, how='left', left_index=True, right_index=True)
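# Minimal usage sketch (file name is hypothetical; assumes the standard UCI poker-hand
# column order S1,C1,...,S5,C5,hand and that preprocess_features returns the engineered frame):
#   raw = pd.read_csv('poker-hand-training.csv',
#                     names=['S1', 'C1', 'S2', 'C2', 'S3', 'C3', 'S4', 'C4', 'S5', 'C5', 'hand'])
#   X = preprocess_features(raw.drop(columns=['hand']))
#   X_res, y_res = RandomOverSampler(random_state=0).fit_resample(X, raw['hand'])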
import time
from pathlib import Path
from typing import Tuple, Sequence
from collections import Counter
import numpy as np
import pandas as pd
from torch.utils import data
from tqdm import tqdm
from sandstone.datasets.factory import RegisterDataset
from sandstone.utils.generic import log, md5
import warnings
warnings.simplefilter("ignore")
class MIMIC_IV_Abstract_Dataset(data.Dataset):
"""Abstract class for different MIMIC-IV tasks.
Handles data loading, caching, splitting, and various generic preprocessing steps.
"""
def __init__(self, args, split_group):
super(MIMIC_IV_Abstract_Dataset, self).__init__()
self.args = args
self.split_group = split_group
cache_static_filename = get_cache_filename('static', args=args)
cache_hourly_filename = get_cache_filename('hourly', args=args)
print(f"Loading item mapping ({args.item_map_path})")
item_mapping = pd.read_csv(args.item_map_path, low_memory=False)
if Path(args.cache_dir, cache_static_filename).is_file() and Path(args.cache_dir, cache_hourly_filename).is_file():
print("Loading cached static_df and aggregated_df:", cache_static_filename, cache_hourly_filename)
static_df = pd.read_parquet(Path(args.cache_dir, cache_static_filename))
aggregated_df = pd.read_parquet(Path(args.cache_dir, cache_hourly_filename))
else:
# compute which csvs are needed
task_csv_subset = set(self.task_specific_features.keys())
features_csv_subset = set(item_mapping.origin.loc[item_mapping.origin != 'static'].dropna())
# by default, patients, chartevents, admissions and icustays are loaded
self.csv_subset = set(('patients', 'chartevents', 'admissions', 'icustays')).union(task_csv_subset).union(features_csv_subset)
raw_dataframes = load_data(args.dataset_path, subset=self.csv_subset, nrows=args.nrows, chunksize=args.chunksize, cache_dir=args.cache_dir)
static_df, aggregated_df = self.create_dataframes(args, item_mapping, **raw_dataframes)
# cache final dataframes
static_df.to_parquet(Path(args.cache_dir, cache_static_filename))
aggregated_df.to_parquet(Path(args.cache_dir, cache_hourly_filename))
print("Generating labels")
self.create_labels(static_df, aggregated_df, task=args.task, threshold=args.los_threshold)
if args.dataset == 'mimic-iv-sepsis':
print(f"Extracting {args.data_hours} hours of data")
aggregated_df = self.extract_timerange(args, aggregated_df, task=args.task)
print("Adding onset hour to static_df")
onset = aggregated_df.groupby('hadm_id')[args.task+'_onset_hour'].mean()
static_df = static_df.merge(onset, how='left', on='hadm_id')
# filter static_df to only include patients in aggregated_df
static_df = static_df[static_df.hadm_id.isin(aggregated_df.hadm_id.unique())]
print("Filter for just feature columns")
static_cols = ['subject_id', 'hadm_id', 'intime', 'y', args.task+'_onset_hour']
cols_to_keep = ['hadm_id', 'hour']
if len(args.features) != 0:
# convert to lower case
args.features = [x.lower() for x in args.features]
if args.group_by_level2:
static_cols.extend(args.features)
cols_to_keep.extend(args.features)
else:
feature_ids = list(item_mapping.loc[item_mapping['LEVEL2'].str.lower().isin(args.features)]['itemid'].map(str))
static_cols.extend(feature_ids)
cols_to_keep.extend(feature_ids)
else:
static_cols.extend(list(item_mapping.itemid.map(str)))
if args.group_by_level2:
cols_to_keep.extend(list(item_mapping.LEVEL2))
else:
cols_to_keep.extend(list(item_mapping.itemid.map(str)))
if args.feature_search is not None:
args.feature_search = args.feature_search.lower()
if args.group_by_level2:
print("Search feature:", args.feature_search)
# feature_search is a single string, so append it rather than extending with its characters
static_cols.append(args.feature_search)
cols_to_keep.append(args.feature_search)
else:
search_ids = list(item_mapping.loc[item_mapping['LEVEL2'].str.lower() == (args.feature_search)]['itemid'].map(str))
print("Search IDs:", search_ids)
cols_to_keep.extend(search_ids)
static_cols.extend(search_ids)
if len(args.feature_remove) != 0:
# convert to lower case
args.feature_remove = [x.lower() for x in args.feature_remove]
if args.group_by_level2:
remove_ids = args.feature_remove
else:
remove_ids = list(item_mapping.loc[item_mapping['LEVEL2'].str.lower().isin(args.feature_remove)]['itemid'].map(str))
for feature in remove_ids:
if feature in cols_to_keep:
print("Removed feature:", feature)
cols_to_keep.remove(feature)
if feature in static_cols:
static_cols.remove(feature)
original_cols = [c for c in cols_to_keep if c in aggregated_df.columns]
if args.impute_method == 'simple':
exist_cols = [c+'_exist' for c in original_cols if c not in ['hadm_id', 'hour']]
time_cols = [c+'_time_since' for c in original_cols if c not in ['hadm_id', 'hour']]
cols_to_keep.extend(exist_cols)
cols_to_keep.extend(time_cols)
static_df = static_df.loc[:, static_df.columns.isin(static_cols)]
aggregated_df = aggregated_df.loc[:,aggregated_df.columns.isin(cols_to_keep)]
if args.dataset == 'mimic-iv-sepsis':
print(f"Re-indexing and zero filling")
aggregated_df = reindex_timeseries(aggregated_df)
aggregated_df.fillna({x:0 for x in original_cols}, inplace=True)
if args.impute_method == 'simple':
aggregated_df.fillna({x:0 for x in exist_cols}, inplace=True)
aggregated_df.fillna({x:100 for x in time_cols}, inplace=True)
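# Note: under 'simple' imputation the *_exist columns presumably flag whether a value was
# observed in a given hour and *_time_since tracks hours since the last observation, so
# re-indexed rows default to 0 ("not observed") and 100 (a large sentinel) respectively.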
print("Static df size:", static_df.shape)
print("Static df columns:", static_df.columns)
print("Aggregated df size:", aggregated_df.shape)
print("Aggregated df columns:", aggregated_df.columns)
print("Static df stats:")
print(static_df.describe())
print("Aggregated df stats:")
print(aggregated_df.describe())
print("Binarize/One-hot encode categorical feature columns")
if 'gender' in static_df.columns:
static_df['gender'] = (static_df.gender == 'M').astype(bool)
for col in ['marital_status', 'ethnicity']:
if col in static_df.columns:
dummies = pd.get_dummies(static_df[col]).add_prefix(col+"_").astype(bool)
static_df.drop(columns=col, inplace=True)
static_df[dummies.columns] = dummies
self.assign_splits(static_df)
if args.normalize is not None:
print("Normalizing values to zero-mean and unit variance.")
if args.group_by_level2:
normalize_feats = set(args.normalize)
else:
normalize_feats = set(item_mapping.loc[item_mapping['LEVEL2'].isin(args.normalize)].itemid.unique())
static_norm_cols = list(normalize_feats.intersection(static_df.columns))
hourly_norm_cols = list(normalize_feats.intersection(aggregated_df.columns))
unused_norm_cols = normalize_feats.difference(set(static_norm_cols + hourly_norm_cols))
if len(unused_norm_cols) != 0:
print("WARNING: Couldn't find specified columns to normalize by: {}!".format(unused_norm_cols))
static_train = static_df.loc[static_df.split_group == 'train']
static_normalize_df = static_train[static_norm_cols]
hourly_normalize_df = aggregated_df.loc[aggregated_df.hadm_id.isin(static_train.hadm_id.unique()), hourly_norm_cols]
# compute stats over train data
static_mean, static_std = static_normalize_df.mean(), static_normalize_df.std()
hourly_mean, hourly_std = hourly_normalize_df.mean(), hourly_normalize_df.std()
# prevent division by zero
static_std.loc[static_std == 0] = 1
hourly_std.loc[hourly_std == 0] = 1
# apply to whole dataset
static_df[static_norm_cols] = (static_df[static_norm_cols] - static_mean) / static_std
aggregated_df[hourly_norm_cols] = (aggregated_df[hourly_norm_cols] - hourly_mean) / hourly_std
if args.flatten_timeseries:
flattened_df = flatten_timeseries(aggregated_df)
static_df = static_df.merge(flattened_df, on='hadm_id')
elif args.timeseries_moments:
moments_df = compute_timeseries_moments(aggregated_df, args.timeseries_moments)
static_df = static_df.merge(moments_df, on='hadm_id')
static_df.columns = static_df.columns.map(str)
self.static_features = [col for col in static_df.columns if col not in ['y', 'subject_id', 'hadm_id', 'intime', 'split_group', args.task+'_onset_hour']]
self.timeseries_features = [col for col in aggregated_df.columns if col not in ['hadm_id', 'charttime', 'hour']]
static_df = static_df.loc[static_df['split_group'] == split_group]
if not args.static_only:
# if non-flattened hourly data is used, also filter aggregated_df
aggregated_df = aggregated_df.loc[aggregated_df['hadm_id'].isin(static_df['hadm_id'].unique())]
static_df.drop(columns='split_group', inplace=True)
if args.static_only:
self.dataset = self.create_dataset(static_df)
else:
self.dataset = self.create_dataset(static_df, aggregated_df)
# Class weighting
label_dist = [d['y'] for d in self.dataset]
label_counts = Counter(label_dist)
weight_per_label = 1./ len(label_counts)
label_weights = {
label: weight_per_label/count for label, count in label_counts.items()
}
self.weights = [ label_weights[d['y']] for d in self.dataset]
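# Inverse-frequency weighting: each class contributes equally in expectation. For example,
# with label_counts = {0: 900, 1: 100}, weight_per_label is 0.5 and individual samples are
# weighted 0.5/900 (majority class) and 0.5/100 (minority class).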
log(self.get_summary_statement(self.args.task, split_group, self.args.current_test_years, self.args.onset_bucket, label_counts), args)
@property
def task(self):
raise NotImplementedError("Abstract method needs to be overridden!")
@property
def task_specific_features(self, task=None):
"""Defines some itemids/gsns/icd_codes that are needed for the task.
Returns:
a dictionary mapping origin dataset -> list of itemids.
"""
return {}
def create_dataframes(self, args, item_mapping, **raw_dataframes):
"""Preprocesses raw dataframes into static_df and aggregated_df.
Returns:
- static_df
- must include columns 'hadm_id', and 'y' for the label.
- any additional columns will be used as input features for prediction.
- timeseries_df
"""
raise NotImplementedError("Abstract method needs to be overridden!")
def assign_splits(self, meta):
if self.args.timesplit:
# assign train_years as a list of years [2008, 2010] inclusive for instance.
train_start, train_end = map(int, self.args.train_years.split('-'))
meta['split_group'] = None
meta.loc[(meta['intime'].dt.year>=train_start) & (meta['intime'].dt.year<=train_end), 'split_group'] = 'train'
# dev will be a subset of train, of proportion split_probs[dev]
dev_prob = self.args.split_probs[1]
train_rows = meta[meta.split_group=='train'].shape[0]
dev_rows = int(dev_prob*train_rows)
meta.loc[meta[meta['split_group']=='train'].head(dev_rows).index, 'split_group'] = 'dev'
# if testing on training years, then final split is test set
if self.args.train_years == self.args.current_test_years:
test_prob = self.args.split_probs[2]
test_rows = int(test_prob*train_rows)
mask = meta.index.isin(meta[meta['split_group']=='train'].tail(test_rows).index)
else:
test_start, test_end = map(int, self.args.current_test_years.split('-'))
mask = meta['intime'].dt.year>=test_start
mask &= meta['intime'].dt.year<=test_end
# adding to the mask onset bucket
if self.args.onset_bucket is not None:
hour_start, hour_end = map(int, self.args.onset_bucket.split('-'))
mask &= meta[self.args.task+'_onset_hour'] >= hour_start
mask &= meta[self.args.task+'_onset_hour'] <= hour_end
meta.loc[mask, 'split_group'] = 'test'
else:
subject_ids = list(sorted(meta['subject_id'].unique()))
start_idx = 0
meta['split_group'] = None
for split, prob in zip(['train', 'dev', 'test'], self.args.split_probs):
end_idx = start_idx + int(len(subject_ids) * prob)
start = subject_ids[start_idx]
end = subject_ids[end_idx-1]
meta.loc[(meta['subject_id'] >= start) & (meta['subject_id'] <= end), 'split_group'] = split
start_idx = end_idx
if meta.loc[meta['subject_id']==subject_ids[end_idx-1]]['split_group'].isnull().any():
meta.loc[meta['subject_id']==subject_ids[end_idx-1], 'split_group'] = split
return meta
def create_dataset(self, static_df, aggregated_df=None):
"""Turns DataFrames into a list of samples, which are dicts containing 'pid', 'x', 'y', and
possibly 'x_timeseries' keys
"""
dataset = []
pids = static_df['subject_id'].values.astype(np.int32)
hadm_ids = static_df['hadm_id'].values.astype(np.int32)
ys = static_df['y'].values.astype(np.float32)
xs = static_df[self.static_features].values.astype(np.float32)
for i in tqdm(range(len(pids)), desc='Creating dataset', total=len(pids)):
patient_dict = {}
patient_dict['pid'] = pids[i]
patient_dict['y'] = ys[i]
patient_dict['x'] = xs[i]
if aggregated_df is not None:
patient_rows = aggregated_df.loc[aggregated_df.hadm_id == hadm_ids[i]]
assert len(patient_rows) > 0, "Found patient with no timeseries data!"
x_timeseries = patient_rows[self.timeseries_features].values.astype(np.float32)
patient_dict['x_timeseries'] = x_timeseries
dataset.append(patient_dict)
return dataset
def create_labels(self, static_df, aggregated_df, task, threshold):
"""Generates per-patient labels for the given task
Returns:
- static_df with an extra 'y' column
"""
raise NotImplementedError("Abstract method needs to be overridden!")
def extract_timerange(self, args, aggregated_df, task):
"""Extracts a fixed no. of hours of data to predict from
"""
raise NotImplementedError("Abstract method needs to be overridden!")
def get_summary_statement(self, task, split_group, years, hours, class_balance):
return "Created MIMIC-IV {} {} dataset for years {} and onset hours {} with the following class balance:\n{}".format(task, split_group, years, hours, class_balance)
def set_args(self, args):
args.num_classes = 2
args.input_dim = len(self.static_features)
if not args.flatten_timeseries:
args.timeseries_dim = len(self.timeseries_features)
args.timeseries_len = args.data_hours
def __getitem__(self, index):
return self.dataset[index]
def __len__(self):
return len(self.dataset)
@RegisterDataset("mimic-iv-sepsis")
class MIMIC_IV_Sepsis_Dataset(MIMIC_IV_Abstract_Dataset):
@property
def task(self):
return "Sepsis-3"
@property
def task_specific_features(self):
return {
'inputevents': [221662, 221653, 221289, 221906], # dopamine, dobutamine, epinephrine, norepinephrine
'outputevents': [226559, 226560, 226561, 226584, 226563, 226564, 226565, 226567, 226557,
226558, 227488, 227489], # for urine output
'labevents': [51265, 50885, 50912, 50821, 51301], # platelets, bilirubin, creatinine, PO2, WBC-count
'chartevents': [223835, 220739, 223900, 223901, 223849, 229314, # FiO2, GCS-Eye, GCS-Verbal, GCS-Motor, vent_mode, vent_mode (Hamilton)
223762, 223761, 220045, 220210, 224690], # temp_C, temp_F, heart rate, resp rate, resp rate (total)
'microbiologyevents': None, # all microbio samples (no id filtering happens on microbioevents, so None can be used here)
'prescriptions': None,
}
def create_dataframes(self, args, item_mapping, patients, chartevents, admissions, icustays,
inputevents, labevents, microbiologyevents=None, prescriptions=None, outputevents=None,
diagnoses_icd=None, procedureevents=None, **extra_dfs):
# filter patients and merge data (code from before)
admissions, patients, icustays = filter_eligible_patients(admissions, patients, icustays,
args.min_patient_age, args.min_hours, args.gap_hours,
args.min_icu_stay, args.max_icu_stay)
chartevents = filter_table_patients(chartevents, patients)
labevents = filter_table_patients(labevents, patients)
inputevents = filter_table_patients(inputevents, patients)
microbiologyevents = filter_table_patients(microbiologyevents, patients)
prescriptions = filter_table_patients(prescriptions, patients)
outputevents = filter_table_patients(outputevents, patients)
diagnoses_icd = filter_table_patients(diagnoses_icd, patients)
procedureevents = filter_table_patients(procedureevents, patients)
print("Merging static data...")
static_df = patients[["subject_id", "gender", "anchor_age"]]
static_df = static_df.merge(admissions[["subject_id", "hadm_id", "admittime", "dischtime", "insurance", "admission_type", "marital_status", "ethnicity"]],
how="inner", on="subject_id")
static_df = static_df.merge(icustays[["hadm_id", "stay_id", "first_careunit", "intime", "outtime", "los"]],
how="inner", on="hadm_id")
static_df.rename(columns={"anchor_age": "age", "stay_id": "icustay_id"}, inplace=True)
print("Filter events")
chartevents_features = item_mapping.loc[item_mapping.origin == 'chartevents'].itemid.astype(int).tolist()
inputevents_features = item_mapping.loc[item_mapping.origin == 'inputevents'].itemid.astype(int).tolist()
outputevents_features = item_mapping.loc[item_mapping.origin == 'outputevents'].itemid.astype(int).tolist()
labevents_features = item_mapping.loc[item_mapping.origin == 'labevents'].itemid.astype(int).tolist()
procedureevents_features = item_mapping.loc[item_mapping.origin == 'procedureevents'].itemid.astype(int).tolist()
prescriptions_features = item_mapping.loc[item_mapping.origin == 'prescriptions'].itemid.tolist()
inputevents_features.extend(self.task_specific_features['inputevents'])
outputevents_features.extend(self.task_specific_features['outputevents'])
labevents_features.extend(self.task_specific_features['labevents'])
chartevents_features.extend(self.task_specific_features['chartevents'])
filtered_inputevents = inputevents.loc[inputevents['itemid'].isin(inputevents_features)]
filtered_outputevents = outputevents.loc[outputevents['itemid'].isin(outputevents_features)]
filtered_labevents = labevents.loc[labevents['itemid'].isin(labevents_features)]
filtered_chartevents = filter_variables(chartevents, chartevents_features)
filtered_prescriptions = prescriptions.loc[prescriptions['gsn'].isin(prescriptions_features)]
antibiotics = filter_antibiotics(prescriptions)
filtered_diagnoses = filter_diagnoses(diagnoses_icd, item_mapping)
filtered_procedures = procedureevents.loc[procedureevents['itemid'].isin(procedureevents_features)]
# standardize units
print("Standardizing units")
filtered_chartevents = standardize_units(filtered_chartevents, item_mapping)
# merge diagnoses with static_df
filtered_diagnoses['value'] = 1
pivot_diagnoses = filtered_diagnoses.pivot_table(index='hadm_id', columns='icd_code', values ='value')
static_df = static_df.merge(pivot_diagnoses, on='hadm_id', how='left')
static_df[pivot_diagnoses.columns] = static_df[pivot_diagnoses.columns].fillna(0)
print("Filter events to stay")
filtered_inputevents.rename(columns={"starttime": "charttime"}, inplace=True)
antibiotics.rename(columns={'starttime':'charttime'}, inplace=True)
filtered_procedures.rename(columns={'starttime':'charttime'}, inplace=True)
chartlab_events = pd.concat([filtered_chartevents, filtered_labevents], join='outer')
filtered_prescriptions.rename(columns={'starttime':'charttime', 'gsn':'itemid'}, inplace=True)
# Pass chartevents dataframe and inputevents through hourly aggregation
chartlab_events = filter_events_to_stay(chartlab_events, static_df)
filtered_inputevents = filter_events_to_stay(filtered_inputevents, static_df)
microbiologyevents = filter_events_to_stay(microbiologyevents, static_df)
antibiotics = filter_events_to_stay(antibiotics, static_df)
filtered_prescriptions = filter_events_to_stay(filtered_prescriptions, static_df)
filtered_outputevents = filter_events_to_stay(filtered_outputevents, static_df)
filtered_procedures = filter_events_to_stay(filtered_procedures, static_df)
if args.group_by_level2:
print("Group itemids by actual feature they represent")
item_mapping_chartlab = item_mapping.loc[item_mapping.origin == 'chartevents', ['itemid', 'LEVEL2']].astype({'itemid': int})
chartlab_events = chartlab_events.merge(item_mapping_chartlab, on='itemid', how='left')
group_mask = ~chartlab_events.LEVEL2.isna()
chartlab_events.loc[group_mask, 'itemid'] = chartlab_events.loc[group_mask, 'LEVEL2']
print("Hourly aggregation")
# fill NaN with 1 for incisions etc.
# assign the result back: fillna(inplace=True) on a column slice operates on a copy
chartlab_events[['value', 'valuenum']] = chartlab_events[['value', 'valuenum']].fillna(1)
aggregated_df = hourly_aggregation(chartlab_events, static_df, filtered_inputevents, antibiotics, microbiologyevents, filtered_outputevents, filtered_procedures, filtered_prescriptions)
print("Calculate SOFA, SI and Sepsis-3")
# import vents -- can move this code into SOFA score if necessary
vents_df = pd.read_csv(args.vent_path, low_memory=False)
vents_df = pd.merge(vents_df, static_df[['subject_id', 'hadm_id', 'icustay_id']],
how='inner', left_on='stay_id', right_on='icustay_id') # filter for relevant stay & patients
vents_df['starttime'] = pd.to_datetime(vents_df.starttime)
vents_df['endtime'] = pd.to_datetime(vents_df.endtime)
vents_df = anchor_dates(vents_df, ['starttime', 'endtime'], patients)
aggregated_df = add_vents(aggregated_df, vents_df)
# Calculate SOFA scores as additional columns
aggregated_df = calculate_SOFA(aggregated_df)
# Calculate Suspicion of Infection as an additional column
aggregated_df = calculate_SI(aggregated_df)
# Calculate Sepsis from SOFA and SI
aggregated_df = calculate_sepsis(aggregated_df, task="sepsis3", consider_difference=args.sepsis_consider_sofa_difference, decrease_baseline=args.sepsis_decrease_sofa_baseline)
# Add SIRS definition as column
# XXX: commented out because of conflict with itemid grouping
#aggregated_df = calculate_SIRS(aggregated_df)
# Calculate Sepsis from SIRS and SI
#aggregated_df = calculate_sepsis(aggregated_df, task="sepsis1", consider_difference=args.sepsis_consider_sofa_difference, decrease_baseline=args.sepsis_decrease_sofa_baseline)
# print("Filtering out patients without enough data")
# # Filtering out patients without enough data:
# counts = aggregated_df['hadm_id'].value_counts()
# aggregated_df = aggregated_df[aggregated_df['hadm_id'].isin(counts[counts>(args.data_hours+args.gap_hours)].index)]
print("Computing approximate real dates...")
static_df = anchor_dates(static_df, ["admittime", "dischtime", "intime", "outtime"], patients)
if 'charttime' in aggregated_df.columns:
aggregated_df = aggregated_df.merge(static_df[['hadm_id','subject_id']], on='hadm_id')
aggregated_df = anchor_dates(aggregated_df, ['charttime'], patients)
# drop patients where any one feature has no vitals
if args.dascena_drop:
print("Dropping patients with any vital missing")
categories = ["heart rate", "respiratory rate", "temperature", "systolic blood pressure",
"diastolic blood pressure", "oxygen saturation"]
for vital in categories:
if args.group_by_level2:
if vital not in aggregated_df.columns:
continue
mask = aggregated_df.set_index("hadm_id")[vital].notnull().groupby(level=0).any()
else:
ids = list(item_mapping.loc[item_mapping['LEVEL2'].str.lower() == vital]['itemid'].map(str))
valid_ids = [i for i in ids if i in aggregated_df.columns]
if len(valid_ids) == 0:
continue
mask = aggregated_df.set_index("hadm_id")[valid_ids].notnull().groupby(level=0).any().any(axis=1)
aggregated_df = aggregated_df.set_index("hadm_id")[mask].reset_index()
# Impute
print("Imputing NaNs")
total_values = (aggregated_df.shape[0] * aggregated_df.shape[1])
print("- Ratio of Nans:", aggregated_df.isna().sum().sum() / total_values)
ignore_cols = ['hadm_id', 'charttime', 'hour', 'subject_id'] + list(aggregated_df.select_dtypes(include="bool").columns)
impute_cols = [col for col in aggregated_df.columns if col not in ignore_cols]
aggregated_df = impute_timeseries(aggregated_df, method=args.impute_method, feature_cols=impute_cols)
total_values = (aggregated_df.shape[0] * aggregated_df.shape[1])
print("After imputation:")
print("- Ratio of zeroes:", (aggregated_df == 0).sum().sum() / total_values)
return static_df, aggregated_df
def create_labels(self, static_df, aggregated_df, task='sepsis3', threshold=None):
# generate per-patient sepsis3 label
sepsis_hadm_ids = aggregated_df.hadm_id[aggregated_df[task] == True].unique()
static_df['y'] = False
static_df.loc[static_df.hadm_id.isin(sepsis_hadm_ids), 'y'] = True
def extract_timerange(self, args, aggregated_df, task='sepsis3'):
sepsis_onset_hour = aggregated_df[aggregated_df[task+'_onset']][['hadm_id', 'hour']]
sepsis_onset_hour.rename(columns={'hour': task+'_onset_hour'}, inplace=True)
aggregated_df = extract_data_prior_to_event(aggregated_df, sepsis_onset_hour, key='hadm_id', events_hour_column=task+'_onset_hour',
gap_hours=args.gap_hours, data_hours=args.data_hours, case_control=args.case_control, dascena_control=args.dascena_control)
return aggregated_df
@RegisterDataset("mimic-iv-los")
class MIMIC_IV_Los_Dataset(MIMIC_IV_Abstract_Dataset):
@property
def task(self):
return "Length of Stay"
def create_dataframes(self, args, item_mapping, patients, chartevents, admissions, icustays):
admissions, patients, icustays = filter_eligible_patients(admissions, patients, icustays,
args.min_patient_age, args.min_hours, args.gap_hours,
args.min_icu_stay, args.max_icu_stay)
chartevents = filter_table_patients(chartevents, patients)
print("Merging static data...")
static_df = patients[["subject_id", "gender", "anchor_age"]]
static_df = static_df.merge(admissions[["subject_id", "hadm_id", "admittime", "dischtime", "insurance", "admission_type", "marital_status", "ethnicity"]],
how="inner", on="subject_id")
static_df = static_df.merge(icustays[["hadm_id", "stay_id", "first_careunit", "intime", "outtime", "los"]],
how="inner", on="hadm_id")
static_df.rename(columns={"anchor_age": "age", "stay_id": "icustay_id"}, inplace=True)
print("Filter events")
chartevents_features = item_mapping.loc[item_mapping.origin == 'chartevents'].itemid.astype(int).tolist()
filtered_chartevents = filter_variables(chartevents, chartevents_features)
print("Standardizing units")
filtered_chartevents = standardize_units(filtered_chartevents, item_mapping)
print("Filter events to stay")
filtered_chartevents = filter_events_to_stay(filtered_chartevents, static_df)
if args.group_by_level2:
print("Group itemids by actual feature they represent")
item_mapping_chart = item_mapping.loc[item_mapping.origin == 'chartevents', ['itemid', 'LEVEL2']].astype({'itemid': int})
filtered_chartevents = filtered_chartevents.merge(item_mapping_chart, on='itemid', how='left')
group_mask = ~filtered_chartevents.LEVEL2.isna()
filtered_chartevents.loc[group_mask, 'itemid'] = filtered_chartevents.loc[group_mask, 'LEVEL2']
print("Hourly aggregation")
aggregated_df = hourly_aggregation(filtered_chartevents, static_df)
print("Computing approximate real dates...")
static_df = anchor_dates(static_df, ["admittime", "dischtime", "intime", "outtime"], patients)
if 'charttime' in aggregated_df.columns:
aggregated_df = aggregated_df.merge(static_df[['hadm_id','subject_id']], on='hadm_id')
aggregated_df = anchor_dates(aggregated_df, ['charttime'], patients)
print(f"Extracting {args.data_hours} hours of data")
aggregated_df = self.extract_timerange(args, aggregated_df, task=args.task)
print("Reindexing timeseries")
aggregated_df = reindex_timeseries(aggregated_df)
# Imputing
print("Imputing NaNs")
total_values = (aggregated_df.shape[0] * aggregated_df.shape[1])
print("- Ratio of Nans:", aggregated_df.isna().sum().sum() / total_values)
impute_cols = [col for col in aggregated_df.columns if col not in ['hadm_id', 'charttime', 'hour', 'subject_id']]
aggregated_df = impute_timeseries(aggregated_df, method=args.impute_method, feature_cols=impute_cols)
total_values = (aggregated_df.shape[0] * aggregated_df.shape[1])
print("After imputation:")
print("- Ratio of zeroes:", (aggregated_df == 0).sum().sum() / total_values)
# filter static_df to only include patients in aggregated_df
static_df = static_df[static_df.hadm_id.isin(aggregated_df.hadm_id.unique())]
return static_df, aggregated_df
def create_labels(self, static_df, aggregated_df, task=None, threshold=4):
static_df['y'] = static_df['los'] >= threshold
# extract first data_hours data from each patient
def extract_timerange(self, args, aggregated_df, task=None):
# aggregated_df['hour'] = aggregated_df.groupby('hadm_id')['hour'].rank('first')
df = aggregated_df.loc[aggregated_df['hour']<= args.data_hours]
return df
@RegisterDataset("mimic-iv-icumort")
class MIMIC_IV_ICUMort_Dataset(MIMIC_IV_Abstract_Dataset):
@property
def task(self):
return "ICU Mortality"
def create_dataframes(self, args, item_mapping, patients, chartevents, admissions, icustays):
admissions, patients, icustays = filter_eligible_patients(admissions, patients, icustays,
args.min_patient_age, args.min_hours, args.gap_hours,
args.min_icu_stay, args.max_icu_stay)
chartevents = filter_table_patients(chartevents, patients)
print("Merging static data...")
static_df = patients[["subject_id", "gender", "anchor_age"]]
static_df = static_df.merge(admissions[["subject_id", "hadm_id", "admittime", "dischtime", "deathtime", "insurance", "admission_type", "marital_status", "ethnicity"]],
how="inner", on="subject_id")
static_df = static_df.merge(icustays[["hadm_id", "stay_id", "first_careunit", "intime", "outtime", "los"]],
how="inner", on="hadm_id")
static_df['death_in_icu'] = (~static_df['deathtime'].isna()) & (static_df['deathtime'] >= static_df['intime']) & \
(static_df['deathtime'] <= static_df['outtime'])
static_df.rename(columns={"anchor_age": "age", "stay_id": "icustay_id"}, inplace=True)
print("Filter events")
chartevents_features = item_mapping.loc[item_mapping.origin == 'chartevents'].itemid.astype(int).tolist()
filtered_chartevents = filter_variables(chartevents, chartevents_features)
print("Standardizing units")
filtered_chartevents = standardize_units(filtered_chartevents, item_mapping)
print("Filter events to stay")
filtered_chartevents = filter_events_to_stay(filtered_chartevents, static_df)
if args.group_by_level2:
print("Group itemids by actual feature they represent")
item_mapping_chart = item_mapping.loc[item_mapping.origin == 'chartevents', ['itemid', 'LEVEL2']].astype({'itemid': int})
filtered_chartevents = filtered_chartevents.merge(item_mapping_chart, on='itemid', how='left')
group_mask = ~filtered_chartevents.LEVEL2.isna()
filtered_chartevents.loc[group_mask, 'itemid'] = filtered_chartevents.loc[group_mask, 'LEVEL2']
print("Hourly aggregation")
aggregated_df = hourly_aggregation(filtered_chartevents, static_df)
print("Computing approximate real dates...")
static_df = anchor_dates(static_df, ["admittime", "dischtime", "intime", "outtime"], patients)
if 'charttime' in aggregated_df.columns:
aggregated_df = aggregated_df.merge(static_df[['hadm_id','subject_id']], on='hadm_id')
aggregated_df = anchor_dates(aggregated_df, ['charttime'], patients)
print(f"Extracting {args.data_hours} hours of data")
aggregated_df = self.extract_timerange(args, aggregated_df, task=args.task)
print("Reindexing timeseries")
aggregated_df = reindex_timeseries(aggregated_df)
# Imputing
print("Imputing NaNs")
total_values = (aggregated_df.shape[0] * aggregated_df.shape[1])
print("- Ratio of Nans:", aggregated_df.isna().sum().sum() / total_values)
impute_cols = [col for col in aggregated_df.columns if col not in ['hadm_id', 'charttime', 'hour', 'subject_id']]
aggregated_df = impute_timeseries(aggregated_df, method=args.impute_method, feature_cols=impute_cols)
total_values = (aggregated_df.shape[0] * aggregated_df.shape[1])
print("After imputation:")
print("- Ratio of zeroes:", (aggregated_df == 0).sum().sum() / total_values)
# filter static_df to only include patients in aggregated_df
static_df = static_df[static_df.hadm_id.isin(aggregated_df.hadm_id.unique())]
return static_df, aggregated_df
def create_labels(self, static_df, aggregated_df, task=None, threshold=None):
static_df['y'] = static_df['death_in_icu']
# extract first data_hours data from each patient
def extract_timerange(self, args, aggregated_df, task=None):
# aggregated_df['hour'] = aggregated_df.groupby('hadm_id')['hour'].rank('first')
df = aggregated_df.loc[aggregated_df['hour']<= args.data_hours]
return df
# args that affect cache
CACHE_ARGS = ['dataset', 'min_patient_age', 'data_hours', 'min_hours', 'gap_hours', 'min_icu_stay', 'max_icu_stay', 'item_map_path',
'sepsis_consider_sofa_difference', 'sepsis_decrease_sofa_baseline', 'group_by_level2', 'impute_method', 'dascena_drop']
def get_cache_filename(filename, args, extension='parquet'):
args_dict = vars(args)
args_str = ""
for arg in CACHE_ARGS:
arg_val = args_dict[arg]
args_str += '#' + arg + '=' + str(arg_val)
filename += "#" + md5(args_str) + '.' + extension
return filename
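# Example (values illustrative): get_cache_filename('static', args) returns something like
# 'static#<md5 of "#dataset=...#min_patient_age=...">.parquet', so changing any CACHE_ARGS
# value produces a new cache file instead of silently reusing a stale one.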
def calculate_SIRS(aggregated_df):
""" returns a dataframe with an additional column for SIRS score at every hour for the patient """
# Temperature
aggregated_df['temp_SIRS'] = 0
aggregated_df.loc[aggregated_df['223762'] < 10, '223762'] = float("NaN")
aggregated_df.loc[aggregated_df['223762'] > 50, '223762'] = float("NaN")
aggregated_df.loc[aggregated_df['223761'] < 70, '223761'] = float("NaN")
aggregated_df.loc[aggregated_df['223761'] > 120, '223761'] = float("NaN")
aggregated_df.loc[aggregated_df['223762'] > 38, 'temp_SIRS'] = 1
aggregated_df.loc[aggregated_df['223762'] < 36, 'temp_SIRS'] = 1
aggregated_df.loc[aggregated_df['223761'] > 100.4, 'temp_SIRS'] = 1
aggregated_df.loc[aggregated_df['223761'] < 96.8, 'temp_SIRS'] = 1
# Heart rate
aggregated_df['hr_SIRS'] = 0
aggregated_df.loc[aggregated_df['220045'] > 300, '220045'] = float("NaN")
aggregated_df.loc[aggregated_df['220045'] < 0, '220045'] = float("NaN")
aggregated_df.loc[aggregated_df['220045'] > 90, 'hr_SIRS'] = 1
# Respiratory rate
aggregated_df['resp_SIRS'] = 0
aggregated_df.loc[aggregated_df['220210'] > 70, '220210'] = float("NaN")
aggregated_df.loc[aggregated_df['220210'] < 0, '220210'] = float("NaN")
aggregated_df.loc[aggregated_df['224690'] > 70, '224690'] = float("NaN")
aggregated_df.loc[aggregated_df['224690'] < 0, '224690'] = float("NaN")
aggregated_df.loc[aggregated_df['220210'] > 20, 'resp_SIRS'] = 1
aggregated_df.loc[aggregated_df['224690'] > 20, 'resp_SIRS'] = 1
# WBC
aggregated_df['wbc_SIRS'] = 0
aggregated_df.loc[aggregated_df['51301'] > 12, 'wbc_SIRS'] = 1
aggregated_df.loc[aggregated_df['51301'] < 4, 'wbc_SIRS'] = 1
# Aggregation
sirs_cols = ['temp_SIRS', 'hr_SIRS', 'resp_SIRS', 'wbc_SIRS']
aggregated_df[sirs_cols] = aggregated_df.groupby('hadm_id')[sirs_cols].ffill().fillna(0).astype(int)
aggregated_df['SIRS'] = aggregated_df[sirs_cols].sum(axis=1)
aggregated_df.drop(columns=sirs_cols, inplace=True)
return aggregated_df
def calculate_SOFA(aggregated_df):
""" returns a dataframe with an additional column for SOFA score at every hour for the patient """
scores = [0, 1, 2, 3, 4]
reverse_scores = [4, 3, 2, 1, 0]
# Respiration
aggregated_df.loc[aggregated_df['223835'] < 1, '223835'] = aggregated_df['223835'] * 100
aggregated_df.loc[aggregated_df['223835'] < 20, '223835'] = float("NaN")
aggregated_df['pao2fio2ratio'] = aggregated_df['50821'] / aggregated_df['223835'] * 100
aggregated_df['pao2fio2ratio_novent'] = aggregated_df.loc[aggregated_df['InvasiveVent']==0]['pao2fio2ratio']
aggregated_df['pao2fio2ratio_vent'] = aggregated_df.loc[aggregated_df['InvasiveVent']==1]['pao2fio2ratio']
aggregated_df['resp_SOFA'] = 0
aggregated_df.loc[aggregated_df['pao2fio2ratio_novent'] < 400, 'resp_SOFA'] = 1
aggregated_df.loc[aggregated_df['pao2fio2ratio_novent'] < 300, 'resp_SOFA'] = 2
aggregated_df.loc[aggregated_df['pao2fio2ratio_vent'] < 200, 'resp_SOFA'] = 3
aggregated_df.loc[aggregated_df['pao2fio2ratio_vent'] < 100, 'resp_SOFA'] = 4
# Liver
bilirubin_bins = [-1, 1.2, 2, 6, 12, float("inf")]
aggregated_df['liver_SOFA'] = pd.cut(aggregated_df['50885'], bilirubin_bins, labels=scores).astype('float')
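# pd.cut buckets the raw value into its SOFA sub-score, e.g. bilirubin <= 1.2 -> 0,
# (1.2, 2] -> 1, (2, 6] -> 2, (6, 12] -> 3, > 12 -> 4; the coagulation bins below use
# reverse_scores because lower platelet counts correspond to a worse (higher) score.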
# Coagulation
coag_bins = [-1, 20, 50, 100, 150, float("inf")]
aggregated_df['coag_SOFA'] = pd.cut(aggregated_df['51265'], coag_bins, labels=reverse_scores).astype('float')
# Renal
creat_bins = [-1, 1.2, 2, 3.5, 5, float("inf")]
aggregated_df['renal_SOFA'] = pd.cut(aggregated_df['50912'], creat_bins, labels=scores)
from drop import utils
import pandas as pd
from pathlib import Path
from collections import defaultdict
from snakemake.logging import logger
import warnings
warnings.filterwarnings("ignore", 'This pattern has match groups')
class SampleAnnotation:
FILE_TYPES = ["RNA_BAM_FILE", "DNA_VCF_FILE", "GENE_COUNTS_FILE"]
SAMPLE_ANNOTATION_COLUMNS = FILE_TYPES + [
"RNA_ID", "DNA_ID", "DROP_GROUP", "GENE_ANNOTATION",
"PAIRED_END", "COUNT_MODE", "COUNT_OVERLAPS", "STRAND", "GENOME"
]
def __init__(self, file, root, genome):
"""
sa_file: sample annotation file location from config
root: output location for file mapping
"""
self.root = Path(root)
self.file = file
self.genome = genome
self.annotationTable = self.parse()
self.idMapping = self.createIdMapping()
self.sampleFileMapping = self.createSampleFileMapping()
self.rnaIDs = self.createGroupIds(file_type="RNA_BAM_FILE", sep=',')
self.dnaIDs = self.createGroupIds(file_type="DNA_VCF_FILE", sep=',')
# external counts
self.extGeneCountIDs = self.createGroupIds(file_type="GENE_COUNTS_FILE", sep=',')
def parse(self, sep='\t'):
"""
read and check sample annotation for missing columns
clean columns and set types
"""
data_types = {
"RNA_ID": str, "DNA_ID": str, "DROP_GROUP": str, "GENE_ANNOTATION": str,
"PAIRED_END": bool, "COUNT_MODE": str, "COUNT_OVERLAPS": bool, "STRAND": str, "GENOME": str
}
sa = pd.read_csv(self.file, sep=sep, index_col=False)
missing_cols = [x for x in self.SAMPLE_ANNOTATION_COLUMNS if x not in sa.columns.values]
if len(missing_cols) > 0:
if "GENOME" in missing_cols:
# deal with missing columns in data types, remove it to fix checks later
del data_types["GENOME"]
self.SAMPLE_ANNOTATION_COLUMNS.remove("GENOME")
missing_cols.remove("GENOME")
if "GENE_ANNOTATION" in missing_cols and "ANNOTATION" in sa.columns.values:
logger.info(
"WARNING: GENE_ANNOTATION must be a column in the sample annotation table, ANNOTATION is the old column name and will be deprecated in the future\n")
sa["GENE_ANNOTATION"] = sa.pop("ANNOTATION")
missing_cols.remove("GENE_ANNOTATION")
if len(missing_cols) > 0:
raise ValueError(f"Incorrect columns in sample annotation file. Missing:\n{missing_cols}")
sa = sa.astype(data_types)
# remove unwanted characters
sa["DROP_GROUP"] = sa["DROP_GROUP"].str.replace(" ", "").str.replace("(|)", "", regex=True)
return sa
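# For example, a DROP_GROUP entry like "outrider, (fraser)" is cleaned to "outrider,fraser"
# by the whitespace/parenthesis stripping above before createGroupIds splits it on commas.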
#### Construction
def createIdMapping(self):
"""
Get mapping of RNA and DNA IDs
"""
return self.annotationTable[["RNA_ID", "DNA_ID"]].drop_duplicates().dropna()
def createSampleFileMapping(self):
"""
create a sample file mapping with unique entries of existing files
columns: [ID | ASSAY | FILE_TYPE | FILE_PATH ]
"""
assay_mapping = {'RNA_ID': ['RNA_BAM_FILE', 'GENE_COUNTS_FILE'], 'DNA_ID': ['DNA_VCF_FILE']}
assay_subsets = []
for id_, file_types in assay_mapping.items():
for file_type in file_types:
df = self.annotationTable[[id_, file_type]].dropna().drop_duplicates().copy()
df.rename(columns={id_: 'ID', file_type: 'FILE_PATH'}, inplace=True)
df['ASSAY'] = id_
df['FILE_TYPE'] = file_type
assay_subsets.append(df)
file_mapping = pd.concat(assay_subsets)
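# Minimal usage sketch (paths and genome label are hypothetical):
#   sa = SampleAnnotation("sample_annotation.tsv", root="output/file_mapping", genome="hg19")
#   sa.annotationTable.head()    # cleaned sample annotation table
#   sa.sampleFileMapping         # [ID | ASSAY | FILE_TYPE | FILE_PATH] as built above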
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version:
@author: zzh
@file: factor_earning_expectation.py
@time: 2019-9-19
"""
import pandas as pd
class FactorEarningExpectation():
"""
Earnings expectations
"""
def __init__(self):
__str__ = 'factor_earning_expectation'
self.name = '盈利预测'  # "earnings forecast"
self.factor_type1 = '盈利预测'  # "earnings forecast"
self.factor_type2 = '盈利预测'  # "earnings forecast"
self.description = '个股盈利预测因子'  # "per-stock earnings forecast factors"
@staticmethod
def NPFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['net_profit_fy1']):
"""
:name: Consensus forecast net profit (FY1)
:desc: Consensus net profit forecast for the first forward fiscal year
:unit: CNY
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'net_profit_fy1': 'NPFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['net_profit_fy2']):
"""
:name: Consensus forecast net profit (FY2)
:desc: Consensus net profit forecast for the second forward fiscal year
:unit: CNY
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'net_profit_fy2': 'NPFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['eps_fy1']):
"""
:name: Consensus forecast EPS (FY1)
:desc: Mean consensus EPS forecast for the first forward fiscal year
:unit: CNY
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'eps_fy1': 'EPSFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['eps_fy2']):
"""
:name: Consensus forecast EPS (FY2)
:desc: Mean consensus EPS forecast for the second forward fiscal year
:unit: CNY
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'eps_fy2': 'EPSFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['operating_revenue_fy1']):
"""
:name: Consensus forecast operating revenue (FY1)
:desc: Mean consensus operating revenue forecast for the first forward fiscal year
:unit: CNY
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'operating_revenue_fy1': 'OptIncFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['operating_revenue_fy2']):
"""
:name: Consensus forecast operating revenue (FY2)
:desc: Mean consensus operating revenue forecast for the second forward fiscal year
:unit: CNY
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'operating_revenue_fy2': 'OptIncFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['pe_fy1']):
"""
:name: Consensus forecast price-to-earnings ratio (PE) (FY1)
:desc: Mean consensus PE forecast for the first forward fiscal year
:unit: times
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pe_fy1': 'CEPEFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['pe_fy2']):
"""
:name: Consensus forecast price-to-earnings ratio (PE) (FY2)
:desc: Mean consensus PE forecast for the second forward fiscal year
:unit: times
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pe_fy2': 'CEPEFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPBFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['pb_fy1']):
"""
:name: Consensus forecast price-to-book ratio (PB) (FY1)
:desc: Mean consensus PB forecast for the first forward fiscal year
:unit: times
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pb_fy1': 'CEPBFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPBFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['pb_fy2']):
"""
:name: Consensus forecast price-to-book ratio (PB) (FY2)
:desc: Mean consensus PB forecast for the second forward fiscal year
:unit: times
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pb_fy2': 'CEPBFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEGFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['peg_fy1']):
"""
:name: PEG ratio (FY1)
:desc: PE relative to earnings growth for the first forward fiscal year
:unit:
:view_dimension: 0.01
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'peg_fy1': 'CEPEGFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEGFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['peg_fy2']):
"""
:name: PEG ratio (FY2)
:desc: PE relative to earnings growth for the second forward fiscal year
:unit:
:view_dimension: 0.01
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'peg_fy2': 'CEPEGFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def _change_rate(tp_earning, trade_date, pre_trade_date, colunm, factor_name):
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, colunm]
earning_expect_pre = tp_earning[tp_earning['publish_date'] == pre_trade_date].loc[:, colunm]
earning_expect = pd.merge(earning_expect, earning_expect_pre, on='security_code', how='left')
earning_expect[factor_name] = (earning_expect[colunm + '_x'] - earning_expect[colunm + '_y']) / \
earning_expect[colunm + '_y']
earning_expect.drop(columns=[colunm + '_x', colunm + '_y'], inplace=True)
return earning_expect
@staticmethod
def _change_value(tp_earning, trade_date, pre_trade_date, colunm, factor_name):
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, colunm]
earning_expect_pre = tp_earning[tp_earning['publish_date'] == pre_trade_date].loc[:, colunm]
earning_expect = pd.merge(earning_expect, earning_expect_pre, on='security_code', how='left')
earning_expect[factor_name] = (earning_expect[colunm + '_x'] - earning_expect[colunm + '_y'])
earning_expect.drop(columns=[colunm + '_x', colunm + '_y'], inplace=True)
return earning_expect
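# Illustrative difference between the two helpers: if the consensus value is 120 at
# trade_date and 100 at pre_trade_date, _change_rate yields (120 - 100) / 100 = 0.2 while
# _change_value yields 120 - 100 = 20, both joined per security_code.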
@staticmethod
def NPFY11WRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus net profit (FY1) change rate, 1 week
:desc: One-week change rate of the consensus net profit forecast for the first forward fiscal year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[1],
'net_profit_fy1',
'NPFY11WRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY11MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus net profit (FY1) change rate, 1 month
:desc: One-month change rate of the consensus net profit forecast for the first forward fiscal year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[2],
'net_profit_fy1',
'NPFY11MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY13MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus net profit (FY1) change rate, 3 months
:desc: Three-month change rate of the consensus net profit forecast for the first forward fiscal year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[3],
'net_profit_fy1',
'NPFY13MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY16MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus net profit (FY1) change rate, 6 months
:desc: Six-month change rate of the consensus net profit forecast for the first forward fiscal year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[4],
'net_profit_fy1',
'NPFY16MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11WChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus EPS (FY1) change, 1 week
:desc: One-week change of the consensus EPS forecast for the first forward fiscal year
:unit: CNY
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[1],
'eps_fy1',
'EPSFY11WChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus EPS (FY1) change, 1 month
:desc: One-month change of the consensus EPS forecast for the first forward fiscal year
:unit: CNY
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[2],
'eps_fy1',
'EPSFY11MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY13MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus EPS (FY1) change, 3 months
:desc: Three-month change of the consensus EPS forecast for the first forward fiscal year
:unit: CNY
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[3],
'eps_fy1',
'EPSFY13MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY16MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus EPS (FY1) change, 6 months
:desc: Six-month change of the consensus EPS forecast for the first forward fiscal year
:unit: CNY
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[4],
'eps_fy1',
'EPSFY16MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11WRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus EPS (FY1) change rate, 1 week
:desc: One-week change rate of the consensus EPS forecast for the first forward fiscal year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
# use _change_rate here to match the "change rate" semantics of this factor
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[1],
'eps_fy1',
'EPSFY11WRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus EPS (FY1) change rate, 1 month
:desc: One-month change rate of the consensus EPS forecast for the first forward fiscal year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[2],
'eps_fy1',
'EPSFY11MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY13MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus EPS (FY1) change rate, 3 months
:desc: Three-month change rate of the consensus EPS forecast for the first forward fiscal year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[3],
'eps_fy1',
'EPSFY13MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY16MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus EPS (FY1) change rate, 6 months
:desc: Six-month change rate of the consensus EPS forecast for the first forward fiscal year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[4],
'eps_fy1',
'EPSFY16MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY11WChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus net profit (FY1) change, 1 week
:desc: One-week change of the consensus net profit forecast for the first forward fiscal year
:unit: CNY
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[1],
'net_profit_fy1',
'NPFY11WChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY11MChg(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Consensus net profit (FY1) change, 1-month
        :desc: Change in the FY1 consensus net profit forecast over the past month
        :unit: CNY
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[2],
'net_profit_fy1',
'NPFY11MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY13MChg(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Consensus net profit (FY1) change, 3-month
        :desc: Change in the FY1 consensus net profit forecast over the past three months
        :unit: CNY
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[3],
'net_profit_fy1',
'NPFY13MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY16MChg(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Consensus net profit (FY1) change, 6-month
        :desc: Change in the FY1 consensus net profit forecast over the past six months
        :unit: CNY
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[4],
'net_profit_fy1',
'NPFY16MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def ChgNPFY1FY2(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Rate of change of consensus net profit (FY2) relative to consensus net profit (FY1)
        :desc: Rate of change between the FY2 and FY1 consensus net profit forecasts
:unit:
:view_dimension: 0.01
"""
        factor_earning_expect['ChgNPFY1FY2'] = (factor_earning_expect['NPFY2'] - factor_earning_expect['NPFY1']) / abs(
            factor_earning_expect['NPFY1']) * 100
return factor_earning_expect
@staticmethod
def ChgEPSFY1FY2(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Rate of change of consensus EPS (FY2) relative to consensus EPS (FY1)
        :desc: Rate of change between the FY2 and FY1 consensus EPS forecasts
:unit:
:view_dimension: 0.01
"""
        factor_earning_expect['ChgEPSFY1FY2'] = (factor_earning_expect['EPSFY2'] - factor_earning_expect['EPSFY1']) / abs(
            factor_earning_expect['EPSFY1']) * 100
return factor_earning_expect
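    # Illustrative check of the two FY1->FY2 change-rate factors above, using made-up
    # numbers rather than real consensus data: with an FY1 consensus of 80.0 and an
    # FY2 consensus of 100.0, (100.0 - 80.0) / abs(80.0) * 100 evaluates to 25.0,
    # i.e. the FY2 forecast sits 25% above the FY1 forecast.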
@staticmethod
def OptIncFY11WRT(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Consensus operating revenue (FY1) change, 1-week
        :desc: Change in the FY1 consensus operating revenue forecast over the past week
        :unit: CNY
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[1],
'operating_revenue_fy1',
'OptIncFY11WRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY11MRT(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Consensus operating revenue (FY1) change, 1-month
        :desc: Change in the FY1 consensus operating revenue forecast over the past month
        :unit: CNY
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[2],
'operating_revenue_fy1',
'OptIncFY11MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY13MRT(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Consensus operating revenue (FY1) change, 3-month
        :desc: Change in the FY1 consensus operating revenue forecast over the past three months
        :unit: CNY
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[3],
'operating_revenue_fy1',
'OptIncFY13MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY16MRT(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Consensus operating revenue (FY1) change, 6-month
        :desc: Change in the FY1 consensus operating revenue forecast over the past six months
        :unit: CNY
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[4],
'operating_revenue_fy1',
'OptIncFY16MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY11WChg(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Consensus operating revenue (FY1) change rate, 1-week
        :desc: Rate of change in the FY1 consensus operating revenue forecast over the past week
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[1],
'operating_revenue_fy1',
'OptIncFY11WChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY11MChg(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Consensus operating revenue (FY1) change rate, 1-month
        :desc: Rate of change in the FY1 consensus operating revenue forecast over the past month
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[2],
'operating_revenue_fy1',
'OptIncFY11MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY13MChg(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Consensus operating revenue (FY1) change rate, 3-month
        :desc: Rate of change in the FY1 consensus operating revenue forecast over the past three months
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[3],
'operating_revenue_fy1',
'OptIncFY13MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY16MChg(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Consensus operating revenue (FY1) change rate, 6-month
        :desc: Rate of change in the FY1 consensus operating revenue forecast over the past six months
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[4],
'operating_revenue_fy1',
'OptIncFY16MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
# @staticmethod
# def OptIncFY1SDT(tp_earning, factor_earning_expect, trade_date):
# """
    #     :name: Consensus operating revenue (FY1) standard deviation
    #     :desc: Standard deviation of the FY1 consensus operating revenue forecasts
# """
# return factor_earning_expect
@staticmethod
def CERATINGRATE1W(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Rating change rate, 1-week
        :desc: Rate of change in buy ratings from research institutions over the past week
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[1],
'rating2',
'CERATINGRATE1W')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CERATINGRATE1M(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Rating change rate, 1-month
        :desc: Rate of change in buy ratings from research institutions over the past month
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[2],
'rating2',
'CERATINGRATE1M')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CERATINGRATE3M(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Rating change rate, 3-month
        :desc: Rate of change in buy ratings from research institutions over the past three months
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[3],
'rating2',
'CERATINGRATE3M')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CERATINGRATE6M(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Rating change rate, 6-month
        :desc: Rate of change in buy ratings from research institutions over the past six months
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[4],
'rating2',
'CERATINGRATE6M')
factor_earning_expect = | pd.merge(factor_earning_expect, earning_expect, on='security_code') | pandas.merge |
import glob
import math
import os
import sys
import warnings
from decimal import Decimal
import numpy as np
import pandas as pd
import pytest
from packaging.version import parse as parse_version
import dask
import dask.dataframe as dd
import dask.multiprocessing
from dask.blockwise import Blockwise, optimize_blockwise
from dask.dataframe._compat import PANDAS_GT_110, PANDAS_GT_121, PANDAS_GT_130
from dask.dataframe.io.parquet.utils import _parse_pandas_metadata
from dask.dataframe.optimize import optimize_dataframe_getitem
from dask.dataframe.utils import assert_eq
from dask.layers import DataFrameIOLayer
from dask.utils import natural_sort_key
from dask.utils_test import hlg_layer
try:
import fastparquet
except ImportError:
fastparquet = False
fastparquet_version = parse_version("0")
else:
fastparquet_version = parse_version(fastparquet.__version__)
try:
import pyarrow as pa
except ImportError:
pa = False
pa_version = parse_version("0")
else:
pa_version = parse_version(pa.__version__)
try:
import pyarrow.parquet as pq
except ImportError:
pq = False
SKIP_FASTPARQUET = not fastparquet
FASTPARQUET_MARK = pytest.mark.skipif(SKIP_FASTPARQUET, reason="fastparquet not found")
if sys.platform == "win32" and pa and pa_version == parse_version("2.0.0"):
SKIP_PYARROW = True
SKIP_PYARROW_REASON = (
"skipping pyarrow 2.0.0 on windows: "
"https://github.com/dask/dask/issues/6093"
"|https://github.com/dask/dask/issues/6754"
)
else:
SKIP_PYARROW = not pq
SKIP_PYARROW_REASON = "pyarrow not found"
PYARROW_MARK = pytest.mark.skipif(SKIP_PYARROW, reason=SKIP_PYARROW_REASON)
# "Legacy" and "Dataset"-specific MARK definitions
SKIP_PYARROW_LE = SKIP_PYARROW
SKIP_PYARROW_LE_REASON = "pyarrow not found"
SKIP_PYARROW_DS = SKIP_PYARROW
SKIP_PYARROW_DS_REASON = "pyarrow not found"
if not SKIP_PYARROW_LE:
# NOTE: We should use PYARROW_LE_MARK to skip
# pyarrow-legacy tests once pyarrow officially
# removes ParquetDataset support in the future.
PYARROW_LE_MARK = pytest.mark.filterwarnings(
"ignore::DeprecationWarning",
"ignore::FutureWarning",
)
else:
PYARROW_LE_MARK = pytest.mark.skipif(SKIP_PYARROW_LE, reason=SKIP_PYARROW_LE_REASON)
PYARROW_DS_MARK = pytest.mark.skipif(SKIP_PYARROW_DS, reason=SKIP_PYARROW_DS_REASON)
ANY_ENGINE_MARK = pytest.mark.skipif(
SKIP_FASTPARQUET and SKIP_PYARROW,
reason="No parquet engine (fastparquet or pyarrow) found",
)
nrows = 40
npartitions = 15
df = pd.DataFrame(
{
"x": [i * 7 % 5 for i in range(nrows)], # Not sorted
"y": [i * 2.5 for i in range(nrows)], # Sorted
},
index=pd.Index([10 * i for i in range(nrows)], name="myindex"),
)
ddf = dd.from_pandas(df, npartitions=npartitions)
@pytest.fixture(
params=[
pytest.param("fastparquet", marks=FASTPARQUET_MARK),
pytest.param("pyarrow-legacy", marks=PYARROW_LE_MARK),
pytest.param("pyarrow-dataset", marks=PYARROW_DS_MARK),
]
)
def engine(request):
return request.param
def write_read_engines(**kwargs):
"""Product of both engines for write/read:
To add custom marks, pass keyword of the form: `mark_writer_reader=reason`,
or `mark_engine=reason` to apply to all parameters with that engine."""
backends = {"pyarrow-dataset", "pyarrow-legacy", "fastparquet"}
# Skip if uninstalled
skip_marks = {
"fastparquet": FASTPARQUET_MARK,
"pyarrow-legacy": PYARROW_LE_MARK,
"pyarrow-dataset": PYARROW_DS_MARK,
}
marks = {(w, r): [skip_marks[w], skip_marks[r]] for w in backends for r in backends}
# Custom marks
for kw, val in kwargs.items():
kind, rest = kw.split("_", 1)
key = tuple(rest.split("_"))
if kind not in ("xfail", "skip") or len(key) > 2 or set(key) - backends:
raise ValueError("unknown keyword %r" % kw)
val = getattr(pytest.mark, kind)(reason=val)
if len(key) == 2:
marks[key].append(val)
else:
for k in marks:
if key in k:
marks[k].append(val)
return pytest.mark.parametrize(
("write_engine", "read_engine"),
[pytest.param(*k, marks=tuple(v)) for (k, v) in sorted(marks.items())],
)
pyarrow_fastparquet_msg = "pyarrow schema and pandas metadata may disagree"
write_read_engines_xfail = write_read_engines(
**{
"xfail_pyarrow-dataset_fastparquet": pyarrow_fastparquet_msg,
"xfail_pyarrow-legacy_fastparquet": pyarrow_fastparquet_msg,
}
)
if (
fastparquet
and fastparquet_version < parse_version("0.5")
and PANDAS_GT_110
and not PANDAS_GT_121
):
# a regression in pandas 1.1.x / 1.2.0 caused a failure in writing partitioned
# categorical columns when using fastparquet 0.4.x, but this was (accidentally)
# fixed in fastparquet 0.5.0
fp_pandas_msg = "pandas with fastparquet engine does not preserve index"
fp_pandas_xfail = write_read_engines(
**{
"xfail_pyarrow-dataset_fastparquet": pyarrow_fastparquet_msg,
"xfail_pyarrow-legacy_fastparquet": pyarrow_fastparquet_msg,
"xfail_fastparquet_fastparquet": fp_pandas_msg,
"xfail_fastparquet_pyarrow-dataset": fp_pandas_msg,
"xfail_fastparquet_pyarrow-legacy": fp_pandas_msg,
}
)
else:
fp_pandas_msg = "pandas with fastparquet engine does not preserve index"
fp_pandas_xfail = write_read_engines()
@PYARROW_MARK
def test_pyarrow_getengine():
from dask.dataframe.io.parquet.arrow import ArrowDatasetEngine
from dask.dataframe.io.parquet.core import get_engine
# Check that the default engine for "pyarrow"/"arrow"
# is the `pyarrow.dataset`-based engine
assert get_engine("pyarrow") == ArrowDatasetEngine
assert get_engine("arrow") == ArrowDatasetEngine
if SKIP_PYARROW_LE:
with pytest.warns(FutureWarning):
get_engine("pyarrow-legacy")
@write_read_engines()
def test_local(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
data = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
df = dd.from_pandas(data, chunksize=500)
df.to_parquet(tmp, write_index=False, engine=write_engine)
files = os.listdir(tmp)
assert "_common_metadata" in files
assert "_metadata" in files
assert "part.0.parquet" in files
df2 = dd.read_parquet(tmp, index=False, engine=read_engine)
assert len(df2.divisions) > 1
out = df2.compute(scheduler="sync").reset_index()
for column in df.columns:
assert (data[column] == out[column]).all()
@pytest.mark.parametrize("index", [False, True])
@write_read_engines_xfail
def test_empty(tmpdir, write_engine, read_engine, index):
fn = str(tmpdir)
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})[:0]
if index:
df.set_index("a", inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, write_index=index, engine=write_engine)
read_df = dd.read_parquet(fn, engine=read_engine)
assert_eq(ddf, read_df)
@write_read_engines()
def test_simple(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
if write_engine != "fastparquet":
df = pd.DataFrame({"a": [b"a", b"b", b"b"], "b": [4, 5, 6]})
else:
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})
df.set_index("a", inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, engine=write_engine)
read_df = dd.read_parquet(fn, index=["a"], engine=read_engine)
assert_eq(ddf, read_df)
@write_read_engines()
def test_delayed_no_metadata(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})
df.set_index("a", inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(
fn, engine=write_engine, compute=False, write_metadata_file=False
).compute()
files = os.listdir(fn)
assert "_metadata" not in files
# Fastparquet doesn't currently handle a directory without "_metadata"
read_df = dd.read_parquet(
os.path.join(fn, "*.parquet"),
index=["a"],
engine=read_engine,
gather_statistics=True,
)
assert_eq(ddf, read_df)
@write_read_engines()
def test_read_glob(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
ddf.to_parquet(tmp_path, engine=write_engine)
if os.path.exists(os.path.join(tmp_path, "_metadata")):
os.unlink(os.path.join(tmp_path, "_metadata"))
files = os.listdir(tmp_path)
assert "_metadata" not in files
ddf2 = dd.read_parquet(
os.path.join(tmp_path, "*.parquet"),
engine=read_engine,
index="myindex", # Must specify index without _metadata
gather_statistics=True,
)
assert_eq(ddf, ddf2)
@write_read_engines()
def test_gather_statistics_false(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
ddf.to_parquet(tmp_path, write_index=False, engine=write_engine)
ddf2 = dd.read_parquet(
tmp_path,
engine=read_engine,
index=False,
gather_statistics=False,
)
assert_eq(ddf, ddf2, check_index=False, check_divisions=False)
@write_read_engines()
def test_read_list(tmpdir, write_engine, read_engine):
if write_engine == read_engine == "fastparquet" and os.name == "nt":
# fastparquet or dask is not normalizing filepaths correctly on
# windows.
pytest.skip("filepath bug.")
tmpdir = str(tmpdir)
ddf.to_parquet(tmpdir, engine=write_engine)
files = sorted(
(
os.path.join(tmpdir, f)
for f in os.listdir(tmpdir)
if not f.endswith("_metadata")
),
key=natural_sort_key,
)
ddf2 = dd.read_parquet(
files, engine=read_engine, index="myindex", gather_statistics=True
)
assert_eq(ddf, ddf2)
@write_read_engines()
def test_columns_auto_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
# XFAIL, auto index selection no longer supported (for simplicity)
# ### Empty columns ###
# With divisions if supported
assert_eq(dd.read_parquet(fn, columns=[], engine=read_engine), ddf[[]])
# No divisions
assert_eq(
dd.read_parquet(fn, columns=[], engine=read_engine, gather_statistics=False),
ddf[[]].clear_divisions(),
check_divisions=True,
)
# ### Single column, auto select index ###
# With divisions if supported
assert_eq(dd.read_parquet(fn, columns=["x"], engine=read_engine), ddf[["x"]])
# No divisions
assert_eq(
dd.read_parquet(fn, columns=["x"], engine=read_engine, gather_statistics=False),
ddf[["x"]].clear_divisions(),
check_divisions=True,
)
@write_read_engines()
def test_columns_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
# With Index
# ----------
# ### Empty columns, specify index ###
# With divisions if supported
assert_eq(
dd.read_parquet(fn, columns=[], engine=read_engine, index="myindex"), ddf[[]]
)
# No divisions
assert_eq(
dd.read_parquet(
fn, columns=[], engine=read_engine, index="myindex", gather_statistics=False
),
ddf[[]].clear_divisions(),
check_divisions=True,
)
# ### Single column, specify index ###
# With divisions if supported
assert_eq(
dd.read_parquet(fn, index="myindex", columns=["x"], engine=read_engine),
ddf[["x"]],
)
# No divisions
assert_eq(
dd.read_parquet(
fn,
index="myindex",
columns=["x"],
engine=read_engine,
gather_statistics=False,
),
ddf[["x"]].clear_divisions(),
check_divisions=True,
)
# ### Two columns, specify index ###
# With divisions if supported
assert_eq(
dd.read_parquet(fn, index="myindex", columns=["x", "y"], engine=read_engine),
ddf,
)
# No divisions
assert_eq(
dd.read_parquet(
fn,
index="myindex",
columns=["x", "y"],
engine=read_engine,
gather_statistics=False,
),
ddf.clear_divisions(),
check_divisions=True,
)
def test_nonsense_column(tmpdir, engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=engine)
with pytest.raises((ValueError, KeyError)):
dd.read_parquet(fn, columns=["nonesense"], engine=engine)
with pytest.raises((Exception, KeyError)):
dd.read_parquet(fn, columns=["nonesense"] + list(ddf.columns), engine=engine)
@write_read_engines()
def test_columns_no_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
ddf2 = ddf.reset_index()
# No Index
# --------
# All columns, none as index
assert_eq(
dd.read_parquet(fn, index=False, engine=read_engine, gather_statistics=True),
ddf2,
check_index=False,
check_divisions=True,
)
# Two columns, none as index
assert_eq(
dd.read_parquet(
fn,
index=False,
columns=["x", "y"],
engine=read_engine,
gather_statistics=True,
),
ddf2[["x", "y"]],
check_index=False,
check_divisions=True,
)
# One column and one index, all as columns
assert_eq(
dd.read_parquet(
fn,
index=False,
columns=["myindex", "x"],
engine=read_engine,
gather_statistics=True,
),
ddf2[["myindex", "x"]],
check_index=False,
check_divisions=True,
)
@write_read_engines()
def test_gather_statistics_no_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine, write_index=False)
df = dd.read_parquet(fn, engine=read_engine, index=False)
assert df.index.name is None
assert not df.known_divisions
def test_columns_index_with_multi_index(tmpdir, engine):
fn = os.path.join(str(tmpdir), "test.parquet")
index = pd.MultiIndex.from_arrays(
[np.arange(10), np.arange(10) + 1], names=["x0", "x1"]
)
df = pd.DataFrame(np.random.randn(10, 2), columns=["a", "b"], index=index)
df2 = df.reset_index(drop=False)
if engine == "fastparquet":
fastparquet.write(fn, df.reset_index(), write_index=False)
else:
pq.write_table(pa.Table.from_pandas(df.reset_index(), preserve_index=False), fn)
ddf = dd.read_parquet(fn, engine=engine, index=index.names)
assert_eq(ddf, df)
d = dd.read_parquet(fn, columns="a", engine=engine, index=index.names)
assert_eq(d, df["a"])
d = dd.read_parquet(fn, index=["a", "b"], columns=["x0", "x1"], engine=engine)
assert_eq(d, df2.set_index(["a", "b"])[["x0", "x1"]])
# Just index
d = dd.read_parquet(fn, index=False, engine=engine)
assert_eq(d, df2)
d = dd.read_parquet(fn, columns=["b"], index=["a"], engine=engine)
assert_eq(d, df2.set_index("a")[["b"]])
d = dd.read_parquet(fn, columns=["a", "b"], index=["x0"], engine=engine)
assert_eq(d, df2.set_index("x0")[["a", "b"]])
# Just columns
d = dd.read_parquet(fn, columns=["x0", "a"], index=["x1"], engine=engine)
assert_eq(d, df2.set_index("x1")[["x0", "a"]])
# Both index and columns
d = dd.read_parquet(fn, index=False, columns=["x0", "b"], engine=engine)
assert_eq(d, df2[["x0", "b"]])
for index in ["x1", "b"]:
d = dd.read_parquet(fn, index=index, columns=["x0", "a"], engine=engine)
assert_eq(d, df2.set_index(index)[["x0", "a"]])
# Columns and index intersect
for index in ["a", "x0"]:
with pytest.raises(ValueError):
d = dd.read_parquet(fn, index=index, columns=["x0", "a"], engine=engine)
# Series output
for ind, col, sol_df in [
("x1", "x0", df2.set_index("x1")),
(False, "b", df2),
(False, "x0", df2[["x0"]]),
("a", "x0", df2.set_index("a")[["x0"]]),
("a", "b", df2.set_index("a")),
]:
d = dd.read_parquet(fn, index=ind, columns=col, engine=engine)
assert_eq(d, sol_df[col])
@write_read_engines()
def test_no_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, engine=write_engine)
ddf2 = dd.read_parquet(fn, engine=read_engine)
assert_eq(df, ddf2, check_index=False)
def test_read_series(tmpdir, engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=engine)
ddf2 = dd.read_parquet(fn, columns=["x"], index="myindex", engine=engine)
assert_eq(ddf[["x"]], ddf2)
ddf2 = dd.read_parquet(fn, columns="x", index="myindex", engine=engine)
assert_eq(ddf.x, ddf2)
def test_names(tmpdir, engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=engine)
def read(fn, **kwargs):
return dd.read_parquet(fn, engine=engine, **kwargs)
assert set(read(fn).dask) == set(read(fn).dask)
assert set(read(fn).dask) != set(read(fn, columns=["x"]).dask)
assert set(read(fn, columns=("x",)).dask) == set(read(fn, columns=["x"]).dask)
@write_read_engines()
def test_roundtrip_from_pandas(tmpdir, write_engine, read_engine):
fn = str(tmpdir.join("test.parquet"))
dfp = df.copy()
dfp.index.name = "index"
dfp.to_parquet(
fn, engine="pyarrow" if write_engine.startswith("pyarrow") else "fastparquet"
)
ddf = dd.read_parquet(fn, index="index", engine=read_engine)
assert_eq(dfp, ddf)
@write_read_engines()
def test_categorical(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
df = pd.DataFrame({"x": ["a", "b", "c"] * 100}, dtype="category")
ddf = dd.from_pandas(df, npartitions=3)
dd.to_parquet(ddf, tmp, engine=write_engine)
ddf2 = dd.read_parquet(tmp, categories="x", engine=read_engine)
assert ddf2.compute().x.cat.categories.tolist() == ["a", "b", "c"]
ddf2 = dd.read_parquet(tmp, categories=["x"], engine=read_engine)
assert ddf2.compute().x.cat.categories.tolist() == ["a", "b", "c"]
# autocat
if read_engine == "fastparquet":
ddf2 = dd.read_parquet(tmp, engine=read_engine)
assert ddf2.compute().x.cat.categories.tolist() == ["a", "b", "c"]
ddf2.loc[:1000].compute()
assert assert_eq(df, ddf2)
# dereference cats
ddf2 = dd.read_parquet(tmp, categories=[], engine=read_engine)
ddf2.loc[:1000].compute()
assert (df.x == ddf2.x.compute()).all()
def test_append(tmpdir, engine):
"""Test that appended parquet equal to the original one."""
tmp = str(tmpdir)
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
df.index.name = "index"
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)
ddf1.to_parquet(tmp, engine=engine)
ddf2.to_parquet(tmp, append=True, engine=engine)
ddf3 = dd.read_parquet(tmp, engine=engine)
assert_eq(df, ddf3)
def test_append_create(tmpdir, engine):
"""Test that appended parquet equal to the original one."""
tmp_path = str(tmpdir)
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
df.index.name = "index"
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)
ddf1.to_parquet(tmp_path, append=True, engine=engine)
ddf2.to_parquet(tmp_path, append=True, engine=engine)
ddf3 = dd.read_parquet(tmp_path, engine=engine)
assert_eq(df, ddf3)
def test_append_with_partition(tmpdir, engine):
tmp = str(tmpdir)
df0 = pd.DataFrame(
{
"lat": np.arange(0, 10, dtype="int64"),
"lon": np.arange(10, 20, dtype="int64"),
"value": np.arange(100, 110, dtype="int64"),
}
)
df0.index.name = "index"
df1 = pd.DataFrame(
{
"lat": np.arange(10, 20, dtype="int64"),
"lon": np.arange(10, 20, dtype="int64"),
"value": np.arange(120, 130, dtype="int64"),
}
)
df1.index.name = "index"
# Check that nullable dtypes work
# (see: https://github.com/dask/dask/issues/8373)
df0["lat"] = df0["lat"].astype("Int64")
df1["lat"].iloc[0] = np.nan
df1["lat"] = df1["lat"].astype("Int64")
dd_df0 = dd.from_pandas(df0, npartitions=1)
dd_df1 = dd.from_pandas(df1, npartitions=1)
dd.to_parquet(dd_df0, tmp, partition_on=["lon"], engine=engine)
dd.to_parquet(
dd_df1,
tmp,
partition_on=["lon"],
append=True,
ignore_divisions=True,
engine=engine,
)
out = dd.read_parquet(
tmp, engine=engine, index="index", gather_statistics=True
).compute()
# convert categorical to plain int just to pass assert
out["lon"] = out.lon.astype("int64")
# sort required since partitioning breaks index order
assert_eq(
out.sort_values("value"), pd.concat([df0, df1])[out.columns], check_index=False
)
def test_partition_on_cats(tmpdir, engine):
tmp = str(tmpdir)
d = pd.DataFrame(
{
"a": np.random.rand(50),
"b": np.random.choice(["x", "y", "z"], size=50),
"c": np.random.choice(["x", "y", "z"], size=50),
}
)
d = dd.from_pandas(d, 2)
d.to_parquet(tmp, partition_on=["b"], engine=engine)
df = dd.read_parquet(tmp, engine=engine)
assert set(df.b.cat.categories) == {"x", "y", "z"}
@PYARROW_MARK
@pytest.mark.parametrize("meta", [False, True])
@pytest.mark.parametrize("stats", [False, True])
def test_partition_on_cats_pyarrow(tmpdir, stats, meta):
tmp = str(tmpdir)
d = pd.DataFrame(
{
"a": np.random.rand(50),
"b": np.random.choice(["x", "y", "z"], size=50),
"c": np.random.choice(["x", "y", "z"], size=50),
}
)
d = dd.from_pandas(d, 2)
d.to_parquet(tmp, partition_on=["b"], engine="pyarrow", write_metadata_file=meta)
df = dd.read_parquet(tmp, engine="pyarrow", gather_statistics=stats)
assert set(df.b.cat.categories) == {"x", "y", "z"}
def test_partition_on_cats_2(tmpdir, engine):
tmp = str(tmpdir)
d = pd.DataFrame(
{
"a": np.random.rand(50),
"b": np.random.choice(["x", "y", "z"], size=50),
"c": np.random.choice(["x", "y", "z"], size=50),
}
)
d = dd.from_pandas(d, 2)
d.to_parquet(tmp, partition_on=["b", "c"], engine=engine)
df = dd.read_parquet(tmp, engine=engine)
assert set(df.b.cat.categories) == {"x", "y", "z"}
assert set(df.c.cat.categories) == {"x", "y", "z"}
df = dd.read_parquet(tmp, columns=["a", "c"], engine=engine)
assert set(df.c.cat.categories) == {"x", "y", "z"}
assert "b" not in df.columns
assert_eq(df, df.compute())
df = dd.read_parquet(tmp, index="c", engine=engine)
assert set(df.index.categories) == {"x", "y", "z"}
assert "c" not in df.columns
# series
df = dd.read_parquet(tmp, columns="b", engine=engine)
assert set(df.cat.categories) == {"x", "y", "z"}
def test_append_wo_index(tmpdir, engine):
"""Test append with write_index=False."""
tmp = str(tmpdir.join("tmp1.parquet"))
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)
ddf1.to_parquet(tmp, engine=engine)
with pytest.raises(ValueError) as excinfo:
ddf2.to_parquet(tmp, write_index=False, append=True, engine=engine)
assert "Appended columns" in str(excinfo.value)
tmp = str(tmpdir.join("tmp2.parquet"))
ddf1.to_parquet(tmp, write_index=False, engine=engine)
ddf2.to_parquet(tmp, write_index=False, append=True, engine=engine)
ddf3 = dd.read_parquet(tmp, index="f", engine=engine)
assert_eq(df.set_index("f"), ddf3)
def test_append_overlapping_divisions(tmpdir, engine):
"""Test raising of error when divisions overlapping."""
tmp = str(tmpdir)
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half - 10 :], chunksize=100)
ddf1.to_parquet(tmp, engine=engine)
with pytest.raises(ValueError) as excinfo:
ddf2.to_parquet(tmp, engine=engine, append=True)
assert "Appended divisions" in str(excinfo.value)
ddf2.to_parquet(tmp, engine=engine, append=True, ignore_divisions=True)
def test_append_different_columns(tmpdir, engine):
"""Test raising of error when non equal columns."""
tmp = str(tmpdir)
df1 = pd.DataFrame({"i32": np.arange(100, dtype=np.int32)})
df2 = pd.DataFrame({"i64": np.arange(100, dtype=np.int64)})
df3 = pd.DataFrame({"i32": np.arange(100, dtype=np.int64)})
ddf1 = dd.from_pandas(df1, chunksize=2)
ddf2 = dd.from_pandas(df2, chunksize=2)
ddf3 = dd.from_pandas(df3, chunksize=2)
ddf1.to_parquet(tmp, engine=engine)
with pytest.raises(ValueError) as excinfo:
ddf2.to_parquet(tmp, engine=engine, append=True)
assert "Appended columns" in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
ddf3.to_parquet(tmp, engine=engine, append=True)
assert "Appended dtypes" in str(excinfo.value)
def test_append_dict_column(tmpdir, engine):
# See: https://github.com/dask/dask/issues/7492
if engine == "fastparquet":
pytest.xfail("Fastparquet engine is missing dict-column support")
elif pa_version < parse_version("1.0.1"):
pytest.skip("PyArrow 1.0.1+ required for dict-column support.")
tmp = str(tmpdir)
dts = pd.date_range("2020-01-01", "2021-01-01")
df = pd.DataFrame(
{"value": [{"x": x} for x in range(len(dts))]},
index=dts,
)
ddf1 = dd.from_pandas(df, npartitions=1)
# Write ddf1 to tmp, and then append it again
ddf1.to_parquet(tmp, append=True, engine=engine)
ddf1.to_parquet(tmp, append=True, engine=engine, ignore_divisions=True)
# Read back all data (ddf1 + ddf1)
ddf2 = dd.read_parquet(tmp, engine=engine)
# Check computed result
expect = pd.concat([df, df])
result = ddf2.compute()
assert_eq(expect, result)
@write_read_engines_xfail
def test_ordering(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": [10, 20, 30], "c": [100, 200, 300]},
index=pd.Index([-1, -2, -3], name="myindex"),
columns=["c", "a", "b"],
)
ddf = dd.from_pandas(df, npartitions=2)
dd.to_parquet(ddf, tmp, engine=write_engine)
if read_engine == "fastparquet":
pf = fastparquet.ParquetFile(tmp)
assert pf.columns == ["myindex", "c", "a", "b"]
ddf2 = dd.read_parquet(tmp, index="myindex", engine=read_engine)
assert_eq(ddf, ddf2, check_divisions=False)
def test_read_parquet_custom_columns(tmpdir, engine):
tmp = str(tmpdir)
data = pd.DataFrame(
{"i32": np.arange(1000, dtype=np.int32), "f": np.arange(1000, dtype=np.float64)}
)
df = dd.from_pandas(data, chunksize=50)
df.to_parquet(tmp, engine=engine)
df2 = dd.read_parquet(tmp, columns=["i32", "f"], engine=engine)
assert_eq(df[["i32", "f"]], df2, check_index=False)
fns = glob.glob(os.path.join(tmp, "*.parquet"))
df2 = dd.read_parquet(fns, columns=["i32"], engine=engine).compute()
df2.sort_values("i32", inplace=True)
assert_eq(df[["i32"]], df2, check_index=False, check_divisions=False)
df3 = dd.read_parquet(tmp, columns=["f", "i32"], engine=engine)
assert_eq(df[["f", "i32"]], df3, check_index=False)
@pytest.mark.parametrize(
"df,write_kwargs,read_kwargs",
[
(pd.DataFrame({"x": [3, 2, 1]}), {}, {}),
(pd.DataFrame({"x": ["c", "a", "b"]}), {}, {}),
(pd.DataFrame({"x": ["cc", "a", "bbb"]}), {}, {}),
(pd.DataFrame({"x": [b"a", b"b", b"c"]}), {"object_encoding": "bytes"}, {}),
(
pd.DataFrame({"x": pd.Categorical(["a", "b", "a"])}),
{},
{"categories": ["x"]},
),
(pd.DataFrame({"x": | pd.Categorical([1, 2, 1]) | pandas.Categorical |
import numpy as np
import pandas as pd
import pytest
from ber_public.deap import dim
@pytest.fixture
def building_fabric():
floor_uvalue = pd.Series([0.14])
roof_uvalue = pd.Series([0.11])
wall_uvalue = pd.Series([0.13])
window_uvalue = pd.Series([0.87])
door_uvalue = pd.Series([1.5])
thermal_bridging_factor = pd.Series([0.05])
effective_air_rate_change = pd.Series([0.5])
return (
floor_uvalue,
roof_uvalue,
wall_uvalue,
window_uvalue,
door_uvalue,
thermal_bridging_factor,
effective_air_rate_change,
)
@pytest.fixture
def building_area():
floor_area = pd.Series([63])
roof_area = pd.Series([63])
wall_area = pd.Series([85.7])
window_area = pd.Series([29.6])
door_area = pd.Series([1.85])
return floor_area, roof_area, wall_area, window_area, door_area
@pytest.fixture
def building_floor_dimensions():
ground_floor_area = | pd.Series([63]) | pandas.Series |
# coding: utf-8
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import sys, os
sys.path.append('../data')
import my_autopct
# def pie_sublex_distribution_per_word(df, result_path):
# df_sublex = df.loc[:,[column for column in df.columns if column.startswith('sublex_')]]
# for (col_name,row),ipa in zip(df_sublex.iterrows(), df.IPA):
# row.plot.pie(autopct=my_autopct.my_autopct, legend=True)
# plt.ylabel('')
# plt.title(u'Posterior predictive sublexical assignment probability of %s' % (ipa))
# plt.savefig(os.path.join(result_path, 'pie_%s.png' % ipa))
# plt.gcf().clear()
def bar_log_sublex_prob_ratio(df, result_path, log_prob = 'target_log_prob', group_id_name = 'group_identifier'):
data = []
for group_id, df_group in df.groupby(group_id_name):
Foreign_word = df_group.loc[df_group.actual_sublex=='Foreign', 'IPA'].values[0]
Native_word = df_group.loc[df_group.actual_sublex=='Native', 'IPA'].values[0]
        log_prob_foreign = df_group.loc[df_group.actual_sublex=='Foreign', log_prob].values[0]
        log_prob_native = df_group.loc[df_group.actual_sublex=='Native', log_prob].values[0]
        log_ratio = log_prob_foreign - log_prob_native
data.append([Foreign_word+'-'+Native_word, log_ratio])
df_data = | pd.DataFrame(data, columns=['word_pair', 'log_pred_prob_ratio']) | pandas.DataFrame |
# Copyright (C) 2012 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Models for the data being analysed and manipulated.
@author: drusk
"""
import random as rand
import numpy as np
import pandas as pd
from pml.utils import plotting, pandas_util
from pml.utils.errors import InconsistentSampleIdError
from pml.utils.errors import UnlabelledDataSetError
class DataSet(object):
"""
A collection of data that may be analysed and manipulated.
Columns are interpreted as features in the data set, and rows are samples
or observations.
"""
def __init__(self, data, labels=None):
"""
Creates a new DataSet from data of an unknown type. If data is itself
a DataSet object, then its contents are copied and a new DataSet is
created from the copies.
Args:
data:
Data of unknown type. The supported types are:
1) pandas DataFrame
2) Python lists
3) numpy array
4) an existing DataSet object
labels: pandas Series, Python list or Python dictionary
The classification labels for the samples in data. If they are
not known (i.e. it is an unlabelled data set) the value None
should be used. Default value is None (unlabelled).
Raises:
ValueError if the data or labels are not of a supported type.
InconsistentSampleIdError if labels were provided whose sample ids
do not match those of the data.
"""
if isinstance(data, pd.DataFrame):
self._dataframe = data
elif isinstance(data, list):
self._dataframe = | pd.DataFrame(data) | pandas.DataFrame |
import os
import unittest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal, assert_series_equal
from mavedbconvert import empiric, constants
from tests import ProgramTestCase
class TestEmpiricInit(ProgramTestCase):
def setUp(self):
super().setUp()
self.path = os.path.join(self.data_dir, "empiric", "empiric.xlsx")
def test_offset_inframe(self):
empiric.Empiric(src=self.path, wt_sequence="ATC", offset=3)
def test_error_offset_not_inframe(self):
with self.assertRaises(ValueError):
empiric.Empiric(src=self.path, wt_sequence="ATC", offset=1)
def test_error_noncoding(self):
with self.assertRaises(ValueError):
empiric.Empiric(src=self.path, wt_sequence="ATC", is_coding=False)
class TestInferProEvent(unittest.TestCase):
def test_infers_equal_event(self):
self.assertEqual(
empiric.infer_pro_substitution(mut_aa="V", wt_aa="v", codon_pos=0),
"p.Val1=",
)
def test_infers_sub_event_event(self):
self.assertEqual(
empiric.infer_pro_substitution(mut_aa="V", wt_aa="F", codon_pos=0),
"p.Phe1Val",
)
def test_converts_triple_q_to_Xaa(self):
self.assertEqual(
empiric.infer_pro_substitution(mut_aa="?", wt_aa="V", codon_pos=0),
"p.Val1Xaa",
)
class TestInferNTEvent(unittest.TestCase):
def test_infers_equal_event(self):
self.assertEqual(
empiric.infer_nt_substitution(wt_codon="aaa", mut_codon="AAA", codon_pos=0),
"c.[1=;2=;3=]",
)
def test_infers_sub_event_event(self):
self.assertEqual(
empiric.infer_nt_substitution(wt_codon="ATC", mut_codon="GTA", codon_pos=0),
"c.[1A>G;2=;3C>A]",
)
def test_adds_codon_pos_multiplied_by_3_to_position(self):
self.assertEqual(
empiric.infer_nt_substitution(wt_codon="ATC", mut_codon="GTA", codon_pos=1),
"c.[4A>G;5=;6C>A]",
)
class TestEmpiric(ProgramTestCase):
def setUp(self):
super().setUp()
self.input = os.path.join(self.data_dir, "empiric", "empiric.xlsx")
self.empiric = empiric.Empiric(
src=self.input, wt_sequence="AAA", one_based=False
)
def test_error_missing_amino_acid(self):
for nan in constants.extra_na:
df = pd.DataFrame({"Position": [0], "Amino Acid": [nan], "row_num": [0]})
self.empiric.validate_columns(df)
with self.assertRaises(ValueError):
self.empiric.parse_row(row=df.iloc[0, :])
def test_value_error_codon_doesnt_match_aa_column(self):
with self.assertRaises(ValueError):
df = pd.DataFrame(
{"Position": [0], "Amino Acid": ["V"], "Codon": ["AAT"], "row_num": [0]}
)
self.empiric.validate_columns(df)
self.empiric.parse_row(row=df.iloc[0, :])
def test_error_infer_nt_true_but_missing_codon_value(self):
for nan in constants.extra_na:
df = pd.DataFrame(
{"Position": [0], "Amino Acid": ["N"], "row_num": [0], "Codon": [nan]}
)
self.empiric.validate_columns(df)
with self.assertRaises(ValueError):
self.empiric.parse_row(row=df.iloc[0, :])
def test_index_error_negative_position(self):
df = pd.DataFrame(
{"Position": [0], "Amino Acid": ["K"], "row_num": [0], "Codon": ["AAA"]}
)
self.empiric.validate_columns(df)
self.empiric.one_based = True
with self.assertRaises(IndexError):
self.empiric.parse_row(row=df.iloc[0, :])
def test_index_error_out_of_codon_bounds(self):
df = pd.DataFrame(
{"Position": [56], "Amino Acid": ["K"], "row_num": [0], "Codon": ["AAA"]}
)
self.empiric.validate_columns(df)
with self.assertRaises(IndexError):
self.empiric.parse_row(row=df.iloc[0, :])
def test_amino_acid_column_is_case_insensitive(self):
df = pd.DataFrame(
{"Position": [0], "Amino Acid": ["v"], "row_num": [0], "Codon": ["GTA"]}
)
self.empiric.validate_columns(df)
_, hgvs_pro = self.empiric.parse_row(row=df.iloc[0, :])
self.assertEqual(hgvs_pro, "p.Lys1Val")
def test_infers_hgvs_pro_event_from_one_based_position(self):
df = pd.DataFrame(
{"Position": [1], "Amino Acid": ["V"], "row_num": [0], "Codon": ["GTA"]}
)
self.empiric.validate_columns(df)
self.empiric.one_based = True
_, hgvs_pro = self.empiric.parse_row(row=df.iloc[0, :])
self.assertEqual(hgvs_pro, "p.Lys1Val")
def test_infers_hgvs_pro_event_from_zero_based_position(self):
df = pd.DataFrame(
{"Position": [1], "Amino Acid": ["V"], "row_num": [0], "Codon": ["GTA"]}
)
self.empiric.validate_columns(df)
self.empiric.wt_sequence = "GTAAAA"
self.empiric.one_based = False
_, hgvs_pro = self.empiric.parse_row(row=df.iloc[0, :])
self.assertEqual(hgvs_pro, "p.Lys2Val")
def test_protein_output_is_singular_when_inferring_nt(self):
df = pd.DataFrame(
{"Position": [0], "Amino Acid": ["V"], "row_num": [0], "Codon": ["GTA"]}
)
self.empiric.validate_columns(df)
hgvs_nt, hgvs_pro = self.empiric.parse_row(row=df.iloc[0, :])
self.assertEqual(hgvs_nt, "c.[1A>G;2A>T;3=]")
self.assertEqual(hgvs_pro, "p.Lys1Val")
def test_hgvs_nt_is_none_when_codon_is_not_in_axes(self):
df = pd.DataFrame({"Position": [0], "Amino Acid": ["V"], "row_num": [0]})
self.empiric.validate_columns(df)
hgvs_nt, _ = self.empiric.parse_row(row=df.iloc[0, :])
self.assertIsNone(hgvs_nt)
def test_correctly_infers_hgvs_nt_positions_when_zero_based(self):
df = pd.DataFrame(
{"Position": [1], "Amino Acid": ["V"], "row_num": [0], "Codon": ["GTA"]}
)
self.empiric.validate_columns(df)
self.empiric.one_based = False
self.empiric.wt_sequence = "GGGAAT"
hgvs_nt, _ = self.empiric.parse_row(row=df.iloc[0, :])
self.assertEqual(hgvs_nt, "c.[4A>G;5A>T;6T>A]")
def test_correctly_infers_hgvs_nt_positions_when_one_based(self):
df = pd.DataFrame({"Position": [1], "Amino Acid": ["N"], "Codon": ["AAT"]})
self.empiric.validate_columns(df)
self.empiric.one_based = True
self.empiric.wt_sequence = "GTAAAA"
hgvs_nt, _ = self.empiric.parse_row(row=df.iloc[0, :])
self.assertEqual(hgvs_nt, "c.[1G>A;2T>A;3A>T]")
class TestEmpiricValidateColumns(ProgramTestCase):
def setUp(self):
super().setUp()
self.input = os.path.join(self.data_dir, "empiric", "empiric.xlsx")
self.empiric = empiric.Empiric(
src=self.input, wt_sequence="AAA", one_based=False
)
def test_error_cannot_find_case_insensitive_aa_column(self):
df = pd.DataFrame({"Position": [1], "aa": ["N"], "Codon": ["AAT"]})
with self.assertRaises(ValueError):
self.empiric.validate_columns(df)
def test_error_cannot_find_case_insensitive_position_column(self):
df = pd.DataFrame({"pos": [1], "Amino Acid": ["N"], "Codon": ["AAT"]})
with self.assertRaises(ValueError):
self.empiric.validate_columns(df)
def test_sets_codon_column_as_none_if_not_present(self):
df = pd.DataFrame({"Position": [1], "Amino Acid": ["N"]})
self.empiric.validate_columns(df)
self.assertEqual(self.empiric.codon_column, None)
def test_sets_codon_column_if_present(self):
df = pd.DataFrame({"Position": [1], "Amino Acid": ["N"], "Codon": ["AAT"]})
self.empiric.validate_columns(df)
self.assertEqual(self.empiric.codon_column, "Codon")
def test_sets_position_column(self):
df = pd.DataFrame({"Position": [1], "Amino Acid": ["N"], "Codon": ["AAT"]})
self.empiric.validate_columns(df)
self.assertEqual(self.empiric.position_column, "Position")
def test_sets_aa_column(self):
df = pd.DataFrame({"Position": [1], "amino acid": ["N"], "Codon": ["AAT"]})
self.empiric.validate_columns(df)
self.assertEqual(self.empiric.aa_column, "amino acid")
class TestEmpiricParseScoresInput(ProgramTestCase):
def setUp(self):
super().setUp()
self.input = os.path.join(self.data_dir, "empiric", "empiric.xlsx")
self.empiric = empiric.Empiric(
src=self.input,
wt_sequence="AAA",
one_based=False,
input_type="scores",
score_column="A",
)
def test_deletes_position_amino_acid_codon_row_num_columns(self):
df = pd.DataFrame(
{"Position": [0], "Amino Acid": ["N"], "Codon": ["AAT"], "A": [1.2]}
)
result = self.empiric.parse_input(df)
self.assertNotIn("Position", result.columns)
self.assertNotIn("Amino Acid", result.columns)
self.assertNotIn("Codon", result.columns)
self.assertNotIn("row_num", result.columns)
def test_keeps_additional_non_score_columns(self):
df = pd.DataFrame(
{
"Position": [0],
"Amino Acid": ["N"],
"Codon": ["AAT"],
"A": [1.2],
"B": [2.4],
}
)
result = self.empiric.parse_input(df)
self.assertIn("B", result.columns)
def test_renames_score_column_to_score_and_drops_original(self):
df = pd.DataFrame(
{
"Position": [0],
"Amino Acid": ["N"],
"Codon": ["AAT"],
"A": [1.2],
"B": [2.4],
}
)
result = self.empiric.parse_input(df)
self.assertListEqual(list(df["A"]), list(result["score"]))
self.assertIn("B", result.columns)
self.assertNotIn("A", result.columns)
def test_sets_hgvs_pro_column(self):
df = pd.DataFrame(
{
"Position": [0],
"Amino Acid": ["N"],
"Codon": ["AAT"],
"A": [1.2],
"B": [2.4],
}
)
result = self.empiric.parse_input(df)
self.assertEqual(result[constants.pro_variant_col].values[0], "p.Lys1Asn")
def test_correctly_infers_hgvs_nt_column_when_codon_column_present(self):
df = pd.DataFrame(
{
"Position": [1],
"Amino Acid": ["N"],
"Codon": ["AAT"],
"A": [1.2],
"B": [2.4],
}
)
self.empiric.one_based = False
self.empiric.wt_sequence = "GGGAAA"
result = self.empiric.parse_input(df)
self.assertEqual(result[constants.nt_variant_col].values[0], "c.[4=;5=;6A>T]")
def test_orders_columns(self):
df = pd.DataFrame(
{
"Position": [0],
"Amino Acid": ["N"],
"Codon": ["AAT"],
"A": [1.2],
"B": [2.4],
}
)
result = self.empiric.parse_input(df)
self.assertEqual(list(result.columns).index(constants.nt_variant_col), 0)
self.assertEqual(list(result.columns).index(constants.pro_variant_col), 1)
self.assertEqual(list(result.columns).index(constants.mavedb_score_column), 2)
def test_removes_null_columns(self):
df = pd.DataFrame(
{
"Position": [0],
"Amino Acid": ["N"],
"Codon": ["AAT"],
"B": [None],
"A": [2.4],
}
)
result = self.empiric.parse_input(df)
self.assertNotIn("B", result.columns)
def test_drops_nt_when_codon_column_is_not_provided(self):
df = pd.DataFrame(
{"Position": [0], "Amino Acid": ["N"], "A": [1.2], "B": [2.4]}
)
result = self.empiric.parse_input(df)
self.assertNotIn(constants.nt_variant_col, result.columns)
def test_drops_non_numeric_columns(self):
df = pd.DataFrame(
{
"Position": [0],
"Amino Acid": ["N"],
"Codon": ["AAT"],
"A": [1.2],
"B": ["a"],
}
)
result = self.empiric.parse_input(df)
self.assertNotIn("B", result.columns)
def test_keeps_int_type_as_int(self):
df = pd.DataFrame(
{"Position": [0], "Amino Acid": ["N"], "Codon": ["AAT"], "A": [1]}
)
result = self.empiric.parse_input(df)
self.assertTrue(
np.issubdtype(
result[constants.mavedb_score_column].values[0], np.signedinteger
)
)
class TestEmpiricParseCountsInput(ProgramTestCase):
def setUp(self):
super().setUp()
self.input = os.path.join(self.data_dir, "empiric", "empiric.xlsx")
self.empiric = empiric.Empiric(
src=self.input,
wt_sequence="AAA",
one_based=False,
input_type="counts",
score_column="A",
)
def test_orders_columns(self):
df = pd.DataFrame(
{
"Position": [0],
"Amino Acid": ["N"],
"Codon": ["AAT"],
"A": [1.2],
"B": [2.4],
}
)
result = self.empiric.parse_input(df)
self.assertEqual(list(result.columns).index(constants.nt_variant_col), 0)
self.assertEqual(list(result.columns).index(constants.pro_variant_col), 1)
self.assertEqual(list(result.columns).index("A"), 2)
self.assertEqual(list(result.columns).index("B"), 3)
class TestEmpiricLoadInput(ProgramTestCase):
def setUp(self):
super().setUp()
self.excel_path = os.path.join(self.data_dir, "empiric", "empiric.xlsx")
self.excel_header_footer_path = os.path.join(
self.data_dir, "empiric", "empiric_header_footer.xlsx"
)
self.csv_path = os.path.join(self.data_dir, "empiric", "tmp.csv")
self.tsv_path = os.path.join(self.data_dir, "empiric", "tmp.tsv")
self.excel_multisheet_path = os.path.join(
self.data_dir, "empiric", "empiric_multisheet.xlsx"
)
def test_extra_na_load_as_nan(self):
for value in constants.extra_na:
df = pd.read_excel(self.excel_path, engine="openpyxl")
df["A"] = [value] * len(df)
df.to_csv(self.csv_path, index=False)
e = empiric.Empiric(
src=self.csv_path,
wt_sequence="TTTTCTTATTGT",
score_column="col_A",
input_type=constants.score_type,
one_based=False,
)
result = e.load_input_file()
expected = pd.Series([np.NaN] * len(df), index=df.index, name="A")
assert_series_equal(result["A"], expected)
def test_loads_first_sheet_by_default(self):
p = empiric.Empiric(
src=self.excel_multisheet_path,
wt_sequence="TTTTCTTATTGT",
score_column="score",
input_type=constants.score_type,
)
result = p.load_input_file()
expected = pd.read_excel(
self.excel_multisheet_path, na_values=constants.extra_na, engine="openpyxl"
)
assert_frame_equal(result, expected)
def test_loads_correct_sheet(self):
p = empiric.Empiric(
src=self.excel_multisheet_path,
wt_sequence="TTTTCTTATTGT",
score_column="col_A",
input_type=constants.score_type,
one_based=False,
sheet_name="Sheet3",
)
result = p.load_input_file()
expected = pd.read_excel(
self.excel_multisheet_path,
na_values=constants.extra_na,
sheet_name="Sheet3",
engine="openpyxl",
)
assert_frame_equal(result, expected)
def test_error_missing_sheet(self):
p = empiric.Empiric(
src=self.excel_multisheet_path,
wt_sequence="TTTTCTTATTGT",
score_column="col_A",
input_type=constants.score_type,
one_based=False,
sheet_name="BadSheet",
)
with self.assertRaises(KeyError):
p.load_input_file()
def test_handles_csv(self):
df = pd.read_excel(self.excel_path, engine="openpyxl")
df.to_csv(self.csv_path, index=False, sep=",")
e = empiric.Empiric(
src=self.csv_path,
wt_sequence="TTTTCTTATTGT",
score_column="col_A",
input_type=constants.score_type,
one_based=False,
)
result = e.load_input_file()
assert_frame_equal(result, df)
def test_loads_with_skipped_rows(self):
p = empiric.Empiric(
src=self.excel_header_footer_path,
wt_sequence="TTTTCTTATTGT",
score_column="col_A",
input_type=constants.score_type,
one_based=False,
skip_header_rows=2,
skip_footer_rows=2,
)
result = p.load_input_file()
df = pd.read_excel(self.excel_path, engine="openpyxl")
assert_frame_equal(result, df)
def test_handles_tsv(self):
df = | pd.read_excel(self.excel_path, engine="openpyxl") | pandas.read_excel |
import mlrose_hiive as mlrose
import numpy as np
import pandas as pd
from time import perf_counter as clock  # time.clock was removed in Python 3.8; perf_counter is a drop-in replacement here
import os
import argparse
import data.DataProcessors as dp
import seaborn as sns
import matplotlib.pyplot as plt
os.environ['seed'] = '45604'
randomSeed = 45604
verbose = True
from sklearn.metrics import accuracy_score, log_loss, f1_score, confusion_matrix, make_scorer, precision_score, mean_squared_error, plot_confusion_matrix, roc_auc_score, recall_score
def plot_results(data_dir, param_name, param_display):
directory="./"+data_dir+"/images/"
if not os.path.exists(directory):
os.makedirs(directory)
path1='./'+data_dir
path2= "./"+data_dir+"/images/"
# nn
ga = pd.read_csv(os.path.join(data_dir,'gatrain_performance.csv'))
sa = pd.read_csv(os.path.join(data_dir,'satrain_performance.csv'))
rh = pd.read_csv(os.path.join(data_dir,'rhtrain_performance.csv'))
gd = pd.read_csv(os.path.join(data_dir,'gdtrain_performance.csv'))
plt.close()
plt.figure()
plt.plot( ga['Iterations'], ga[param_name], label='Gen Alg')
plt.plot( sa['Iterations'], sa[param_name], label='Sim Ann')
plt.plot( rh['Iterations'], rh[param_name], label='Random Hill')
plt.plot( gd['Iterations'], gd[param_name], label='Grad Desc')
plt.legend(title="Algorithm", loc="best")
x_title = "Iterations"
y_title = param_display
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.title("Customer Churn ANN Optimized by RO Algorithms (Train Performance)")
plt.savefig(os.path.join(directory,"train_"+param_name+".png"), format='png', dpi=200, bbox_inches = 'tight', pad_inches = 0)
ga = pd.read_csv(os.path.join(data_dir,'gatest_performance.csv'))
sa = pd.read_csv(os.path.join(data_dir,'satest_performance.csv'))
rh = pd.read_csv(os.path.join(data_dir,'rhtest_performance.csv'))
gd = pd.read_csv(os.path.join(data_dir,'gdtest_performance.csv'))
plt.close()
plt.figure()
plt.plot( ga['Iterations'], ga[param_name], label='Gen Alg')
plt.plot( sa['Iterations'], sa[param_name], label='Sim Ann')
plt.plot( rh['Iterations'], rh[param_name], label='Random Hill')
plt.plot( gd['Iterations'], gd[param_name], label='Grad Desc')
plt.legend(title="Algorithm", loc="best")
x_title = "Iterations"
y_title = param_display
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.title("Customer Churn ANN Optimized by RO Algorithms (Test Performance)")
plt.savefig(os.path.join(directory,"test_"+param_name+".png"), format='png', dpi=200, bbox_inches = 'tight', pad_inches = 0)
def get_model(algorithm, max_iters):
activation = "relu"
print(algorithm)
print(max_iters)
if algorithm == "rh":
return mlrose.NeuralNetwork(hidden_nodes = [10], activation = activation, algorithm = 'random_hill_climb', \
bias = True, is_classifier = True, early_stopping = True, restarts = 5, max_attempts =10,
max_iters = max_iters, clip_max = 10, random_state = randomSeed)
if algorithm == "ga":
return mlrose.NeuralNetwork(hidden_nodes = [10], activation = activation, algorithm = 'genetic_alg', \
bias = True, is_classifier = True, early_stopping = True, max_attempts =10,
max_iters = max_iters, clip_max = 10, mutation_prob = .10, random_state = randomSeed)
if algorithm == "sa":
return mlrose.NeuralNetwork(hidden_nodes = [10], activation = activation, algorithm = 'simulated_annealing', \
bias = True, is_classifier = True, early_stopping = True, max_attempts =10,
max_iters = max_iters, clip_max = 10, schedule = mlrose.GeomDecay(), random_state = randomSeed)
if algorithm == "gd":
return mlrose.NeuralNetwork(hidden_nodes = [10], activation = activation, algorithm = 'gradient_descent', \
bias = True, is_classifier = True, early_stopping = True, max_attempts =10,
max_iters = max_iters, clip_max = 10, random_state = randomSeed)
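# Hedged example of using get_model on its own; the feature/label arrays are placeholders,
# and mlrose_hiive's NeuralNetwork follows the scikit-learn-style fit/predict API:
# model = get_model("sa", max_iters=1000)
# model.fit(X_train_scaled, y_train)
# y_pred = model.predict(X_test_scaled)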
def run_neural_net(algorithm):
fullData = dp.CustomerChurnModel()
fullData.prepare_data_for_training()
dfTrain = | pd.DataFrame(columns=["Iterations","Accuracy","Precision","Recall","F1","ROC AUC","SquareError","TrainTime"]) | pandas.DataFrame |
import datetime as dt
import unittest
import numpy as np
import pandas as pd
from numpy.testing import assert_allclose
from darkgreybox.fit import darkgreyfit
from darkgreybox.models import Ti
def error_metric(y: np.ndarray, Z: np.ndarray) -> float:
return np.sum(y - Z)
class DarkGreyFitTest(unittest.TestCase):
def test__darkgreyfit__returns_correct_dataframe__when_prefit_splits_specified(self):
train_start = dt.datetime(2021, 1, 1, 1, 0)
train_end = dt.datetime(2021, 1, 1, 6, 0)
test_start = dt.datetime(2021, 1, 1, 7, 0)
test_end = dt.datetime(2021, 1, 1, 9, 0)
rec_duration = 1
params = {
'Ti0': {'value': 10, 'vary': False},
'Ria': {'value': 1},
'Ci': {'value': 1},
}
y_train = pd.Series([10, 10, 20, 20, 20, 30])
X_train = pd.DataFrame(
index=pd.date_range(train_start, train_end, freq=f'{rec_duration}H'),
data={
'Ta': [10, 10, 10, 20, 20, 20],
'Ph': [0, 10, 0, 0, 10, 0],
'Ti0': [10, 10, 20, 20, 20, 30]
})
X_test = pd.DataFrame(
index= | pd.date_range(test_start, test_end, freq=f'{rec_duration}H') | pandas.date_range |
#!/usr/bin/python
print('datasets_merge - initiating.')
import os
import pandas as pd
pd.options.mode.chained_assignment = None
# set directories and files
cwd = os.getcwd()
input_folder = "0_input"
prices_folder = "data"
output_folder = "0_output"
temp_folder = "temp"
print('starting merging the financials and prices datasets')
# import
prices_table = pd.read_csv(os.path.join(cwd,input_folder,"2_prices_updated.csv"), low_memory=False)
fundamentals_table_annually = pd.read_csv(os.path.join(cwd,input_folder,"3_fundamentals_processed_annually.csv"), low_memory=False)
fundamentals_table = pd.read_csv(os.path.join(cwd,input_folder,"4_fundamentals_processed_quarterly.csv"), low_memory=False)
print("importing fundamentals and prices is done")
# adding TTM from t0,t1,t2,t3 before selecting t0 only for income and cash flows
df_ttm = fundamentals_table.groupby(['symbol'])[['totalRevenue', 'costOfRevenue'
,'totalCashFromOperatingActivities', 'capitalExpenditures'
, 'totalOperatingExpenses', 'totalCashflowsFromInvestingActivities']].sum()
df_ttm = df_ttm.reset_index(drop=False)
df_ttm['capitalExpenditures'].fillna(df_ttm['totalCashflowsFromInvestingActivities'], inplace=True)
df_ttm.rename(columns={'totalRevenue': 'totalRevenueTTM', 'costOfRevenue': 'costOfRevenueTTM'
, 'totalCashFromOperatingActivities': 'totalCashFromOperatingActivitiesTTM'
, 'totalOperatingExpenses': 'totalOperatingExpensesTTM'
, 'capitalExpenditures': 'capitalExpendituresTTM'
, 'Total Debt (mrq)': 'Debt'}, inplace=True)
print("ttm precalculated")
# select only latest quarterly data to filter out balance sheet
fundamentals_table = fundamentals_table[fundamentals_table['Period'] == "t0"]
print("fundamentals_table period = t0")
# splitting annual data
df_y0 = fundamentals_table_annually[fundamentals_table_annually['Period'] == "y0"]
df_y0.rename(columns={'totalRevenue': 'revenue_last_year'}, inplace=True)
df_y0 = df_y0[['symbol', 'revenue_last_year']]
#df_y_minus_1 = fundamentals_table_annually[fundamentals_table_annually['Period'] == "y-1"]
#df_y_minus_2 = fundamentals_table_annually[fundamentals_table_annually['Period'] == "y-2"]
#df_y_minus_3 = fundamentals_table_annually[fundamentals_table_annually['Period'] == "y-3"]
# finding historical averages
df_avg_historical = fundamentals_table_annually.groupby(['symbol'])[['totalRevenue', 'capitalExpenditures', 'costOfRevenue', 'propertyPlantEquipment']].mean()
df_avg_historical = df_avg_historical.reset_index(drop=False)
df_avg_historical.rename(columns={'totalRevenue': 'mean_historical_revenue'
, 'capitalExpenditures': 'mean_historical_capex'
, 'costOfRevenue': 'mean_historical_costOfRevenue'
, 'propertyPlantEquipment': 'mean_historical_propertyPlantEquipment'}, inplace=True)
df_avg_historical = df_avg_historical[['symbol', 'mean_historical_revenue', 'mean_historical_costOfRevenue'
, 'mean_historical_capex', 'mean_historical_propertyPlantEquipment']]
print("historical averages calculated")
# merge fundamentals and prices
df_merged = pd.merge(fundamentals_table, prices_table, how='left', left_on=['symbol'], right_on=['symbol'], suffixes=('', '_drop'))
df_merged.drop([col for col in df_merged.columns if 'drop' in col], axis=1, inplace=True)
df_merged.rename(columns={'52 Week High 3': '52h'
, '52 Week Low 3': '52l'
, 'Quote Price': 'p'
, 'Quarterly Revenue Growth (yoy)': 'QtrGrwth'}, inplace=True)
print("raw fundamentals and prices merged")
# merge TTM
df_to_merge = df_merged
df_merged = pd.merge(df_to_merge, df_ttm, how='left', left_on=['symbol'], right_on=['symbol'], suffixes=('', '_drop'))
df_merged.drop([col for col in df_merged.columns if 'drop' in col], axis=1, inplace=True)
print("ttm merged")
# merge last_year_revenue
df_to_merge = df_merged
df_merged = pd.merge(df_to_merge, df_y0, how='left', left_on=['symbol'], right_on=['symbol'], suffixes=('', '_drop'))
df_merged.drop([col for col in df_merged.columns if 'drop' in col], axis=1, inplace=True)
print("last year revenue merged")
# merge df_avg_historical
df_to_merge = df_merged
df_merged = | pd.merge(df_to_merge, df_avg_historical, how='left', left_on=['symbol'], right_on=['symbol'], suffixes=('', '_drop')) | pandas.merge |
import numpy as np
import pandas as pd
from wiser.viewer import Viewer
from allennlp.data import Instance
def score_labels_majority_vote(instances, gold_label_key='tags',
treat_tie_as='O', span_level=True):
tp, fp, fn = 0, 0, 0
for instance in instances:
maj_vote = _get_label_majority_vote(instance, treat_tie_as)
if span_level:
score = _score_sequence_span_level(maj_vote, instance[gold_label_key])
else:
score = _score_sequence_token_level(maj_vote, instance[gold_label_key])
tp += score[0]
fp += score[1]
fn += score[2]
# Collects results into a dataframe
column_names = ["TP", "FP", "FN", "P", "R", "F1"]
p, r, f1 = _get_p_r_f1(tp, fp, fn)
record = [tp, fp, fn, p, r, f1]
index = ["Majority Vote"] if span_level else ["Majority Vote (Token Level)"]
results = pd.DataFrame.from_records(
[record], columns=column_names, index=index)
results = results.sort_index()
return results
def get_generative_model_inputs(instances, label_to_ix):
label_name_to_col = {}
link_name_to_col = {}
# Collects label and link function names
names = set()
for doc in instances:
if 'WISER_LABELS' in doc:
for name in doc['WISER_LABELS']:
names.add(name)
for name in sorted(names):
label_name_to_col[name] = len(label_name_to_col)
names = set()
for doc in instances:
if 'WISER_LINKS' in doc:
for name in doc['WISER_LINKS']:
names.add(name)
for name in sorted(names):
link_name_to_col[name] = len(link_name_to_col)
# Counts total tokens
total_tokens = 0
for doc in instances:
total_tokens += len(doc['tokens'])
# Initializes output data structures
label_votes = np.zeros((total_tokens, len(label_name_to_col)), dtype=np.int64)
link_votes = np.zeros((total_tokens, len(link_name_to_col)), dtype=np.int64)
seq_starts = np.zeros((len(instances),), dtype=np.int64)
# Populates outputs
offset = 0
for i, doc in enumerate(instances):
seq_starts[i] = offset
for name in sorted(doc['WISER_LABELS'].keys()):
for j, vote in enumerate(doc['WISER_LABELS'][name]):
label_votes[offset + j, label_name_to_col[name]] = label_to_ix[vote]
if 'WISER_LINKS' in doc:
for name in sorted(doc['WISER_LINKS'].keys()):
for j, vote in enumerate(doc['WISER_LINKS'][name]):
link_votes[offset + j, link_name_to_col[name]] = vote
offset += len(doc['tokens'])
return label_votes, link_votes, seq_starts
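# Shape sketch (hypothetical numbers): two instances with 3 and 4 tokens scored by two
# tagging rules yield label_votes of shape (7, 2), link_votes of shape (7, n_link_rules)
# and seq_starts == [0, 3], so each document's tokens occupy a contiguous block of rows.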
def score_predictions(instances, predictions,
gold_label_key='tags', span_level=True):
tp, fp, fn = 0, 0, 0
offset = 0
for instance in instances:
length = len(instance[gold_label_key])
if span_level:
scores = _score_sequence_span_level(
predictions[offset:offset+length], instance[gold_label_key])
else:
scores = _score_sequence_token_level(
predictions[offset:offset+length], instance[gold_label_key])
tp += scores[0]
fp += scores[1]
fn += scores[2]
offset += length
# Collects results into a dataframe
column_names = ["TP", "FP", "FN", "P", "R", "F1"]
p = round(tp / (tp + fp) if tp > 0 or fp > 0 else 0.0, ndigits=4)
r = round(tp / (tp + fn) if tp > 0 or fn > 0 else 0.0, ndigits=4)
f1 = round(2 * p * r / (p + r) if p > 0 and r > 0 else 0.0, ndigits=4)
record = [tp, fp, fn, p, r, f1]
index = ["Predictions"] if span_level else ["Predictions (Token Level)"]
results = pd.DataFrame.from_records(
[record], columns=column_names, index=index)
results = results.sort_index()
return results
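# Worked example of the precision/recall/F1 arithmetic above; defined only as a sketch
# and never called anywhere in this module.
def _p_r_f1_sketch():
tp, fp, fn = 8, 2, 4
p = tp / (tp + fp) # 0.8
r = tp / (tp + fn) # ~0.6667
f1 = 2 * p * r / (p + r) # ~0.7273
return p, r, f1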
def score_tagging_rules(instances, gold_label_key='tags'):
lf_scores = {}
for instance in instances:
for lf_name, predictions in instance['WISER_LABELS'].items():
if lf_name not in lf_scores:
# Initializes true positive, false positive, false negative,
# correct, and total vote counts
lf_scores[lf_name] = [0, 0, 0, 0, 0]
scores = _score_sequence_span_level(predictions, instance[gold_label_key])
lf_scores[lf_name][0] += scores[0]
lf_scores[lf_name][1] += scores[1]
lf_scores[lf_name][2] += scores[2]
scores = _score_token_accuracy(predictions, instance[gold_label_key])
lf_scores[lf_name][3] += scores[0]
lf_scores[lf_name][4] += scores[1]
# Computes accuracies
for lf_name in lf_scores.keys():
if lf_scores[lf_name][3] > 0:
lf_scores[lf_name][3] = float(lf_scores[lf_name][3]) / lf_scores[lf_name][4]
lf_scores[lf_name][3] = round(lf_scores[lf_name][3], ndigits=4)
else:
lf_scores[lf_name][3] = float('NaN')
# Collects results into a dataframe
column_names = ["TP", "FP", "FN", "Token Acc.", "Token Votes"]
results = | pd.DataFrame.from_dict(lf_scores, orient="index", columns=column_names) | pandas.DataFrame.from_dict |
""" test partial slicing on Series/Frame """
import pytest
from datetime import datetime, date
import numpy as np
import pandas as pd
import operator as op
from pandas import (DatetimeIndex, Series, DataFrame,
date_range, Index, Timedelta, Timestamp)
from pandas.util import testing as tm
class TestSlicing(object):
def test_slice_year(self):
dti = DatetimeIndex(freq='B', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
result = s['2005']
expected = s[s.index.year == 2005]
tm.assert_series_equal(result, expected)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
result = df.loc['2005']
expected = df[df.index.year == 2005]
tm.assert_frame_equal(result, expected)
rng = date_range('1/1/2000', '1/1/2010')
result = rng.get_loc('2009')
expected = slice(3288, 3653)
assert result == expected
def test_slice_quarter(self):
dti = DatetimeIndex(freq='D', start=datetime(2000, 6, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
assert len(s['2001Q1']) == 90
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
assert len(df.loc['1Q01']) == 90
def test_slice_month(self):
dti = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
assert len(s['2005-11']) == 30
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
assert len(df.loc['2005-11']) == 30
tm.assert_series_equal(s['2005-11'], s['11-2005'])
def test_partial_slice(self):
rng = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-05':'2006-02']
expected = s['20050501':'20060228']
tm.assert_series_equal(result, expected)
result = s['2005-05':]
expected = s['20050501':]
tm.assert_series_equal(result, expected)
result = s[:'2006-02']
expected = s[:'20060228']
tm.assert_series_equal(result, expected)
result = s['2005-1-1']
assert result == s.iloc[0]
pytest.raises(Exception, s.__getitem__, '2004-12-31')
def test_partial_slice_daily(self):
rng = DatetimeIndex(freq='H', start=datetime(2005, 1, 31), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-31']
tm.assert_series_equal(result, s.iloc[:24])
pytest.raises(Exception, s.__getitem__, '2004-12-31 00')
def test_partial_slice_hourly(self):
rng = DatetimeIndex(freq='T', start=datetime(2005, 1, 1, 20, 0, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1']
tm.assert_series_equal(result, s.iloc[:60 * 4])
result = s['2005-1-1 20']
tm.assert_series_equal(result, s.iloc[:60])
assert s['2005-1-1 20:00'] == s.iloc[0]
pytest.raises(Exception, s.__getitem__, '2004-12-31 00:15')
def test_partial_slice_minutely(self):
rng = DatetimeIndex(freq='S', start=datetime(2005, 1, 1, 23, 59, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1 23:59']
tm.assert_series_equal(result, s.iloc[:60])
result = s['2005-1-1']
tm.assert_series_equal(result, s.iloc[:60])
assert s[Timestamp('2005-1-1 23:59:00')] == s.iloc[0]
pytest.raises(Exception, s.__getitem__, '2004-12-31 00:00:00')
def test_partial_slice_second_precision(self):
rng = DatetimeIndex(start=datetime(2005, 1, 1, 0, 0, 59,
microsecond=999990),
periods=20, freq='US')
s = Series(np.arange(20), rng)
tm.assert_series_equal(s['2005-1-1 00:00'], s.iloc[:10])
tm.assert_series_equal(s['2005-1-1 00:00:59'], s.iloc[:10])
tm.assert_series_equal(s['2005-1-1 00:01'], s.iloc[10:])
tm.assert_series_equal(s['2005-1-1 00:01:00'], s.iloc[10:])
assert s[Timestamp('2005-1-1 00:00:59.999990')] == s.iloc[0]
tm.assert_raises_regex(KeyError, '2005-1-1 00:00:00',
lambda: s['2005-1-1 00:00:00'])
def test_partial_slicing_dataframe(self):
# GH14856
# Test various combinations of string slicing resolution vs.
# index resolution
# - If string resolution is less precise than index resolution,
# string is considered a slice
# - If string resolution is equal to or more precise than index
# resolution, string is considered an exact match
formats = ['%Y', '%Y-%m', '%Y-%m-%d', '%Y-%m-%d %H',
'%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S']
resolutions = ['year', 'month', 'day', 'hour', 'minute', 'second']
for rnum, resolution in enumerate(resolutions[2:], 2):
# we check only 'day', 'hour', 'minute' and 'second'
unit = Timedelta("1 " + resolution)
middate = datetime(2012, 1, 1, 0, 0, 0)
index = DatetimeIndex([middate - unit,
middate, middate + unit])
values = [1, 2, 3]
df = DataFrame({'a': values}, index, dtype=np.int64)
assert df.index.resolution == resolution
# Timestamp with the same resolution as index
# Should be exact match for Series (return scalar)
# and raise KeyError for Frame
for timestamp, expected in zip(index, values):
ts_string = timestamp.strftime(formats[rnum])
# make ts_string as precise as index
result = df['a'][ts_string]
assert isinstance(result, np.int64)
assert result == expected
pytest.raises(KeyError, df.__getitem__, ts_string)
# Timestamp with resolution less precise than index
for fmt in formats[:rnum]:
for element, theslice in [[0, slice(None, 1)],
[1, slice(1, None)]]:
ts_string = index[element].strftime(fmt)
# Series should return slice
result = df['a'][ts_string]
expected = df['a'][theslice]
tm.assert_series_equal(result, expected)
# Frame should return slice as well
result = df[ts_string]
expected = df[theslice]
tm.assert_frame_equal(result, expected)
# Timestamp with resolution more precise than index
# Compatible with existing key
# Should return scalar for Series
# and raise KeyError for Frame
for fmt in formats[rnum + 1:]:
ts_string = index[1].strftime(fmt)
result = df['a'][ts_string]
assert isinstance(result, np.int64)
assert result == 2
pytest.raises(KeyError, df.__getitem__, ts_string)
# Not compatible with existing key
# Should raise KeyError
for fmt, res in list(zip(formats, resolutions))[rnum + 1:]:
ts = index[1] + Timedelta("1 " + res)
ts_string = ts.strftime(fmt)
pytest.raises(KeyError, df['a'].__getitem__, ts_string)
pytest.raises(KeyError, df.__getitem__, ts_string)
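# Worked illustration of the rule above (sketch): with a daily index spanning January
# 2012, df['a']['2012-01'] resolves to a slice over the whole month, while
# df['a']['2012-01-02'] matches the index resolution exactly and returns a scalar,
# and df['2012-01-02'] on the frame itself raises KeyError.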
def test_partial_slicing_with_multiindex(self):
# GH 4758
# partial string indexing with a multi-index buggy
df = DataFrame({'ACCOUNT': ["ACCT1", "ACCT1", "ACCT1", "ACCT2"],
'TICKER': ["ABC", "MNP", "XYZ", "XYZ"],
'val': [1, 2, 3, 4]},
index=date_range("2013-06-19 09:30:00",
periods=4, freq='5T'))
df_multi = df.set_index(['ACCOUNT', 'TICKER'], append=True)
expected = DataFrame([
[1]
], index=Index(['ABC'], name='TICKER'), columns=['val'])
result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1')]
tm.assert_frame_equal(result, expected)
expected = df_multi.loc[
(pd.Timestamp('2013-06-19 09:30:00', tz=None), 'ACCT1', 'ABC')]
result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1', 'ABC')]
tm.assert_series_equal(result, expected)
# this is a KeyError as we don't do partial string selection on
# multi-levels
def f():
df_multi.loc[('2013-06-19', 'ACCT1', 'ABC')]
pytest.raises(KeyError, f)
# GH 4294
# partial slice on a series mi
s = pd.DataFrame(np.random.rand(1000, 1000), index=pd.date_range(
'2000-1-1', periods=1000)).stack()
s2 = s[:-1].copy()
expected = s2['2000-1-4']
result = s2[pd.Timestamp('2000-1-4')]
tm.assert_series_equal(result, expected)
result = s[pd.Timestamp('2000-1-4')]
expected = s['2000-1-4']
tm.assert_series_equal(result, expected)
df2 = pd.DataFrame(s)
expected = df2.xs('2000-1-4')
result = df2.loc[pd.Timestamp('2000-1-4')]
tm.assert_frame_equal(result, expected)
def test_partial_slice_doesnt_require_monotonicity(self):
# For historical reasons.
s = pd.Series(np.arange(10), pd.date_range('2014-01-01', periods=10))
nonmonotonic = s[[3, 5, 4]]
expected = nonmonotonic.iloc[:0]
timestamp = pd.Timestamp('2014-01-10')
tm.assert_series_equal(nonmonotonic['2014-01-10':], expected)
tm.assert_raises_regex(KeyError,
r"Timestamp\('2014-01-10 00:00:00'\)",
lambda: nonmonotonic[timestamp:])
tm.assert_series_equal(nonmonotonic.loc['2014-01-10':], expected)
tm.assert_raises_regex(KeyError,
r"Timestamp\('2014-01-10 00:00:00'\)",
lambda: nonmonotonic.loc[timestamp:])
def test_loc_datetime_length_one(self):
# GH16071
df = pd.DataFrame(columns=['1'],
index=pd.date_range('2016-10-01T00:00:00',
'2016-10-01T23:59:59'))
result = df.loc[datetime(2016, 10, 1):]
tm.assert_frame_equal(result, df)
result = df.loc['2016-10-01T00:00:00':]
tm.assert_frame_equal(result, df)
@pytest.mark.parametrize('datetimelike', [
Timestamp('20130101'), datetime(2013, 1, 1),
date(2013, 1, 1), np.datetime64('2013-01-01T00:00', 'ns')])
@pytest.mark.parametrize('op,expected', [
(op.lt, [True, False, False, False]),
(op.le, [True, True, False, False]),
(op.eq, [False, True, False, False]),
(op.gt, [False, False, False, True])])
def test_selection_by_datetimelike(self, datetimelike, op, expected):
# GH issue #17965, test for ability to compare datetime64[ns] columns
# to datetimelike
df = DataFrame({'A': [pd.Timestamp('20120101'),
pd.Timestamp('20130101'),
np.nan, | pd.Timestamp('20130103') | pandas.Timestamp |
import calendar
import pandas as pd
from colourutils import extend_colour_map
def extend_data_range(data):
"""
Extends the index of the given Series so that it has daily values, starting from the 1st of the earliest month and
ending on the last day of the latest month.
:param data: The Series to be extended with a datetime index
:return: The Series with an extended daily index
"""
earliest_date = data.index.min()
first_month_start = pd.Timestamp(year=earliest_date.year, month=earliest_date.month, day=1)
latest_date = data.index.max()
_, last_date_of_month = calendar.monthrange(latest_date.year, latest_date.month)
last_month_end = | pd.Timestamp(year=latest_date.year, month=latest_date.month, day=last_date_of_month) | pandas.Timestamp |
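# Usage sketch for extend_data_range (hypothetical data): a Series with entries at
# 2021-01-15 and 2021-02-10 would come back indexed over every day from 2021-01-01
# through 2021-02-28, presumably with NaN on the newly added dates until filled.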
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Download contract minute bars.

Because the data volume is very large, you can download only the key contracts,
for example the dominant and sub-dominant contracts and the nearest and farthest months.

Be sure to update the download range via update_first_last.py first;
otherwise everything is downloaded again and previously saved data gets overwritten.
"""
import os
import sys
from datetime import datetime, timedelta
import pandas as pd
from WindPy import w
from demo_future.E01_download_daily import read_constituent_at_date, merge_constituent_date
from kquant_data.config import __CONFIG_TDAYS_SHFE_FILE__, __CONFIG_H5_FUT_SECTOR_DIR__
from kquant_data.utils.symbol import split_alpha_number
from kquant_data.utils.xdatetime import yyyyMMdd_2_datetime, datetime_2_yyyyMMddHHmm
from kquant_data.wind.tdays import read_tdays
from kquant_data.wind.wsi import download_min_ohlcv
from kquant_data.xio.csv import read_datetime_dataframe
# Work around pandas on Python 3.6 not handling non-ASCII (Chinese) file paths
print(sys.getfilesystemencoding())  # filesystem encoding before the change
try:
sys._enablelegacywindowsfsencoding()  # switch to the legacy Windows filesystem encoding
print(sys.getfilesystemencoding())  # filesystem encoding after the change
except:
pass
def download_constituent_min(w, dirpath, date, ipo_last_trade, first_last, wind_code_set, trading_days):
constituent = read_constituent_at_date(dirpath, date)
if constituent is None:
# No constituent file for this date, presumably because it is unchanged from last time.
# That is fine -- the data should already have been downloaded on the previous pass.
return wind_code_set
constituent_dt = merge_constituent_date(constituent, ipo_last_trade, first_last)
for i in range(constituent_dt.shape[0]):
row = constituent_dt.iloc[i]
wind_code = row['wind_code']
# Already handled in the current session; do not download it again
if wind_code in wind_code_set:
continue
wind_code_set.add(wind_code)
# This contract's download window has already been exhausted; do not download it again.
# Note that the comparison below includes the time of day, not just the date.
if datetime_2_yyyyMMddHHmm(row['start']) == datetime_2_yyyyMMddHHmm(row['end']):
continue
product, num = split_alpha_number(wind_code)
path_dir = os.path.join(root_path, product)
if not os.path.exists(path_dir):
os.mkdir(path_dir)
path_csv = os.path.join(path_dir, '%s.csv' % wind_code)
# The start-to-end span can be very long -- a fresh download may cover more than a year --
# so fetch the data in roughly monthly chunks, which is faster.
# Saving repeatedly into one file can cause problems, hence the merge-and-deduplicate step below.
trading_days['idx'] = range(len(trading_days))
start = row['start']
end = row['end']
trading_days_idx = trading_days[start._date_repr:end._date_repr]['idx']
rng = list(range(trading_days_idx[0], trading_days_idx[-1], 30))
# make the last chunk closed on the right
rng.insert(len(rng), trading_days_idx[-1])
for idx, r in enumerate(rng):
if idx == 0:
continue
start_ = trading_days.iloc[rng[idx - 1]]['date']
end_ = trading_days.iloc[r]['date']
if idx == 1:
# The first chunk is special: it has to reach back to the evening of the previous trading day
start_ = trading_days.iloc[rng[idx - 1] - 1]['date']
start_ += pd.Timedelta('20H')  # may land on a non-trading day
end_ += pd.Timedelta('16H')
print(start_, end_)
try:
df_old = pd.read_csv(path_csv, index_col=0, parse_dates=True)
# drop empty rows before merging
df_old.dropna(axis=0, how='all', thresh=3, inplace=True)
except:
df_old = None
print(row)
df_new, wind_code = download_min_ohlcv(w, wind_code, start, end)
df = pd.concat([df_old, df_new])
df = df[~df.index.duplicated(keep='last')]
print(path_csv)
df.to_csv(path_csv)
# uncomment the break below to stop after a single contract when testing
# break
return wind_code_set
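# Illustrative helper, defined only as a sketch and never called: how the ~30-trading-day
# chunking above carves an index range into download windows that are closed on the right.
def _chunk_boundaries_sketch():
idx_first, idx_last = 0, 95
rng = list(range(idx_first, idx_last, 30)) # [0, 30, 60, 90]
rng.insert(len(rng), idx_last) # [0, 30, 60, 90, 95]
# consecutive pairs become the windows (0, 30], (30, 60], (60, 90], (90, 95]
return rng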
if __name__ == '__main__':
# There is a lot of data -- would downloading less be more appropriate, e.g. only the
# dominant/sub-dominant contracts and the nearest/farthest months? Downloading everything is also an option.
# Should minute data be split by day, or kept in a single file per contract?
# And what date range should the download cover?
w.start()
# Note: zeros may appear in the data
path_ipo_last_trade = os.path.join(__CONFIG_H5_FUT_SECTOR_DIR__, 'ipo_last_trade_trading.csv')
ipo_last_trade = | pd.read_csv(path_ipo_last_trade) | pandas.read_csv |
import src.database.build_db as db
import src.weather as weather
import src.send_email as email
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy_utils import database_exists, create_database
from dotenv import load_dotenv
import requests
import os
import pymysql
import sys
import json
import streamlit as st
import matplotlib.pyplot as plt
from sklearn import datasets
import seaborn as sns
import datetime
import numpy as np
import pickle
import plotly.express as px
import plotly.graph_objects as go
import h2o
h2o.init()
rnd_forest_model = pickle.load(open("models/best_rf.pkl", 'rb'))
st.set_page_config(layout="wide")
st.set_option('deprecation.showPyplotGlobalUse', False)
# My functions
if __name__ == '__main__':
flag = False # DONT DO THIS FOR HEROKU
if flag:
conn = db.connect_to_mysql()
if conn and flag:
db.create_schemas(conn)
# load data
df_sales = pd.read_csv('data/raw_data/sales_clean.csv')
df_weather = weather.df_weather
# Other functions
def get_holidays(date_min, date_max):
holidays = pd.read_excel('data/db_load_files/calendario.xls')
holidays.rename(columns={'Dia': 'date', 'laborable / festivo / domingo festivo': 'day_type',
'Festividad': 'holiday_name', 'Tipo de Festivo': 'holiday_type'}, inplace=True)
del holidays['Dia_semana']
holidays['date'] = pd.to_datetime(holidays['date'])
mask = (holidays['date'] >= date_min) & (holidays['date'] <= date_max)
holidays = holidays.loc[mask]
return holidays
# Other functions
def prepare_data(df_weather, model):
df_test = df_weather.copy()
df_test['date'] = pd.to_datetime(df_test['date'])
df_test["day_of_week"] = df_test.date.dt.day_name()
df_test["month_name"] = df_test.date.dt.month_name()
df_test["day"] = df_test.date.dt.day
df_test["year"] = df_test.date.dt.year
df_placeholder = df_sales.drop(columns=['total_sales'])
df_placeholder['date'] = pd.to_datetime(df_placeholder['date'])
df_test = df_test.append(df_placeholder)
conditions = ["Rain", "Thunderstorm", "Drizzle", "Snow"]
df_test['did_rain'] = df_test['type'].apply(
lambda x: 1 if x in conditions else 0)
df_test['total_precip_mm'] = df_test['did_rain'].apply(
lambda x: 8 if x == 1 else 0)
date_min = df_test.date.min()
date_max = df_test.date.max()
df_holidays = get_holidays(date_min, date_max)
df_merged = pd.merge(df_test, df_holidays, how="right", on="date")
df_merged['is_closed'] = 0
df_merged['is_lockdown'] = 0
df_merged['is_curfew'] = 0
df_merged = df_merged.set_index('date')
df_merged['year'] = df_merged.year.astype('category')
del df_merged['day']
del df_merged['holiday_type']
del df_merged['holiday_name']
del df_merged['type']
df_merged = pd.get_dummies(df_merged, dummy_na=True)
df_merged = df_merged.iloc[-8:]
del df_merged['day_type_nan']
del df_merged['month_name_nan']
del df_merged['day_of_week_nan']
del df_merged['year_nan']
df_merged['prev_sales'] = df_sales.total_sales.mean()
df_merged['is_post_holiday'] = 0
df_merged['is_pre_holiday'] = 0
df_test = df_merged.copy()
results = predict_data(df_test, model)
return results
def predict_data(df_test, model):
if model == 'forest':
df_results = pd.DataFrame()
predictions = rnd_forest_model.predict(df_test)
round_predicts = [round(num, 2) for num in predictions]
df_results = pd.DataFrame(round_predicts, columns=[
'predict'])
# df_results['Sales Prediction'] = pd.Series(round_predicts, index=df_results.index)
return df_results
elif model == "deep":
# df_results = df_weather.copy()
saved_model = h2o.load_model(
'models/deeplearning/DeepLearning_grid__1_AutoML_20210519_134516_model_1')
stacked_test = df_test.copy()
# Conversion into a H20 frame to train
h2test = h2o.H2OFrame(stacked_test)
predicted_price_h2 = saved_model.predict(
h2test).as_data_frame()
df_results = predicted_price_h2.copy()
# df_results['Sales Prediction'] = predicted_price_h2.predict
return df_results
elif model == "stacked":
# df_results = df_weather.copy()
saved_model = h2o.load_model(
'models/autostacked/StackedEnsemble_AllModels_AutoML_20210519_134516')
stacked_test = df_test.copy()
# Conversion into a H20 frame to train
h2test = h2o.H2OFrame(stacked_test)
predicted_price_h2 = saved_model.predict(
h2test).as_data_frame()
df_results = predicted_price_h2.copy()
# df_results['Sales Prediction'] = predicted_price_h2.predict
return df_results
h20_stacked_model = prepare_data(df_weather, "stacked")
forest_weekly_outlook = prepare_data(df_weather, "forest")
deep_learn_model = prepare_data(df_weather, "deep")
df_main = df_weather.copy()
df_main.rename(columns={"type": "Weather",
"average_temp": "Temperature"}, inplace=True)
df_main['RndForest Sales'] = forest_weekly_outlook['predict']
df_main['DeepLearn Sales'] = deep_learn_model['predict']
df_main['h2O Sales'] = h20_stacked_model['predict']
df_main['date'] = pd.to_datetime(df_main['date'])
df_main['date'] = df_main['date'].dt.strftime('%d/%m/%Y')
df_main['AVG Prediction'] = df_main[[
'DeepLearn Sales', 'h2O Sales', 'RndForest Sales']].mean(axis=1)
def get_employees(x):
if x < 1000:
return 3
elif x >= 1000 and x < 2000:
return 4
elif x >= 2000 and x < 4000:
return 5
elif x >= 4000 and x < 6000:
return 6
elif x >= 6000 and x < 8000:
return 7
elif x >= 8000 and x < 10000:
return 8
elif x >= 10000:
return 10
df_main['Employees Needed'] = df_main['AVG Prediction'].apply(
lambda x: get_employees(x))
st.image('images/logo_large.png', width=300)
st.write("""
# CityPlay Sales Prediction App
This app predicts **sales for CityPlay, a bowling alley in Madrid**!
""")
st.write('---')
# sidebar
st.sidebar.header('Choose Input Params')
def user_input_features():
today = datetime.date.today()
week_from_now = today + datetime.timedelta(days=8)
start_date = st.sidebar.date_input('Date input', week_from_now)
holidays = ["Normal day", "Holiday", "Holiday-eve", "Post-Holiday"]
holiday_choice = st.sidebar.radio("What Type of day is it?", holidays)
prev_sales_value = st.sidebar.number_input('Do you happen to know the sales the day before? The default is the average.',
min_value=0.00, max_value=20000.00, value=2455.00, step=20.00, format=None, key=None)
temp = st.sidebar.slider('Temperature', value=17,
min_value=-15, max_value=48)
if not temp:
temp = 15
will_it_rain = [False, True]
did_rain_value = st.sidebar.radio(
"Predict Rain/Snow (People will stay home if it rains a lot!)", will_it_rain)
if did_rain_value:
mm = st.sidebar.slider('RainFall in millimeters', value=8.5,
min_value=0.5, max_value=29.5, step=0.5)
else:
mm = 0
data_list = [start_date]
df_test_day = pd.DataFrame([data_list])
df_test_day.columns = ['date']
df_test_day['date'] = pd.to_datetime(df_test_day['date'])
df_test_day2 = df_test_day.copy()
day_of_week = str(df_test_day.date.dt.day_name()).split()[1]
weekend = ["Saturday", "Sunday"]
df_test_day["day_of_week"] = df_test_day.date.dt.day_name()
df_test_day["month_name"] = df_test_day.date.dt.month_name()
df_test_day["day"] = df_test_day.date.dt.day
df_test_day["year"] = df_test_day.date.dt.year
df_placeholder2 = df_sales.drop(columns=['total_sales'])
df_placeholder2['date'] = pd.to_datetime(df_placeholder2['date'])
df_test_day = df_test_day.append(df_placeholder2)
df_test_day['did_rain'] = 1 if did_rain_value == True else 0
df_test_day['total_precip_mm'] = mm
df_test_day['day_type_domingo'] = df_test_day['day_of_week'].apply(
lambda x: 1 if x == "Sunday" else 0)
df_test_day['day_type_sábado'] = df_test_day['day_of_week'].apply(
lambda x: 1 if x == "Saturday" else 0)
df_test_day['day_type_festivo'] = 1 if holiday_choice == "Holiday" else 0
if holiday_choice == "Normal day":
if day_of_week not in weekend:
df_test_day['day_type_laborable'] = 1
else:
df_test_day['day_type_laborable'] = 0
elif holiday_choice == "Holiday":
if day_of_week in weekend:
df_test_day['day_type_sábado'] = 0
df_test_day['day_type_domingo'] = 0
else:
df_test_day['day_type_laborable'] = 0
df_test_day['is_post_holiday'] = 1 if holiday_choice == "Post-Holiday" else 0
df_test_day['is_pre_holiday'] = 1 if holiday_choice == "Holiday-eve" else 0
df_test_day['average_temp'] = temp
df_test_day['is_closed'] = 0
df_test_day['is_lockdown'] = 0
df_test_day['is_curfew'] = 0
df_test_day['prev_sales'] = prev_sales_value
df_test_day = df_test_day.set_index('date')
df_test_day['year'] = df_test_day.year.astype('category')
del df_test_day['day']
df_test_day = pd.get_dummies(df_test_day, dummy_na=True)
df_test_day = df_test_day.iloc[0:1]
del df_test_day['month_name_nan']
del df_test_day['day_of_week_nan']
del df_test_day['year_nan']
df_test2 = df_test_day.copy()
df_test2.to_csv("sampletesting.csv", index=True)
res_stacked_df = predict_data(df_test2, "stacked")
res_deep_df = predict_data(df_test2, "deep")
df_test_day2['date'] = pd.to_datetime(df_test_day2['date'])
df_test_day2['date'] = df_test_day2['date'].dt.strftime('%d/%m/%Y')
df_test_day2['DeepLearn Sales'] = res_deep_df['predict']
df_test_day2['h2O Sales'] = res_stacked_df['predict']
df_test_day2['AVG Prediction'] = df_test_day2[[
'DeepLearn Sales', 'h2O Sales']].mean(axis=1)
def get_employees(x):
if x < 1000:
return 3
elif x >= 1000 and x < 2000:
return 4
elif x >= 2000 and x < 4000:
return 5
elif x >= 4000 and x < 6000:
return 6
elif x >= 6000 and x < 8000:
return 7
elif x >= 8000 and x < 10000:
return 8
elif x >= 10000:
return 10
df_test_day2['Employees Needed'] = df_test_day2['AVG Prediction'].apply(
lambda x: get_employees(x))
return df_test_day2
results_df = user_input_features()
# main
st.text('Your Query:')
st.dataframe(results_df.style.format(
{'h2O Sales': '{:.2f}', 'DeepLearn Sales': '{:.2f}', 'AVG Prediction': '{:.2f}'}))
st.text('Weekly Outlook')
st.dataframe(df_main.style.format(
{'Temperature': '{:.1f}', 'RndForest Sales': '{:.2f}', 'h2O Sales': '{:.2f}', 'DeepLearn Sales': '{:.2f}', 'AVG Prediction': '{:.2f}'}))
st.text('Send Report Email:')
if st.button('SEND'):
email.send_email(df_main)
st.write('Email sent succesfully!')
# VISUALS
df_graphics = | pd.read_csv('data/db_load_files/clean_data.csv') | pandas.read_csv |
import pandas as pd
import numpy as np
import talib
class Indicators(object):
"""
Thin wrappers around TA-Lib indicators. Each method takes a list of symbols and one or
more price DataFrames indexed by date (one column per symbol) and returns DataFrames of
the same shape. bb(), for example, takes the lookback period and the upper/lower
standard-deviation multipliers and returns the upper, middle and lower Bollinger Bands.
"""
def bb(self, l_sym, df_price, time_period, st_dev_u, st_dev_l):
df_bb_u = pd.DataFrame(columns=l_sym, index=df_price.index)
df_bb_m = pd.DataFrame(columns=l_sym, index=df_price.index)
df_bb_l = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_bb_u[sym], df_bb_m[sym], df_bb_l[sym] = talib.BBANDS(np.asarray(df_price[sym]), timeperiod=time_period, nbdevup=st_dev_u, nbdevdn=st_dev_l)
except:
pass
return df_bb_u, df_bb_m, df_bb_l
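# Usage sketch (hypothetical symbols and data): with df_price indexed by date and one
# column per symbol, something like
# upper, middle, lower = Indicators().bb(['AAPL', 'MSFT'], df_price, 20, 2, 2)
# yields three DataFrames of the same shape holding the upper, middle and lower bands.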
def ema(self, l_sym, df_price, time_period):
df_ema = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_ema[sym] = talib.EMA(np.asarray(df_price[sym]), timeperiod=time_period)
except:
pass
return df_ema
def ma(self, l_sym, df_price, time_period):
df_ma = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_ma[sym] = talib.MA(np.asarray(df_price[sym]), timeperiod=time_period)
except:
pass
return df_ma
def sma(self, l_sym, df_price, time_period):
df_sma = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_sma[sym] = talib.SMA(np.asarray(df_price[sym]), timeperiod=time_period)
except:
pass
return df_sma
def adx(self, l_sym, df_high, df_low, df_close, time_period):
df_adx = pd.DataFrame(columns=l_sym, index=df_high.index)
for sym in l_sym:
try:
df_adx[sym] = talib.ADX(high=np.asarray(df_high[sym]), low=np.asarray(df_low[sym]), close=np.asarray(df_close[sym]), timeperiod = time_period)
except:
pass
return df_adx
def mom(self, l_sym, df_price, time_period):
df_mom = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_mom[sym] = talib.MOM(np.asarray(df_price[sym]), timeperiod = time_period)
except:
pass
return df_mom
def atr(self, l_sym, df_high, df_low, df_close, time_period):
df_atr = pd.DataFrame(columns=l_sym, index=df_high.index)
for sym in l_sym:
try:
df_atr[sym] = talib.ATR(high=np.asarray(df_high[sym]), low=np.asarray(df_low[sym]), close=np.asarray(df_close[sym]), timeperiod=time_period)
except:
pass
return df_atr
def macd(self, l_sym, df_price, fast_period, slow_period, signal_period):
df_macd = pd.DataFrame(columns=l_sym, index=df_price.index)
df_macdsignal = pd.DataFrame(columns=l_sym, index=df_price.index)
df_macdhist = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_macd[sym], df_macdsignal[sym], df_macdhist[sym] = talib.MACD(np.asarray(df_price[sym]), fastperiod=fast_period, slowperiod=slow_period, signalperiod=signal_period)
except:
pass
return df_macd, df_macdsignal, df_macdhist
def wavec(self, l_sym, df_three, df_four, df_five):
df_ca = pd.DataFrame(columns=l_sym, index=df_three.index)
df_cb = | pd.DataFrame(columns=l_sym, index=df_three.index) | pandas.DataFrame |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas.core.accessor import AccessorProperty
import pandas.plotting._core as gfx
def torque_column_labels():
return [f'torque_{i}' for i in range(361)]
class RevolutionPlotMethods(gfx.FramePlotMethods):
polar_angles = np.arange(0, 361) / (180 / np.pi)
torque_columns = torque_column_labels()
def polar(self, *args, **kwargs):
ax = plt.subplot(111, projection='polar')
torque = self._data[self.torque_columns].mean()
ax.plot(self.polar_angles, torque, *args, **kwargs)
ax.set_theta_offset(np.pi/2)
# ax.set_theta_direction(-1)
xticks_num = 8
xticks = np.arange(0, 2*np.pi, 2 * np.pi / xticks_num)
ax.set_xticks(xticks)
rad_to_label = lambda i: '{}°'.format(int(i / (2 * np.pi) * 360) % 180)
ax.set_xticklabels([rad_to_label(i) for i in xticks])
ax.set_yticklabels([])
return ax
class RevolutionDataFrame(pd.DataFrame):
@property
def _constructor(self):
return RevolutionDataFrame
def compute_min_max_angles(self):
# @TODO this method is quite memory inefficient. Row by row calculation is better
torque_columns = torque_column_labels()
torque_T = self.loc[:, torque_columns].transpose().reset_index(drop=True)
left_max_angle = torque_T.iloc[:180].idxmax()
right_max_angle = torque_T.iloc[180:].idxmax() - 180
left_min_angle = pd.concat([torque_T.iloc[:135], torque_T.iloc[315:]]).idxmin()
right_min_angle = torque_T.iloc[135:315].idxmin() - 180
left_max = pd.DataFrame(left_max_angle)
right_max = pd.DataFrame(right_max_angle)
left_min = | pd.DataFrame(left_min_angle) | pandas.DataFrame |
"""
<NAME>
<EMAIL>
The Tree of learning bears the noblest fruit,
but noble fruit tastes bad.
Proceed Formally.
"""
# Omitted the correlative uncertainty in the calibration constant
# since the TE value and Area values both are dependent on T and B
"""
TODO: Ship the toolsuite with a .ini so column names can be generalized.
TODO: Separate the TE from the Enhanced routines. Condense TE error propagation.
"""
import pandas, numpy, variablenames
from scipy.stats import mode
import datetime
from matplotlib import pyplot as plt
from matplotlib import rc
plt.locator_params(axis='y', nbins=6)
font = {'size': 12}
rc('font', **font)
def header(self, mystr):
s = 7
lstr = len(mystr) +2
width = lstr+s*2+2
print("@"*width)
print(str("{0:1}{2:^"+str(s)+"}{1:^"+str(lstr)+"}{2:^"+str(s)+"}{0:1}").format("@",mystr, ' '*s))
print("@"*width)
def report(number, sigfigs=3):
# This reports significant figures
# by exploiting the exponential format
# since I could not find something prebuilt.
formatstring = "{0:."+str(sigfigs)+"E}"
string = str(float(formatstring.format(number)))
return string
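# Worked examples: report(0.0123456) -> '0.01235' and report(123456.789) -> '123500.0';
# the .3E format keeps a four-significant-digit mantissa, which float()/str() then turn
# back into a plain decimal string.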
def deuterontepol(B,T):
# Spin 1 TE Equation
k = 1.38064852 * 10 ** -23
gammad_over_2pi = 6.535902311 #MHz/T
h_over_2kb=2.4*10**-5
a=h_over_2kb*gammad_over_2pi
x = a*B/T
return 4*numpy.tanh(x)/(3+numpy.tanh(x)**2)
def pBdeuterontepol(B,T):
# Partial derivative with respect to B, for uncertainty propagation
k = 1.38064852 * 10 ** -23
gammad_over_2pi = 6.535902311 #MHz/T
h_over_2kb=2.4*10**-5
a=h_over_2kb*gammad_over_2pi
x = a*B/T
# dP/dB = dP/dx * a/T, so only a single power of T belongs in the denominator here
return - 4*a*(numpy.tanh(x)**2-3)*(1/numpy.cosh(x)**2)/(T*(numpy.tanh(x)**2+3)**2)
def pTdeuterontepol(B,T):
# Partial derivative with respect to T, for uncertainty propagation
k = 1.38064852 * 10 ** -23
gammad_over_2pi = 6.535902311 #MHz/T
h_over_2kb=2.4*10**-5
a=h_over_2kb*gammad_over_2pi
x =a*B/T
return 4*a*B*(numpy.tanh(x)**2-3)*(1/numpy.cosh(x)**2)/(T**2*(numpy.tanh(x)**2+3)**2)
def tpol(b, t, mu = 1.4106067873 * 10 ** -26):
# Spin 1/2 TE Equation
k = 1.38064852 * 10 ** -23
x = mu * b / (k * t)
return numpy.tanh(x)
def pBtpol(b, t, mu = 1.4106067873 * 10 ** -26):
# Partial derivative with respect to B, for uncertainty propagation
# default mu is for the proton
k = 1.38064852 * 10 ** -23
x = mu * b / (k * t)
return - mu/(k*t)*1/(numpy.cosh(x))**2
def pTtpol(b, t, mu = 1.4106067873 * 10 ** -26):
# Partial derivative with respect to T, for uncertainty propagation
# default mu is for the proton
k = 1.38064852 * 10 ** -23
x = mu * b / (k * t)
return - mu*b/(k*t**2)*1/(numpy.cosh(x))**2
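# Rough sanity check of the TE formulas above at B = 5 T and T = 1 K:
# for the proton, x = mu*B/(k*T) ~ 5.1e-3, so tpol(5, 1) ~ tanh(x) ~ 0.51 %;
# for the deuteron, x ~ 7.8e-4 and deuterontepol(5, 1) ~ 4*tanh(x)/3 ~ 0.10 % --
# the small thermal-equilibrium polarizations against which enhanced signals are calibrated.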
def collator(datapath, te=False, constant=1, home=None, deuteron=False, to_save = [], title=None, enforce_T3=False, enforce_VP=False, prevanalized=None, N=1):
plt.clf()
if prevanalized is None:
# prevanalized is an optional path to a dataset that has already been analyzed
# by the global_interpreter, enabling a tertiary analysis of it.
#
# This branch handles a first, fresh dataset analysis.
pltsave = "Enhanced_Results" if not te else "TE_Results"
pltsave = title if title is not None else pltsave
with open(datapath, 'r') as f:
df = pandas.read_csv(f)
df[variablenames.gi_time] = pandas.to_datetime(df[variablenames.gi_time], format="%Y-%m-%d %H:%M:%S")
df = df.sort_values(by=variablenames.gi_time)
mhz_to_b = 42.58 if not deuteron else 6.536
# Pull out the data needed to do the analysis from our file.
if not deuteron:
y1 = df[variablenames.gi_centroidlabel].values.astype(float)
y1b = df[variablenames.gi_centroidlabel].values.astype(float)/mhz_to_b
y3 = df[variablenames.gi_lorentzianarea].values.astype(float)
y3a = df[variablenames.gi_dataarea].values.astype(float)
relative_error = df[variablenames.gi_relchisq].values.astype(float)
try:
vpy = df[variablenames.gi_secondary_thermistor].values.astype(float)
t3y = df[variablenames.gi_primary_thermistor].values.astype(float)
except ValueError as e:
if te:
try:
vpy = df[variablenames.gi_secondary_thermistor].values.astype(float)
except:
print("\n***ERROR: Backup to secondary thermistor failed")
print("ADVISORY: Setting secondary temperatures to 1K, hoping for the best\n")
vpy = numpy.array([1 for i in range(len(df[variablenames.gi_time]))])
try:
t3y = df[variablenames.gi_primary_thermistor].values.astype(float)
except:
print("\n***ERROR: Backup to secondary thermistor failed")
print("ADVISORY: Setting secondary temperatures to 1K, hoping for the best\n")
t3y = numpy.array([1 for i in range(len(df[variablenames.gi_time]))])
#If you got here, then you're missing some critical thermometry data
# in a global analysis csv.
else:
print(e)
print("WARNING: Using", variablenames.gi_secondary_thermistor, "as failsafe.")
try:
vpy = df[variablenames.gi_secondary_thermistor].values.astype(float)
t3y = df[variablenames.gi_primary_thermistor].values.astype(float)
except:
print("\n***ERROR: Backup to the backup temperature failed")
print("ADVISORY: Setting all temperatures to 1K, hoping for the best\n")
t3y = numpy.array([1 for i in range(len(df[variablenames.gi_time]))])
vpy=t3y
sweep_centroids = df[variablenames.gi_centroid].values.astype(float)
sweep_width = df[variablenames.gi_width].values.astype(float)
teval = df[variablenames.gi_TE].values.astype(float) # unscaled
else:
# prevanalized is an optional path to a dataset that has already been analyzed
# by the global_interpreter, enabling a tertiary analysis of it.
#
# This branch handles that tertiary analysis.
with open(prevanalized, 'r') as f:
df = pandas.read_csv(f)
df[variablenames.gi_time] = | pandas.to_datetime(df[variablenames.gi_time], format="%Y-%m-%d %H:%M:%S") | pandas.to_datetime |
import pandas as pd
from pprint import pprint
from jellyfish import jaro_distance
import unidecode
from _Classes.PyscopusModified import ScopusModified
from _Classes.Author import Author
from _Classes.Individuals import Student, Egress
from _Classes.Indicators import Indicators
from _Funções_e_Valores.verify_authors import search_authors_list, treat_exceptions
from _Funções_e_Valores._exceptions import scopus_articles_exceptions
from _Funções_e_Valores.values import quadrennium, FILE, HAS_EVENTS, FULL_PERIOD_AUTHORS, REQUEST_SCOPUS_DATA, EGRESS, SCOPUS_APIKEY
class Data():
def __init__(self, professors, egress, students, qualis_2016, qualis_2020, qualis_2016_events, qualis_2020_events):
super(Data, self).__init__()
self.professors = professors
self.egress = egress
self.students = students
self.qualis_2016 = qualis_2016
self.qualis_2020 = qualis_2020
self.qualis_2016_events = qualis_2016_events
self.qualis_2020_events = qualis_2020_events
self.exceptions = {'Nome Trabalho':[], 'Nome Evento Cadastrado':[], 'Nome Evento Canônico':[]} # For the exceptions sheet from the excel file
self.reports = {'Author':[], 'Report':[]} # Reports by author
self.authors_dict = {"Author":[], "A/E":[]} # Dictionary of authors (Professors, Students and Egress)
columns = []
for year in quadrennium:
if year not in columns:
columns.append(year)
for col in columns:
self.authors_dict[f"20{col}"] = []
self.art_prof = pd.DataFrame() # Articles by professor
self.authors_average = [] # List with the "average number of authors per article" of each professor
self.irestritos_2016 = {'Total com trava':None, 'Total sem trava':None, 'Anais com trava':None, 'Anais sem trava':None, 'Periódicos':None}
self.igerais_2016 = {'Total com trava':None, 'Total sem trava':None, 'Anais com trava':None, 'Anais sem trava':None, 'Periódicos':None}
self.authors_indicators_2016 = [] # Indicators of each professor qualis 2016
self.authors_indicators_2019 = [] # Indicators of each professor qualis 2019
self.general_indicators_2016 = [] # Indicators for all professors together qualis 2016
self.general_indicators_2019 = [] # Indicators for all professors together qualis 2019
self.authors_indicators_2016_journals = [] # Indicators of each professor qualis 2016 (Journals)
self.authors_indicators_2019_journals = [] # Indicators of each professor qualis 2019 (Journals)
self.general_indicators_2016_journals = [] # Indicators for all professors together qualis 2016 (Journals)
self.general_indicators_2019_journals = [] # Indicators for all professors together qualis 2019 (Journals)
self.authors_indicators_2016_proceedings = [] # Indicators of each professor qualis 2016 (Proceedings)
self.authors_indicators_2019_proceedings = [] # Indicators of each professor qualis 2019 (Proceedings)
self.general_indicators_2016_proceedings = [] # Indicators for all professors together qualis 2016 (Proceedings)
self.general_indicators_2019_proceedings = [] # Indicators for all professors together qualis 2019 (Proceedings)
self.journals_a1_a4_2019 = None
self.journals_a1_a4_SE_2019 = None
self.journals_a1_a4_2016 = None
self.journals_a1_a4_SE_2016 = None
self.journal_metrics_2019 = None
self.journal_metrics_2016 = None
self.proceedings_metrics_2019 = None
self.proceedings_metrics_2016 = None
def treat_data(self):
# Get the list of egress and students with their names and active-period
egress = Egress(self.egress, quadrennium)
self.egress_list = egress.get_egress_list()
students = Student(self.students, quadrennium)
self.students_list = students.get_students_list()
if HAS_EVENTS == True:
# Lowercase events
for pos, i in enumerate(self.qualis_2016_events['Nome Padrão']):
self.qualis_2016_events['Nome Padrão'][pos] = str(self.qualis_2016_events['Nome Padrão'][pos]).lower()
for pos, i in enumerate(self.qualis_2020_events['Nome Padrão']):
self.qualis_2020_events['Nome Padrão'][pos] = str(self.qualis_2020_events['Nome Padrão'][pos]).lower()
# Remove "-" from ISSN
for i in range(len(self.qualis_2016["ISSN"])):
self.qualis_2016["ISSN"][i] = self.qualis_2016["ISSN"][i].replace("-", "")
for i in range(len(self.qualis_2020["ISSN"])):
self.qualis_2020["ISSN"][i] = self.qualis_2020["ISSN"][i].replace("-", "")
def get_author_period(self, pos):
if FULL_PERIOD_AUTHORS == True:
period = {}
for year in quadrennium:
period[year] = True
else:
period = {}
for year in quadrennium:
period[year] = False
if EGRESS == True:
start = str(self.professors['Ingresso'][pos])[7:]
start = start.replace('-', '')
end = quadrennium[-1] # There's no limit
else:
if FILE == "UFSC 2017-2020":
start = str(self.professors["Início do Vínculo"][pos])[2:4]
else:
start = str(self.professors["Início do Vínculo"][pos])[8:]
end = str(self.professors["Fim do Vínculo"][pos])
if end == "-":
end = quadrennium[-1]
else:
if FILE == "UFSC 2017-2020":
end = str(self.professors["Fim do Vínculo"][pos])[2:4]
else:
end = str(self.professors["Fim do Vínculo"][pos])[8:]
if int(end) > int(quadrennium[-1]):
end = quadrennium[-1]
start_position = None
end_position = None
for pos, key in enumerate(period.keys()): # For each year of the quadrennium
if pos == 0 and int(start) < int(quadrennium[0]): # If the start year is lower than the first year of the quadrennium
start = quadrennium[0]
if key == start:
start_position = pos # The position of the start year on the quadrennium
if key == end:
end_position = pos # The position of the end year on the quadrennium
for pos, key in enumerate(period.keys()):
if int(end) >= int(quadrennium[0]):
if pos >= start_position and pos <= end_position: # The start year, the end year and the years in between are true
period[key] = True
return period
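# Example of the mapping above (sketch): with quadrennium = ['17', '18', '19', '20'],
# an author whose link starts in 2018 and has no end date ('-') yields
# {'17': False, '18': True, '19': True, '20': True}.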
def get_authors_reports(self):
# Iterates through the professors
for pos, professor in enumerate(self.professors["Nome"]):
if str(professor) != 'nan':
professor = str(professor)
period = self.get_author_period(pos) # Get the period of valid publications
author = Author(professor, period, self.qualis_2016, self.qualis_2020, self.qualis_2016_events, self.qualis_2020_events, self.professors, self.authors_dict["Author"])
# print(professor)
# print(pd.DataFrame(author.info))
self.authors_dict["Author"] = author.authors_list # Updates the authors list
self.reports['Author'].append(professor) # Adds the professor to the list of reports
self.reports['Report'].append(pd.DataFrame(author.info)) # Adds the professor's report to the list of reports
self.authors_average.append(author.get_authors_average()) # Adds the "average number of authors per article" to the list of averages
for title in author.exceptions['Nome Trabalho']:
self.exceptions['Nome Trabalho'].append(title)
for event_registered in author.exceptions['Nome Evento Cadastrado']:
self.exceptions['Nome Evento Cadastrado'].append(event_registered)
for canon_event in author.exceptions['Nome Evento Canônico']:
self.exceptions['Nome Evento Canônico'].append(canon_event)
self.exceptions = pd.DataFrame(self.exceptions)
def treat_names(self): # Looks for different spellings of the same name and replaces them with the canonical one
egress_names = []
for egress in self.egress_list:
egress_names.append(treat_exceptions(egress.name.strip()))
students_names = []
for student in self.students_list:
students_names.append(treat_exceptions(student.name.strip()))
for pos, report in enumerate(self.reports["Report"]):
# df = pd.DataFrame(report)
# for index, row in df.iterrows():
for index, row in report.iterrows():
for column in row.index:
if "Autor" in str(column): # Goes through the authors columns
if self.reports["Report"][pos][column][index] != " ":
_, self.reports["Report"][pos].loc[index, column] = search_authors_list(self.authors_dict["Author"], str(self.reports["Report"][pos][column][index]))
_, self.reports["Report"][pos].loc[index, column] = search_authors_list(egress_names, str(self.reports["Report"][pos][column][index]))
_, self.reports["Report"][pos].loc[index, column] = search_authors_list(students_names, str(self.reports["Report"][pos][column][index]))
def get_art_prof(self):
for pos, report in enumerate(self.reports["Report"]):
name_column = [self.reports["Author"][pos] for i in range(len(report))] # Generates a column with the name of the author for each article
report_copy = report.copy() # A copy of the report
report_copy.insert(loc=0, column='Nome', value=name_column) # Adds the name_column
if pos == 0:
self.art_prof = report_copy
else:
self.art_prof = pd.concat([self.art_prof, report_copy], ignore_index=True, sort=False) # Puts the reports together, in one dataframe
# Replace "nan" values with " "
for col in self.art_prof.columns:
if "Autor" in col:
for pos, i in enumerate(self.art_prof[col]):
if str(i) == "NaN" or str(i) == "nan":
self.art_prof.loc[pos, col] = " "
def update_authors_dict(self):
egress_names = []
for egress in self.egress_list:
egress_names.append(treat_exceptions(egress.name.strip())) # Gets the egress' name
students_names = []
for student in self.students_list:
students_names.append(treat_exceptions(student.name.strip())) # Gets the student's name
columns = []
for year in quadrennium:
if year not in columns:
columns.append(year)
# Looks for egress or students and marks them with an "X" in the "A/E" column
for author in self.authors_dict["Author"]:
if author in egress_names or author in students_names:
self.authors_dict["A/E"].append("X")
else:
self.authors_dict["A/E"].append("")
for col in columns:
self.authors_dict[f"20{col}"].append("")
result_df = self.art_prof.apply(lambda x: x.astype(str).str.lower()).drop_duplicates(subset="Título")
publications = self.art_prof.loc[result_df.index]
for index, row in publications.iterrows():
for column in row.index:
if "Autor" in str(column): # Goes through the authors columns
for pos, author in enumerate(self.authors_dict["Author"]):
if author == row[column]:
year = row["Ano"]
if "." in str(year):
year = str(year).replace(".0", "")
year = int(year)
self.authors_dict[str(year)][pos] = "X"
def get_indicators(self):
all_publications = self.artppg.copy()
indicators_2016 = Indicators(self.egress_list, self.students_list, all_publications, "CC 2016", general=True)
gen_ind_2016, gen_ind_2016_journals, gen_ind_2016_proceedings = indicators_2016.get_indicators_2016()
self.general_indicators_2016 = gen_ind_2016
self.general_indicators_2016_journals = gen_ind_2016_journals
self.general_indicators_2016_proceedings = gen_ind_2016_proceedings
self.irestritos_2016 = indicators_2016.irestritos
self.igerais_2016 = indicators_2016.igerais
indicators_2019 = Indicators(self.egress_list, self.students_list, all_publications, "2019", general=True)
gen_ind_2019, gen_ind_2019_journals, gen_ind_2019_proceedings = indicators_2019.get_indicators_2019()
self.general_indicators_2019 = gen_ind_2019
self.general_indicators_2019_journals = gen_ind_2019_journals
self.general_indicators_2019_proceedings = gen_ind_2019_proceedings
self.irestritos_2019 = indicators_2019.irestritos
self.igerais_2019 = indicators_2019.igerais
for report in self.reports["Report"]:
indicators_2016 = Indicators(self.egress_list, self.students_list, report, "CC 2016")
authors_ind_2016, authors_ind_2016_journals, authors_ind_2016_proceedings = indicators_2016.get_indicators_2016()
self.authors_indicators_2016.append(authors_ind_2016)
self.authors_indicators_2016_journals.append(authors_ind_2016_journals)
self.authors_indicators_2016_proceedings.append(authors_ind_2016_proceedings)
indicators_2019 = Indicators(self.egress_list, self.students_list, report, "2019")
authors_ind_2019, authors_ind_2019_journals, authors_ind_2019_proceedings = indicators_2019.get_indicators_2019()
self.authors_indicators_2019.append(authors_ind_2019)
self.authors_indicators_2019_journals.append(authors_ind_2019_journals)
self.authors_indicators_2019_proceedings.append(authors_ind_2019_proceedings)
def analyze_journal_classifications(self, qualis_year):
journals_a1_a4_list = [] # Journals A1-A4
journals_a1_a4_SE = [] # Journals A1-A4 with students and/or egress
journals_a1_b1_list = [] # Journals A1-B1
journals_a1_b1_SE = [] # Journals A1-B1 with students and/or egress
for pos, report in enumerate(self.reports["Report"]):
# Separates by journal classifications
journals = report.loc[report["Tipo"] == "Periódico"] # All the publications in journals
journals_a1 = journals.loc[journals[f"Qualis {qualis_year}"] == "A1"]
journals_a2 = journals.loc[journals[f"Qualis {qualis_year}"] == "A2"]
if qualis_year == "2019":
journals_a3 = journals.loc[journals[f"Qualis {qualis_year}"] == "A3"]
journals_a4 = journals.loc[journals[f"Qualis {qualis_year}"] == "A4"]
journals_a1_a4 = pd.concat([journals_a1, journals_a2, journals_a3, journals_a4], ignore_index=True, sort=False)
# Calculates the amount of articles A1-A4 with and without students/egress
amount_journals_a1_a4 = len(journals_a1_a4.index)
journals_a1_a4_list.append(amount_journals_a1_a4)
indicators = Indicators(self.egress_list, self.students_list, journals_a1_a4, qualis_year)
amount_journals_a1_a4_SE = indicators.get_SE(journals_a1_a4)
journals_a1_a4_SE.append(amount_journals_a1_a4_SE)
elif qualis_year == "CC 2016":
journals_b1 = journals.loc[journals[f"Qualis {qualis_year}"] == "B1"]
journals_a1_b1 = pd.concat([journals_a1, journals_a2, journals_b1], ignore_index=True, sort=False)
# Calculates the amount of articles A1-B1 with and without students/egress
amount_journals_a1_b1 = len(journals_a1_b1.index)
journals_a1_b1_list.append(amount_journals_a1_b1)
indicators = Indicators(self.egress_list, self.students_list, journals_a1_b1, qualis_year)
amount_journals_a1_b1_SE = indicators.get_SE(journals_a1_b1)
journals_a1_b1_SE.append(amount_journals_a1_b1_SE)
if qualis_year == "2019":
return (journals_a1_a4_list, journals_a1_a4_SE)
elif qualis_year == "CC 2016":
return (journals_a1_b1_list, journals_a1_b1_SE)
def analyze_journals(self):
all_publications = self.artppg.copy()
self.journals = all_publications.copy().loc[all_publications["Tipo"] == "Periódico"] # All the publications in journals
self.journals.loc[:, 'Quantidade'] = self.journals["Nome de Publicação"].map(self.journals["Nome de Publicação"].value_counts()) # Calculates the number of times the journal appears and adds that number to a column
columns = ["Nome de Publicação", "ISSN/SIGLA", "Qualis CC 2016", "Qualis 2019", "Scopus 2019", "Quantidade"] # The columns we're gonna use
drop_columns = []
for column in self.journals.columns:
if column not in columns:
drop_columns.append(column)
self.journals = self.journals.drop(columns=drop_columns)
self.journals = self.journals.rename(columns={"ISSN/SIGLA": "ISSN"})
self.journals = self.journals.drop_duplicates(subset="ISSN") # Drop all the duplicated journals
def analyze_journal_metrics(self, qualis_year):
journal_metrics = | pd.DataFrame(columns=[f"Métrica {qualis_year}", "Qtd.", "Qtd. %"]) | pandas.DataFrame |
from core import BasePipeline
from utils import rando_name, r_squared
from uuid import uuid4
import pandas as pd
import numpy as np
# Random Forest Regressor pipeline #############################################################################################################################################3
class RFRegressorPipeline(BasePipeline):
def __init__(self, name, n_trees = 8, client = None):
params = {"n_trees": n_trees,}
BasePipeline.__init__(self, name, "raw_rfr", client, params)
def run(self, X, Y, model):
return BasePipeline.run(self, X = X, Y = Y, model = model)
# Random Forest Regressor model; if pipeline == None then create a pipeline for use with this model
class RFRegressor():
def __init__(self, X = None, Y = None, name = None, pipeline = None, n_trees = 8, client = None):
self.client = client
self.type = "raw_rfr"
if X is not None and Y is not None:
self.__full_init__(X, Y, name, pipeline, n_trees, client)
else:
self.__lazy_init__(name)
def __full_init__(self, X, Y, name = None, pipeline = None, n_trees = 8, client = None):
self.n_trees = n_trees
if name is None:
name = rando_name()
# else:
# todo: add feature to instantiate RFRegressor just from name
# i.e. an already created model
self.name = name
self.X = X
self.Y = Y
self.pipeline = pipeline
if self.pipeline is None:
pipeline_name = rando_name()
self.pipeline = RFRegressorPipeline(pipeline_name, n_trees, client)
self.response = self.pipeline.run(X = self.X, Y = self.Y, model = self.name)
try:
# model will be key if success
model = self.response['model']
self.name = model.split("/")[-1]
except:
# something went wrong creating the model
raise RuntimeError(self.response)  # StandardError was removed in Python 3
# lazy loading for already persisted models
def __lazy_init__(self, model_name):
self.name = model_name
def predict(self, point):
# TODO: predict target value given a new point
extra_params = {"features": point.values.tolist()}
response = self.client.call_model(model = self.name,
type = self.type,
method = "predict",
extra_params = extra_params)
try:
return pd.DataFrame(response['predict'])
except:
raise RuntimeError(response)  # StandardError was removed in Python 3
def score(self):
Y_hat = self.predict(self.X)
return r_squared(Y_hat, self.Y)
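# Example usage (illustrative sketch only; assumes a configured API `client` plus a pandas
# DataFrame `X` and Series `Y` of training data, none of which are defined in this module):
#   model = RFRegressor(X=X, Y=Y, n_trees=16, client=client)
#   preds = model.predict(X.head())  # DataFrame of predictions returned by the service
#   print(model.score())             # R^2 against the training targets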
# K Nearest Neighbors regressor pipeline
class KNNRegressorPipeline(BasePipeline):
def __init__(self, name, K = 5, kernel = "euclidean", algo = "auto", weights = "uniform", kernel_params = {}, client = None):
params = {"k": K,
"kernel": kernel,
"algo": algo,
"weights": weights,
"kernel_params": kernel_params}
BasePipeline.__init__(self, name, "raw_knn_regressor", client, params)
def run(self, X, Y, model):
return BasePipeline.run(self, X = X, Y = Y, model = model)
class KNNRegressor():
def __init__(self, X = None, Y = None, name = None, pipeline = None, K = 5, kernel = "euclidean", algo = "auto", weights = "uniform", kernel_params = {}, client = None):
self.client = client
self.type = "raw_knn_regressor"
if X is not None and Y is not None:
self.__full_init__(X, Y, name, pipeline, K, kernel, algo, weights, kernel_params, client)
else:
self.__lazy_init__(name)
def __full_init__(self, X, Y, name = None, pipeline = None, K = 5, kernel = "euclidean", algo = "auto", weights = "uniform", kernel_params = {}, client = None):
self.X = X
self.Y = Y
if name is None:
name = rando_name()
self.name = name
self.pipeline = pipeline
self.K = K
self.kernel = kernel
self.kernel_params = kernel_params
self.algo = algo
self.weights = weights
if self.pipeline is None:
pipeline_name = rando_name()
self.pipeline = KNNRegressorPipeline(pipeline_name, K, kernel, algo, weights, kernel_params, client)
self.response = self.pipeline.run(X = self.X, Y = self.Y, model = self.name)
try:
# model will be key if success
model = self.response['model']
self.name = model.split("/")[-1]
except:
# something went wrong creating the model
raise RuntimeError(self.response)  # StandardError was removed in Python 3
# lazy loading for already persisted models
def __lazy_init__(self, model_name):
self.name = model_name
def predict(self, point):
extra_params = {"features": point.values.tolist()}
response = self.client.call_model(model = self.name,
type = self.type,
method = "predict",
extra_params = extra_params)
try:
return | pd.DataFrame(response['predict']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import demjson
import pandas as pd
import requests
from zvt.contract import IntervalLevel
from zvt.contract.recorder import FixedCycleDataRecorder
from zvt.utils.time_utils import to_time_str
from zvt import init_log
from zvt.api.quote import generate_kdata_id
from zvt.api import get_kdata
from zvt.domain import Index, Etf1dKdata
from zvt.recorders.consts import EASTMONEY_ETF_NET_VALUE_HEADER
class ChinaETFDayKdataRecorder(FixedCycleDataRecorder):
entity_provider = 'exchange'
entity_schema = Index
provider = 'sina'
data_schema = Etf1dKdata
url = 'http://money.finance.sina.com.cn/quotes_service/api/json_v2.php/CN_MarketData.getKLineData?' \
'symbol={}{}&scale=240&&datalen={}&ma=no'
def __init__(self, entity_type='index', exchanges=['sh', 'sz'], entity_ids=None, codes=None, batch_size=10,
force_update=False, sleeping_time=10, default_size=2000, real_time=True, fix_duplicate_way='add',
start_timestamp=None, end_timestamp=None,
level=IntervalLevel.LEVEL_1DAY, kdata_use_begin_time=False, close_hour=0, close_minute=0,
one_day_trading_minutes=24 * 60) -> None:
super().__init__(entity_type, exchanges, entity_ids, codes, batch_size, force_update, sleeping_time,
default_size, real_time, fix_duplicate_way, start_timestamp, end_timestamp, close_hour,
close_minute, level, kdata_use_begin_time, one_day_trading_minutes)
def get_data_map(self):
return {}
def generate_domain_id(self, entity, original_data):
return generate_kdata_id(entity_id=entity.id, timestamp=original_data['timestamp'], level=self.level)
def on_finish_entity(self, entity):
kdatas = get_kdata(entity_id=entity.id, level=IntervalLevel.LEVEL_1DAY.value,
order=Etf1dKdata.timestamp.asc(),
return_type='domain', session=self.session,
filters=[Etf1dKdata.cumulative_net_value.is_(None)])
if kdatas and len(kdatas) > 0:
start = kdatas[0].timestamp
end = kdatas[-1].timestamp
# Fetch the fund's cumulative net values from EastMoney (东方财富)
df = self.fetch_cumulative_net_value(entity, start, end)
if df is not None and not df.empty:
for kdata in kdatas:
if kdata.timestamp in df.index:
kdata.cumulative_net_value = df.loc[kdata.timestamp, 'LJJZ']
kdata.change_pct = df.loc[kdata.timestamp, 'JZZZL']
self.session.commit()
self.logger.info(f'{entity.code} - {entity.name}: cumulative net value update finished...')
def fetch_cumulative_net_value(self, security_item, start, end) -> pd.DataFrame:
query_url = 'http://api.fund.eastmoney.com/f10/lsjz?' \
'fundCode={}&pageIndex={}&pageSize=200&startDate={}&endDate={}'
page = 1
df = pd.DataFrame()
while True:
url = query_url.format(security_item.code, page, to_time_str(start), to_time_str(end))
response = requests.get(url, headers=EASTMONEY_ETF_NET_VALUE_HEADER)
response_json = demjson.decode(response.text)
response_df = pd.DataFrame(response_json['Data']['LSJZList'])
# Last page reached (empty result)
if response_df.empty:
break
response_df['FSRQ'] = pd.to_datetime(response_df['FSRQ'])
response_df['JZZZL'] = pd.to_numeric(response_df['JZZZL'], errors='coerce')
response_df['LJJZ'] = pd.to_numeric(response_df['LJJZ'], errors='coerce')
response_df = response_df.fillna(0)
response_df.set_index('FSRQ', inplace=True, drop=True)
df = | pd.concat([df, response_df]) | pandas.concat |
import builtins
from io import StringIO
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna
import pandas._testing as tm
import pandas.core.nanops as nanops
from pandas.util import _test_decorators as td
@pytest.fixture(
params=[np.int32, np.int64, np.float32, np.float64],
ids=["np.int32", "np.int64", "np.float32", "np.float64"],
)
def numpy_dtypes_for_minmax(request):
"""
Fixture of numpy dtypes with min and max values used for testing
cummin and cummax
"""
dtype = request.param
min_val = (
np.iinfo(dtype).min if np.dtype(dtype).kind == "i" else np.finfo(dtype).min
)
max_val = (
np.iinfo(dtype).max if np.dtype(dtype).kind == "i" else np.finfo(dtype).max
)
return (dtype, min_val, max_val)
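# Added note: for np.int32, for example, this fixture yields
# (np.int32, -2147483648, 2147483647); the float dtypes yield their np.finfo min/max instead.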
@pytest.mark.parametrize("agg_func", ["any", "all"])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize(
"vals",
[
["foo", "bar", "baz"],
["foo", "", ""],
["", "", ""],
[1, 2, 3],
[1, 0, 0],
[0, 0, 0],
[1.0, 2.0, 3.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[True, True, True],
[True, False, False],
[False, False, False],
[np.nan, np.nan, np.nan],
],
)
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({"key": ["a"] * 3 + ["b"] * 3, "val": vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == "any":
exp = False
exp_df = DataFrame([exp] * 2, columns=["val"], index=Index(["a", "b"], name="key"))
result = getattr(df.groupby("key"), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({"nn": [11, 11, 22, 22], "ii": [1, 2, 3, 4], "ss": 4 * ["mama"]})
result = aa.groupby("nn").max()
assert "ss" in result
result = aa.groupby("nn").max(numeric_only=False)
assert "ss" in result
result = aa.groupby("nn").min()
assert "ss" in result
result = aa.groupby("nn").min(numeric_only=False)
assert "ss" in result
def test_min_date_with_nans():
# GH26321
dates = pd.to_datetime(
pd.Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d"
).dt.date
df = pd.DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates})
result = df.groupby("b", as_index=False)["c"].min()["c"]
expected = pd.to_datetime(
pd.Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d"
).dt.date
tm.assert_series_equal(result, expected)
result = df.groupby("b")["c"].min()
expected.index.name = "b"
tm.assert_series_equal(result, expected)
def test_intercept_builtin_sum():
s = Series([1.0, 2.0, np.nan, 3.0])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(builtins.sum)
result2 = grouped.apply(builtins.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize("keys", ["jim", ["jim", "joe"]]) # Single key # Multi-key
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)), columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = f"invalid frame shape: {result.shape} (expected ({ngroups}, 3))"
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(
result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)),
)
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(), getattr(df, fname)())
def test_arg_passthru():
# make sure that we are passing thru kwargs
# to our agg functions
# GH3668
# GH5724
df = pd.DataFrame(
{
"group": [1, 1, 2],
"int": [1, 2, 3],
"float": [4.0, 5.0, 6.0],
"string": list("abc"),
"category_string": pd.Series(list("abc")).astype("category"),
"category_int": [7, 8, 9],
"datetime": pd.date_range("20130101", periods=3),
"datetimetz": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"timedelta": pd.timedelta_range("1 s", periods=3, freq="s"),
},
columns=[
"group",
"int",
"float",
"string",
"category_string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
],
)
expected_columns_numeric = Index(["int", "float", "category_int"])
# mean / median
expected = pd.DataFrame(
{
"category_int": [7.5, 9],
"float": [4.5, 6.0],
"timedelta": [pd.Timedelta("1.5s"), pd.Timedelta("3s")],
"int": [1.5, 3],
"datetime": [
pd.Timestamp("2013-01-01 12:00:00"),
pd.Timestamp("2013-01-03 00:00:00"),
],
"datetimetz": [
pd.Timestamp("2013-01-01 12:00:00", tz="US/Eastern"),
pd.Timestamp("2013-01-03 00:00:00", tz="US/Eastern"),
],
},
index=Index([1, 2], name="group"),
columns=["int", "float", "category_int", "datetime", "datetimetz", "timedelta"],
)
for attr in ["mean", "median"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(
[
"int",
"float",
"string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
]
)
for attr in ["min", "max"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(
[
"int",
"float",
"string",
"category_string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
]
)
for attr in ["first", "last"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "string", "category_int", "timedelta"])
result = df.groupby("group").sum()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = df.groupby("group").sum(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "category_int"])
for attr in ["prod", "cumprod"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
# like min, max, but don't include strings
expected_columns = Index(
["int", "float", "category_int", "datetime", "datetimetz", "timedelta"]
)
for attr in ["cummin", "cummax"]:
result = getattr(df.groupby("group"), attr)()
# GH 15561: numeric_only=False set by default like min/max
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "category_int", "timedelta"])
result = getattr(df.groupby("group"), "cumsum")()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), "cumsum")(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
def test_non_cython_api():
# GH5610
# non-cython calls should not include the grouper
df = DataFrame(
[[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, "baz"]], columns=["A", "B", "C"]
)
g = df.groupby("A")
gni = df.groupby("A", as_index=False)
# mad
expected = DataFrame([[0], [np.nan]], columns=["B"], index=[1, 3])
expected.index.name = "A"
result = g.mad()
tm.assert_frame_equal(result, expected)
expected = DataFrame([[1, 0.0], [3, np.nan]], columns=["A", "B"], index=[0, 1])
result = gni.mad()
tm.assert_frame_equal(result, expected)
# describe
expected_index = pd.Index([1, 3], name="A")
expected_col = pd.MultiIndex(
levels=[["B"], ["count", "mean", "std", "min", "25%", "50%", "75%", "max"]],
codes=[[0] * 8, list(range(8))],
)
expected = pd.DataFrame(
[
[1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
[0.0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
],
index=expected_index,
columns=expected_col,
)
result = g.describe()
tm.assert_frame_equal(result, expected)
expected = pd.concat(
[
df[df.A == 1].describe().unstack().to_frame().T,
df[df.A == 3].describe().unstack().to_frame().T,
]
)
expected.index = pd.Index([0, 1])
result = gni.describe()
tm.assert_frame_equal(result, expected)
# any
expected = DataFrame(
[[True, True], [False, True]], columns=["B", "C"], index=[1, 3]
)
expected.index.name = "A"
result = g.any()
tm.assert_frame_equal(result, expected)
# idxmax
expected = DataFrame([[0.0], [np.nan]], columns=["B"], index=[1, 3])
expected.index.name = "A"
result = g.idxmax()
tm.assert_frame_equal(result, expected)
def test_cython_api2():
# this takes the fast apply path
# cumsum (GH5614)
df = DataFrame([[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]], columns=["A", "B", "C"])
expected = DataFrame([[2, np.nan], [np.nan, 9], [4, 9]], columns=["B", "C"])
result = df.groupby("A").cumsum()
tm.assert_frame_equal(result, expected)
# GH 5755 - cumsum is a transformer and should ignore as_index
result = df.groupby("A", as_index=False).cumsum()
tm.assert_frame_equal(result, expected)
# GH 13994
result = df.groupby("A").cumsum(axis=1)
expected = df.cumsum(axis=1)
tm.assert_frame_equal(result, expected)
result = df.groupby("A").cumprod(axis=1)
expected = df.cumprod(axis=1)
tm.assert_frame_equal(result, expected)
def test_cython_median():
df = DataFrame(np.random.randn(1000))
df.values[::2] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
labels[::17] = np.nan
result = df.groupby(labels).median()
exp = df.groupby(labels).agg(nanops.nanmedian)
tm.assert_frame_equal(result, exp)
df = DataFrame(np.random.randn(1000, 5))
rs = df.groupby(labels).agg(np.median)
xp = df.groupby(labels).median()
tm.assert_frame_equal(rs, xp)
def test_median_empty_bins(observed):
df = pd.DataFrame(np.random.randint(0, 44, 500))
grps = range(0, 55, 5)
bins = pd.cut(df[0], grps)
result = df.groupby(bins, observed=observed).median()
expected = df.groupby(bins, observed=observed).agg(lambda x: x.median())
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype", ["int8", "int16", "int32", "int64", "float32", "float64", "uint64"]
)
@pytest.mark.parametrize(
"method,data",
[
("first", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
("last", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
("min", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
("max", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
("nth", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}], "args": [1]}),
("count", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 2}], "out_type": "int64"}),
],
)
def test_groupby_non_arithmetic_agg_types(dtype, method, data):
# GH9311, GH6620
df = pd.DataFrame(
[{"a": 1, "b": 1}, {"a": 1, "b": 2}, {"a": 2, "b": 3}, {"a": 2, "b": 4}]
)
df["b"] = df.b.astype(dtype)
if "args" not in data:
data["args"] = []
if "out_type" in data:
out_type = data["out_type"]
else:
out_type = dtype
exp = data["df"]
df_out = pd.DataFrame(exp)
df_out["b"] = df_out.b.astype(out_type)
df_out.set_index("a", inplace=True)
grpd = df.groupby("a")
t = getattr(grpd, method)(*data["args"])
tm.assert_frame_equal(t, df_out)
@pytest.mark.parametrize(
"i",
[
(
Timestamp("2011-01-15 12:50:28.502376"),
Timestamp("2011-01-20 12:50:28.593448"),
),
(24650000000000001, 24650000000000002),
],
)
def test_groupby_non_arithmetic_agg_int_like_precision(i):
# see gh-6620, gh-9311
df = pd.DataFrame([{"a": 1, "b": i[0]}, {"a": 1, "b": i[1]}])
grp_exp = {
"first": {"expected": i[0]},
"last": {"expected": i[1]},
"min": {"expected": i[0]},
"max": {"expected": i[1]},
"nth": {"expected": i[1], "args": [1]},
"count": {"expected": 2},
}
for method, data in grp_exp.items():
if "args" not in data:
data["args"] = []
grouped = df.groupby("a")
res = getattr(grouped, method)(*data["args"])
assert res.iloc[0].b == data["expected"]
@pytest.mark.parametrize(
"func, values",
[
("idxmin", {"c_int": [0, 2], "c_float": [1, 3], "c_date": [1, 2]}),
("idxmax", {"c_int": [1, 3], "c_float": [0, 2], "c_date": [0, 3]}),
],
)
def test_idxmin_idxmax_returns_int_types(func, values):
# GH 25444
df = pd.DataFrame(
{
"name": ["A", "A", "B", "B"],
"c_int": [1, 2, 3, 4],
"c_float": [4.02, 3.03, 2.04, 1.05],
"c_date": ["2019", "2018", "2016", "2017"],
}
)
df["c_date"] = pd.to_datetime(df["c_date"])
result = getattr(df.groupby("name"), func)()
expected = pd.DataFrame(values, index=Index(["A", "B"], name="name"))
tm.assert_frame_equal(result, expected)
def test_fill_consistency():
# GH9221
# pass thru keyword arguments to the generated wrapper
# are set if the passed kw is None (only)
df = DataFrame(
index=pd.MultiIndex.from_product(
[["value1", "value2"], date_range("2014-01-01", "2014-01-06")]
),
columns=Index(["1", "2"], name="id"),
)
df["1"] = [
np.nan,
1,
np.nan,
np.nan,
11,
np.nan,
np.nan,
2,
np.nan,
np.nan,
22,
np.nan,
]
df["2"] = [
np.nan,
3,
np.nan,
np.nan,
33,
np.nan,
np.nan,
4,
np.nan,
np.nan,
44,
np.nan,
]
expected = df.groupby(level=0, axis=0).fillna(method="ffill")
result = df.T.groupby(level=0, axis=1).fillna(method="ffill").T
tm.assert_frame_equal(result, expected)
def test_groupby_cumprod():
# GH 4095
df = pd.DataFrame({"key": ["b"] * 10, "value": 2})
actual = df.groupby("key")["value"].cumprod()
expected = df.groupby("key")["value"].apply(lambda x: x.cumprod())
expected.name = "value"
tm.assert_series_equal(actual, expected)
df = pd.DataFrame({"key": ["b"] * 100, "value": 2})
actual = df.groupby("key")["value"].cumprod()
# if overflows, groupby product casts to float
# while numpy passes back invalid values
df["value"] = df["value"].astype(float)
expected = df.groupby("key")["value"].apply(lambda x: x.cumprod())
expected.name = "value"
tm.assert_series_equal(actual, expected)
def scipy_sem(*args, **kwargs):
from scipy.stats import sem
return sem(*args, ddof=1, **kwargs)
@pytest.mark.parametrize(
"op,targop",
[
("mean", np.mean),
("median", np.median),
("std", np.std),
("var", np.var),
("sum", np.sum),
("prod", np.prod),
("min", np.min),
("max", np.max),
("first", lambda x: x.iloc[0]),
("last", lambda x: x.iloc[-1]),
("count", np.size),
pytest.param("sem", scipy_sem, marks=td.skip_if_no_scipy),
],
)
def test_ops_general(op, targop):
df = DataFrame(np.random.randn(1000))
labels = np.random.randint(0, 50, size=1000).astype(float)
result = getattr(df.groupby(labels), op)().astype(float)
expected = df.groupby(labels).agg(targop)
tm.assert_frame_equal(result, expected)
def test_max_nan_bug():
raw = """,Date,app,File
-04-23,2013-04-23 00:00:00,,log080001.log
-05-06,2013-05-06 00:00:00,,log.log
-05-07,2013-05-07 00:00:00,OE,xlsx"""
df = pd.read_csv(StringIO(raw), parse_dates=[0])
gb = df.groupby("Date")
r = gb[["File"]].max()
e = gb["File"].max().to_frame()
tm.assert_frame_equal(r, e)
assert not r["File"].isna().any()
def test_nlargest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list("a" * 5 + "b" * 5))
gb = a.groupby(b)
r = gb.nlargest(3)
e = Series(
[7, 5, 3, 10, 9, 6],
index=MultiIndex.from_arrays([list("aaabbb"), [3, 2, 1, 9, 5, 8]]),
)
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series(
[3, 2, 1, 3, 3, 2],
index=MultiIndex.from_arrays([list("aaabbb"), [2, 3, 1, 6, 5, 7]]),
)
tm.assert_series_equal(gb.nlargest(3, keep="last"), e)
def test_nlargest_mi_grouper():
# see gh-21411
npr = np.random.RandomState(123456789)
dts = date_range("20180101", periods=10)
iterables = [dts, ["one", "two"]]
idx = MultiIndex.from_product(iterables, names=["first", "second"])
s = Series(npr.randn(20), index=idx)
result = s.groupby("first").nlargest(1)
exp_idx = MultiIndex.from_tuples(
[
(dts[0], dts[0], "one"),
(dts[1], dts[1], "one"),
(dts[2], dts[2], "one"),
(dts[3], dts[3], "two"),
(dts[4], dts[4], "one"),
(dts[5], dts[5], "one"),
(dts[6], dts[6], "one"),
(dts[7], dts[7], "one"),
(dts[8], dts[8], "two"),
(dts[9], dts[9], "one"),
],
names=["first", "first", "second"],
)
exp_values = [
2.2129019979039612,
1.8417114045748335,
0.858963679564603,
1.3759151378258088,
0.9430284594687134,
0.5296914208183142,
0.8318045593815487,
-0.8476703342910327,
0.3804446884133735,
-0.8028845810770998,
]
expected = Series(exp_values, index=exp_idx)
tm.assert_series_equal(result, expected, check_exact=False, rtol=1e-3)
def test_nsmallest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list("a" * 5 + "b" * 5))
gb = a.groupby(b)
r = gb.nsmallest(3)
e = Series(
[1, 2, 3, 0, 4, 6],
index=MultiIndex.from_arrays([list("aaabbb"), [0, 4, 1, 6, 7, 8]]),
)
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series(
[0, 1, 1, 0, 1, 2],
index=MultiIndex.from_arrays([list("aaabbb"), [4, 1, 0, 9, 8, 7]]),
)
tm.assert_series_equal(gb.nsmallest(3, keep="last"), e)
@pytest.mark.parametrize("func", ["cumprod", "cumsum"])
def test_numpy_compat(func):
# see gh-12811
df = pd.DataFrame({"A": [1, 2, 1], "B": [1, 2, 3]})
g = df.groupby("A")
msg = "numpy operations are not valid with groupby"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(g, func)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(g, func)(foo=1)
def test_cummin(numpy_dtypes_for_minmax):
dtype = numpy_dtypes_for_minmax[0]
min_val = numpy_dtypes_for_minmax[1]
# GH 15048
base_df = pd.DataFrame(
{"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]}
)
expected_mins = [3, 3, 3, 2, 2, 2, 2, 1]
df = base_df.astype(dtype)
expected = pd.DataFrame({"B": expected_mins}).astype(dtype)
result = df.groupby("A").cummin()
tm.assert_frame_equal(result, expected)
result = df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# Test w/ min value for dtype
df.loc[[2, 6], "B"] = min_val
expected.loc[[2, 3, 6, 7], "B"] = min_val
result = df.groupby("A").cummin()
tm.assert_frame_equal(result, expected)
expected = df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# Test nan in some values
base_df.loc[[0, 2, 4, 6], "B"] = np.nan
expected = pd.DataFrame({"B": [np.nan, 4, np.nan, 2, np.nan, 3, np.nan, 1]})
result = base_df.groupby("A").cummin()
tm.assert_frame_equal(result, expected)
expected = base_df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
import os
from io import BytesIO
import io
import csv
import requests
from zipfile import ZipFile
from urllib.request import urlopen
import pandas as pd
from pathlib import Path
NEO4J_IMPORT = Path(os.getenv('NEO4J_IMPORT'))
print(NEO4J_IMPORT)
CACHE = Path(NEO4J_IMPORT / 'cache')
CACHE.mkdir(exist_ok=True)
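# Added note: NEO4J_IMPORT is assumed to point at Neo4j's import directory
# (e.g. /var/lib/neo4j/import). If the environment variable is unset, os.getenv returns None
# and the Path() call above raises a TypeError, so it must be exported before running this script.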
def import_countries():
country_url = 'https://download.geonames.org/export/dump/countryInfo.txt'
names = ['ISO','ISO3','ISO-Numeric','fips','Country','Capital','Area(in sq km)','Population',
'Continent','tld','CurrencyCode','CurrencyName','Phone','Postal Code Format',
'Postal Code Regex','Languages','geonameid','neighbours','EquivalentFipsCode'
]
countries = pd.read_csv(country_url, sep='\t',comment='#', dtype='str', names=names)
# Add missing ISO code for Namibia (its ISO code 'NA' is read as NaN by read_csv)
index = countries.query("ISO3 == 'NAM'").index
countries.at[index, 'ISO'] = 'NA'
countries['id'] = countries['ISO'] # standard id column to link nodes
countries.rename(columns={'ISO': 'iso'}, inplace=True)
countries.rename(columns={'ISO3': 'iso3'}, inplace=True)
countries.rename(columns={'ISO-Numeric': 'isoNumeric'}, inplace=True)
countries.rename(columns={'Country': 'name'}, inplace=True)
countries.rename(columns={'Population': 'population'}, inplace=True)
countries.rename(columns={'Area(in sq km)': 'areaSqKm'}, inplace=True)
countries.rename(columns={'geonameid': 'geonameId'}, inplace=True)
countries.rename(columns={'Continent': 'parentId'}, inplace=True)
countries = countries[['id','name','iso','iso3','isoNumeric', 'parentId', 'areaSqKm','geonameId', 'neighbours']].copy()
countries.fillna('', inplace=True)
countries.to_csv(NEO4J_IMPORT / "00e-GeoNamesCountry.csv", index=False)
def import_admin1():
admin1_url = 'https://download.geonames.org/export/dump/admin1CodesASCII.txt'
names = ['code', 'name', 'name_ascii', 'geonameid']
admin1 = pd.read_csv(admin1_url, sep='\t', dtype='str', names=names)
admin1 = admin1[['code', 'name_ascii', 'geonameid']]
admin1.rename(columns={'code': 'id'}, inplace=True) # standard id column to link nodes
admin1.rename(columns={'name_ascii': 'name'}, inplace=True)
admin1.rename(columns={'geonameid': 'geonameId'}, inplace=True)
admin1['code'] = admin1['id'].str.split('.', expand=True)[1]
admin1['parentId'] = admin1['id'].str.split('.', expand=True)[0]
admin1['name'] = admin1['name'].str.replace('Washington, D.C.', 'District of Columbia')
admin1 = admin1[['id','name','code','parentId', 'geonameId']]
admin1.fillna('', inplace=True)
admin1.to_csv(NEO4J_IMPORT / "00f-GeoNamesAdmin1.csv", index=False)
def import_admin2():
admin2_url = 'https://download.geonames.org/export/dump/admin2Codes.txt'
names = ['code', 'name', 'name_ascii', 'geonameid']
admin2 = pd.read_csv(admin2_url, sep='\t', dtype='str', names=names)
admin2 = admin2[['code', 'name_ascii', 'geonameid']]
admin2.rename(columns={'code': 'id'}, inplace=True) # standard id column to link nodes
admin2.rename(columns={'name_ascii': 'name'}, inplace=True)
admin2.rename(columns={'geonameid': 'geonameId'}, inplace=True)
admin2['parentId'] = admin2['id'].str.rsplit('.', n=1, expand=True)[0]
admin2.loc[admin2['id'] == 'US.DC.001', 'name'] = 'District of Columbia'
admin2.loc[admin2['id'] == 'US.CA.075', 'name'] = 'San Francisco'  # US.CA.075 = San Francisco County
admin2.to_csv(NEO4J_IMPORT / "00g-GeoNamesAdmin2.csv", index=False)
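# Added note: rsplit('.', n=1) keeps everything left of the last dot, so an admin2 id such as
# 'US.CA.075' gets parentId 'US.CA', which links each admin2 row to its admin1 region.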
def get_location_id(country, admin1, admin2):
location = country
if admin1 != '':
location = location + '.' + admin1
if admin2 != '':
location = location + '.' + admin2
return location
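# Added example: get_location_id('US', 'CA', '075') returns 'US.CA.075', while a record without
# admin codes, e.g. get_location_id('SG', '', ''), returns just 'SG'.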
def import_cities():
urls = [
'https://download.geonames.org/export/dump/cities15000.zip',
'https://download.geonames.org/export/dump/cities5000.zip',
'https://download.geonames.org/export/dump/cities1000.zip',
'https://download.geonames.org/export/dump/cities500.zip',
]
names = [
'geonameid','name','asciiname','alternatenames','latitude','longitude','feature class',
'feature code','country code','cc2','admin1 code','admin2 code','admin3 code','admin4 code',
'population','elevation','dem','timezone','modification date'
]
dfs = []
for url in urls:
file_name = url.split('/')[-1].split('.')[0] + '.txt'
resp = urlopen(url)
zipfile = ZipFile(BytesIO(resp.read()))
city_df = pd.read_csv(zipfile.open(file_name), sep="\t", low_memory=False, names=names)
dfs.append(city_df)
city = pd.concat(dfs)
city = city[['geonameid', 'asciiname', 'country code', 'admin1 code', 'admin2 code']]
city.fillna('', inplace=True)
city.drop_duplicates('geonameid', inplace=True)
city.rename(columns={'geonameid': 'geonameId'}, inplace=True)
city['id'] = city['geonameId']
city.rename(columns={'asciiname': 'name'}, inplace=True)
city['parentId'] = city.apply(lambda row: get_location_id(row['country code'],
row['admin1 code'],
row['admin2 code']), axis=1)
city = city[['id', 'name', 'parentId', 'geonameId']]
city.fillna('', inplace=True)
city.to_csv(NEO4J_IMPORT / "00h-GeoNamesCity.csv", index=False)
def import_UNRegions():
url = "https://unstats.un.org/unsd/methodology/m49/overview"
df = pd.read_html(url, attrs={"id": "downloadTableEN"})[0]
df.rename(columns={
"Region Name": "UNRegion",
"Region Code": "UNRegionCode",
"Sub-region Name": "UNSubRegion",
"Sub-region Code": "UNSubRegionCode",
"Intermediate Region Name": "UNIntermediateRegion",
"Intermediate Region Code": "UNIntermediateRegionCode",
"ISO-alpha3 Code": "iso3",
}, inplace=True)
additions = pd.read_csv("/home/pseudo/Coding/GeoGraph/reference_data/UNRegionAdditions.csv")
additions.fillna('', inplace=True)
df = pd.concat([df, additions])  # DataFrame.append was removed in pandas 2.0; concat is the drop-in equivalent
df = df.fillna("").astype(str)
df['UNRegionCode'] = 'm49:' + df['UNRegionCode']
df['UNSubRegionCode'] = 'm49:' + df['UNSubRegionCode']
df['UNIntermediateRegionCode'] = 'm49:' + df['UNIntermediateRegionCode']
# Export All
df.to_csv(NEO4J_IMPORT / "00k-UNAll.csv", index=False)
# Export Intermediate Regions
intermediateRegion = df[df['UNIntermediateRegion'] != '']
intermediateRegion.to_csv(NEO4J_IMPORT / "00k-UNIntermediateRegion.csv", index=False)
# Export Sub-regions
subRegion = df[(df['UNSubRegion'] != '') & (df['UNIntermediateRegion'] == '')]
subRegion.to_csv(NEO4J_IMPORT / "00k-UNSubRegion.csv", index=False)
# Export last
region = df[(df['UNSubRegion'] == '') & (df['UNIntermediateRegion'] == '')]
region.to_csv(NEO4J_IMPORT / "00k-UNRegion.csv", index=False)
def add_data():
"""
adds latitude, longitude, elevation, and population data from GeoNames
to Country, Admin1, Admin2, and City .csv files for ingestion into the Knowledge Graph
"""
country_url = 'https://download.geonames.org/export/dump/allCountries.zip'
content = requests.get(country_url)
zf = ZipFile(BytesIO(content.content))
for item in zf.namelist():
print("File in zip: "+ item)
# Intermediate data cached here
encoding = 'utf-8'
path = CACHE / 'allCountries.csv'
try:
with zf.open('allCountries.txt') as readfile:
with open(path, "w") as file_out:
writer = csv.writer(file_out)
for line in io.TextIOWrapper(readfile, encoding):
row = line.strip().split("\t")
if row[6] == 'A' or row[6] == 'P':
writer.writerow([row[0], row[4], row[5], row[14], row[15]])
except:
print('Download of allCountries.txt failed, using cached version of data')
columns = ['geonameId', 'latitude', 'longitude', 'population', 'elevation']
# If data download failed cached file from past run is used
df = pd.read_csv(path, names=columns, dtype='str', header=0)
df.fillna('', inplace=True)
df['population'] = df['population'].replace('0', '')  # blank out a population recorded as exactly '0' (str.replace would strip every zero digit)
dfc = df[['geonameId', 'latitude', 'longitude', 'population']]
country = pd.read_csv(NEO4J_IMPORT / "00e-GeoNamesCountry.csv", dtype='str')
country = pd.merge(country, dfc, on='geonameId', how='left')
country.fillna('', inplace=True)
# reset the id and iso code for Namibia
index = country.query("iso3 == 'NAM'").index
country.at[index, 'iso'] = 'NA'
country.at[index, 'id'] = 'NA'
country.to_csv(NEO4J_IMPORT / "00e-GeoNamesCountry.csv", index=False)
# Add data to admin1 csv
admin1 = pd.read_csv(NEO4J_IMPORT / "00f-GeoNamesAdmin1.csv", dtype='str')
admin1 = pd.merge(admin1, df, on='geonameId', how='left')
admin1.fillna('', inplace=True)
admin1.to_csv(NEO4J_IMPORT / "00f-GeoNamesAdmin1.csv", index=False)
# Add data for admin2 csv
admin2 = pd.read_csv(NEO4J_IMPORT / "00g-GeoNamesAdmin2.csv", dtype='str')
admin2 = pd.merge(admin2, df, on='geonameId', how='left')
admin2.fillna('', inplace=True)
admin2.to_csv(NEO4J_IMPORT / "00g-GeoNamesAdmin2.csv", index=False)
# Add data for cities csv
city = pd.read_csv(NEO4J_IMPORT / "00h-GeoNamesCity.csv", dtype='str')
city = | pd.merge(city, df, on='geonameId', how='left') | pandas.merge |
import builtins
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
isna,
)
import pandas._testing as tm
@pytest.mark.parametrize("agg_func", ["any", "all"])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize(
"vals",
[
["foo", "bar", "baz"],
["foo", "", ""],
["", "", ""],
[1, 2, 3],
[1, 0, 0],
[0, 0, 0],
[1.0, 2.0, 3.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[True, True, True],
[True, False, False],
[False, False, False],
[np.nan, np.nan, np.nan],
],
)
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({"key": ["a"] * 3 + ["b"] * 3, "val": vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == "any":
exp = False
exp_df = DataFrame([exp] * 2, columns=["val"], index=Index(["a", "b"], name="key"))
result = getattr(df.groupby("key"), agg_func)(skipna=skipna)
| tm.assert_frame_equal(result, exp_df) | pandas._testing.assert_frame_equal |
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def mssql_url() -> str:
conn = os.environ["MSSQL_URL"]
return conn
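# Added note (assumption, not part of the original suite): MSSQL_URL is expected to hold a
# connectorx-style connection string such as "mssql://username:password@host:1433/dbname".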
@pytest.mark.xfail
def test_on_non_select(mssql_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
df = read_sql(mssql_url, query)
def test_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_float) as sum FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"sum": pd.Series([10.9, 5.2, -10.0], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_int) AS test_int FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"test_int": pd.Series([4, 5, 1315], dtype="Int64"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_aggregation2(mssql_url: str) -> None:
query = "select DISTINCT(test_bool) from test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation2(mssql_url: str) -> None:
query = "select MAX(test_int) as max, MIN(test_int) as min from test_table"
df = read_sql(mssql_url, query, partition_on="max", partition_num=2)
expected = pd.DataFrame(
index=range(1),
data={
"max": pd.Series([1314], dtype="Int64"),
"min": pd.Series([0], dtype="Int64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_udf(mssql_url: str) -> None:
query = (
"SELECT dbo.increment(test_int) AS test_int FROM test_table ORDER BY test_int"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 3, 4, 5, 1315], dtype="Int64"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_manual_partition(mssql_url: str) -> None:
queries = [
"SELECT * FROM test_table WHERE test_int < 2",
"SELECT * FROM test_table WHERE test_int >= 2",
]
df = read_sql(mssql_url, query=queries)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_without_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_without_partition(mssql_url: str) -> None:
query = "SELECT top 3 * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_int": pd.Series([1, 2, 0], dtype="int64"),
"test_nullint": pd.Series([3, None, 5], dtype="Int64"),
"test_str": pd.Series(["str1", "str2", "a"], dtype="object"),
"test_float": pd.Series([None, 2.2, 3.1], dtype="float64"),
"test_bool": pd.Series([True, False, None], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_large_without_partition(mssql_url: str) -> None:
query = "SELECT top 10 * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_with_partition(mssql_url: str) -> None:
query = "SELECT top 3 * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(3),
data={
"test_int": pd.Series([0, 1, 2], dtype="int64"),
"test_nullint": pd.Series([5, 3, None], dtype="Int64"),
"test_str": pd.Series(["a", "str1", "str2"], dtype="object"),
"test_float": pd.Series([3.1, None, 2.20], dtype="float64"),
"test_bool": pd.Series([None, True, False], dtype="boolean"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_large_with_partition(mssql_url: str) -> None:
query = "SELECT top 10 * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_without_partition_range(mssql_url: str) -> None:
query = "SELECT * FROM test_table where test_float > 3"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_num=3,
)
expected = pd.DataFrame(
index=range(2),
data={
"test_int": pd.Series([0, 4], dtype="int64"),
"test_nullint": pd.Series([5, 9], dtype="Int64"),
"test_str": pd.Series(["a", "c"], dtype="object"),
"test_float": pd.Series([3.1, 7.8], dtype="float64"),
"test_bool": pd.Series([None, None], dtype="boolean"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_and_selection(mssql_url: str) -> None:
query = "SELECT * FROM test_table WHERE 1 = 3 OR 2 = 2"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_and_projection(mssql_url: str) -> None:
query = "SELECT test_int, test_float, test_str FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_and_spja(mssql_url: str) -> None:
query = """
SELECT test_bool, AVG(test_float) AS avg, SUM(test_int) AS sum
FROM test_table AS a, test_str AS b
WHERE a.test_int = b.id AND test_nullint IS NOT NULL
GROUP BY test_bool
ORDER BY sum
"""
df = read_sql(mssql_url, query, partition_on="sum", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([True, False, None], dtype="boolean"),
"avg": pd.Series([None, 3, 5.45], dtype="float64"),
"sum": pd.Series([1, 3, 4], dtype="Int64"),
},
)
df = df.sort_values("sum").reset_index(drop=True)
assert_frame_equal(df, expected, check_names=True)
def test_empty_result(mssql_url: str) -> None:
query = "SELECT * FROM test_table where test_int < -100"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
data={
"test_int": pd.Series([], dtype="int64"),
"test_nullint": pd.Series([], dtype="Int64"),
"test_str": pd.Series([], dtype="object"),
"test_float": pd.Series([], dtype="float64"),
"test_bool": pd.Series([], dtype="boolean"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_empty_result_on_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table where test_int < -100"
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=3)
expected = pd.DataFrame(
data={
"test_int": pd.Series([], dtype="int64"),
"test_nullint": pd.Series([], dtype="Int64"),
"test_str": pd.Series([], dtype="object"),
"test_float": pd.Series([], dtype="float64"),
"test_bool": pd.Series([], dtype="boolean"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_empty_result_on_some_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table where test_int < 1"
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=3)
expected = pd.DataFrame(
data={
"test_int": pd.Series([0], dtype="int64"),
"test_nullint": pd.Series([5], dtype="Int64"),
"test_str": pd.Series(["a"], dtype="object"),
"test_float": pd.Series([3.1], dtype="float"),
"test_bool": | pd.Series([None], dtype="boolean") | pandas.Series |
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_series_equal
from src.contact_models.contact_model_functions import _draw_nr_of_contacts
from src.contact_models.contact_model_functions import _draw_potential_vacation_contacts
from src.contact_models.contact_model_functions import (
_identify_ppl_affected_by_vacation,
)
from src.contact_models.contact_model_functions import (
calculate_non_recurrent_contacts_from_empirical_distribution,
)
from src.contact_models.contact_model_functions import go_to_daily_work_meeting
from src.contact_models.contact_model_functions import go_to_weekly_meeting
from src.contact_models.contact_model_functions import meet_daily_other_contacts
from src.contact_models.contact_model_functions import reduce_contacts_on_condition
from src.shared import draw_groups
@pytest.fixture
def params():
params = pd.DataFrame()
params["category"] = ["work_non_recurrent"] * 2 + ["other_non_recurrent"] * 2
params["subcategory"] = [
"symptomatic_multiplier",
"positive_test_multiplier",
] * 2
params["name"] = ["symptomatic_multiplier", "positive_test_multiplier"] * 2
params["value"] = [0.0, 0.0, 0.0, 0.0]
params.set_index(["category", "subcategory", "name"], inplace=True)
return params
@pytest.fixture
def states():
"""states DataFrame for testing purposes.
Columns:
- date: 2020-04-01 - 2020-04-30
- id: 50 individuals, with 30 observations each. id goes from 0 to 49.
- immune: bool
- infectious: bool
- age_group: ordered Categorical, either 10-19 or 40-49.
- region: unordered Categorical, ['Overtjssel', 'Drenthe', 'Gelderland']
- n_has_infected: int, 0 to 3.
- cd_infectious_false: int, -66 to 8.
- occupation: Categorical. "working" or "in school".
- cd_symptoms_false: int, positive for the first 20 individuals, negative after.
"""
this_modules_path = Path(__file__).resolve()
states = pd.read_parquet(this_modules_path.parent / "1.parquet")
old_to_new = {old: i for i, old in enumerate(sorted(states["id"].unique()))}
states["id"].replace(old_to_new, inplace=True)
states["age_group"] = pd.Categorical(
states["age_group"], ["10 - 19", "40 - 49"], ordered=True
)
states["age_group"] = states["age_group"].cat.rename_categories(
{"10 - 19": "10-19", "40 - 49": "40-49"}
)
states["region"] = pd.Categorical(
states["region"], ["Overtjssel", "Drenthe", "Gelderland"], ordered=False
)
states["date"] = pd.to_datetime(states["date"], format="%Y-%m-%d", unit="D")
states["n_has_infected"] = states["n_has_infected"].astype(int)
states["cd_infectious_false"] = states["cd_infectious_false"].astype(int)
states["occupation"] = states["age_group"].replace(
{"10-19": "in school", "40-49": "working"}
)
states["cd_symptoms_false"] = list(range(1, 21)) + list(range(-len(states), -20))
states["symptomatic"] = states["cd_symptoms_false"] >= 0
states["knows_infectious"] = False
states["knows_immune"] = False
states["cd_received_test_result_true"] = -100
states["knows_currently_infected"] = states.eval(
"knows_infectious | (knows_immune & symptomatic) "
"| (knows_immune & (cd_received_test_result_true >= -13))"
)
states["quarantine_compliance"] = 1.0
return states
@pytest.fixture
def a_thursday(states):
a_thursday = states[states["date"] == "2020-04-30"].copy()
a_thursday["cd_symptoms_false"] = list(range(1, 21)) + list(
range(-len(a_thursday), -20)
)
a_thursday["symptomatic"] = a_thursday["cd_symptoms_false"] >= 0
a_thursday["work_recurrent_weekly"] = draw_groups(
df=a_thursday,
query="occupation == 'working'",
assort_bys=["region"],
n_per_group=20,
seed=484,
)
return a_thursday
@pytest.fixture
def no_reduction_params():
params = pd.DataFrame()
params["subcategory"] = ["symptomatic_multiplier", "positive_test_multiplier"]
params["name"] = params["subcategory"]
params["value"] = 1.0
params = params.set_index(["subcategory", "name"])
return params
# ----------------------------------------------------------------------------
def test_go_to_weekly_meeting_wrong_day(a_thursday):
a_thursday["group_col"] = [1, 2, 1, 2, 3, 3, 3] + [-1] * (len(a_thursday) - 7)
contact_params = pd.DataFrame()
group_col_name = "group_col"
day_of_week = "Saturday"
seed = 3931
res = go_to_weekly_meeting(
a_thursday, contact_params, group_col_name, day_of_week, seed
)
expected = pd.Series(False, index=a_thursday.index)
assert_series_equal(res, expected, check_names=False)
def test_go_to_weekly_meeting_right_day(a_thursday, no_reduction_params):
a_thursday["group_col"] = [1, 2, 1, 2, 3, 3, 3] + [-1] * (len(a_thursday) - 7)
res = go_to_weekly_meeting(
states=a_thursday,
params=no_reduction_params,
group_col_name="group_col",
day_of_week="Thursday",
seed=3931,
)
expected = pd.Series(False, index=a_thursday.index)
expected[:7] = True
assert_series_equal(res, expected, check_names=False)
def test_go_to_daily_work_meeting_weekend(states, no_reduction_params):
a_saturday = states[states["date"] == pd.Timestamp("2020-04-04")].copy()
a_saturday["work_saturday"] = [True, True] + [False] * (len(a_saturday) - 2)
a_saturday["work_daily_group_id"] = 333
res = go_to_daily_work_meeting(a_saturday, no_reduction_params, seed=None)
expected = pd.Series(False, index=a_saturday.index)
expected[:2] = True
assert_series_equal(res, expected, check_names=False)
def test_go_to_daily_work_meeting_weekday(a_thursday, no_reduction_params):
a_thursday["work_daily_group_id"] = [1, 2, 1, 2, 3, 3, 3] + [-1] * (
len(a_thursday) - 7
)
res = go_to_daily_work_meeting(a_thursday, no_reduction_params, seed=None)
expected = pd.Series(False, index=a_thursday.index)
# not every one we assigned a group id is a worker
expected.iloc[:7] = [True, True, False, True, True, False, True]
assert_series_equal(res, expected, check_names=False)
def test_go_to_daily_work_meeting_weekday_with_reduction(
a_thursday, no_reduction_params
):
reduction_params = no_reduction_params
reduction_params["value"] = 0.0
a_thursday["work_daily_group_id"] = [1, 2, 1, 2, 3, 3, 3, 3, 3] + [-1] * (
len(a_thursday) - 9
)
a_thursday.loc[1450:1458, "symptomatic"] = [
False,
False,
False,
False,
True,
False,
False,
False,
False,
]
res = go_to_daily_work_meeting(a_thursday, no_reduction_params, seed=None)
expected = pd.Series(False, index=a_thursday.index)
# not everyone we assigned a group id to is a worker
expected[:9] = [True, True, False, True, False, False, True, False, True]
assert_series_equal(res, expected, check_names=False)
# --------------------------- Non Recurrent Contact Models ---------------------------
def test_non_recurrent_work_contacts_weekend(states, params):
a_saturday = states[states["date"] == pd.Timestamp("2020-04-04")]
res = calculate_non_recurrent_contacts_from_empirical_distribution(
states=a_saturday,
params=params.loc["work_non_recurrent"],
on_weekends=False,
query="occupation == 'working'",
seed=494,
)
assert_series_equal(res, pd.Series(data=0, index=a_saturday.index, dtype=float))
@pytest.fixture
def params_with_positive():
params = pd.DataFrame.from_dict(
{
"category": ["work_non_recurrent"] * 3,
"subcategory": [
"all",
"symptomatic_multiplier",
"positive_test_multiplier",
],
"name": [
2,
"symptomatic_multiplier",
"positive_test_multiplier",
], # nr of contacts
"value": [1.0, 0.0, 0.0], # probability
}
)
params = params.set_index(["category", "subcategory", "name"])
return params
def test_non_recurrent_work_contacts_no_random_no_sick(
a_thursday, params_with_positive
):
a_thursday["symptomatic"] = False
res = calculate_non_recurrent_contacts_from_empirical_distribution(
states=a_thursday,
params=params_with_positive.loc["work_non_recurrent"],
on_weekends=False,
query="occupation == 'working'",
seed=433,
)
expected = a_thursday["age_group"].replace({"10-19": 0.0, "40-49": 2.0})
assert_series_equal(res, expected, check_names=False, check_dtype=False)
def test_non_recurrent_work_contacts_no_random_no_sick_sat(
states, params_with_positive
):
a_saturday = states[states["date"] == pd.Timestamp("2020-04-04")].copy()
a_saturday["symptomatic"] = False
a_saturday["participates_saturday"] = [True, True, True] + [False] * (
len(a_saturday) - 3
)
res = calculate_non_recurrent_contacts_from_empirical_distribution(
states=a_saturday,
params=params_with_positive.loc["work_non_recurrent"],
on_weekends="participates",
query="occupation == 'working'",
seed=433,
)
expected = pd.Series(0, index=a_saturday.index)
expected[:2] = 2
assert_series_equal(res, expected, check_names=False, check_dtype=False)
def test_non_recurrent_work_contacts_no_random_with_sick(
a_thursday, params_with_positive
):
res = calculate_non_recurrent_contacts_from_empirical_distribution(
states=a_thursday,
params=params_with_positive.loc["work_non_recurrent"],
on_weekends=False,
query="occupation == 'working'",
seed=448,
)
expected = a_thursday["age_group"].replace({"10-19": 0.0, "40-49": 2.0})
expected[:20] = 0.0
assert_series_equal(res, expected, check_names=False, check_dtype=False)
def test_non_recurrent_work_contacts_random_with_sick(a_thursday):
np.random.seed(77)
params = pd.DataFrame.from_dict(
{
"category": ["work_non_recurrent"] * 4,
"subcategory": ["all"] * 2
+ ["symptomatic_multiplier", "positive_test_multiplier"],
"name": [
3,
2,
"symptomatic_multiplier",
"positive_test_multiplier",
], # nr of contacts
"value": [0.5, 0.5, 0.0, 0.0], # probability
}
).set_index(["category", "subcategory", "name"])
res = calculate_non_recurrent_contacts_from_empirical_distribution(
states=a_thursday,
params=params.loc["work_non_recurrent"],
on_weekends=False,
query="occupation == 'working'",
seed=338,
)
assert (res[:20] == 0).all() # symptomatics
assert (res[a_thursday["occupation"] != "working"] == 0).all() # non workers
healthy_workers = (a_thursday["occupation"] == "working") & (
a_thursday["cd_symptoms_false"] < 0
)
assert res[healthy_workers].isin([2, 3]).all()
# ------------------------------------------------------------------------------------
def test_non_recurrent_other_contacts_no_random_no_sick(a_thursday):
a_thursday["symptomatic"] = False
params = pd.DataFrame.from_dict(
{
"category": ["other_non_recurrent"] * 3,
"subcategory": [
"all",
"symptomatic_multiplier",
"positive_test_multiplier",
],
"name": [
2,
"symptomatic_multiplier",
"positive_test_multiplier",
], # nr of contacts
"value": [1.0, 0.0, 0.0], # probability
}
).set_index(["category", "subcategory", "name"])
res = calculate_non_recurrent_contacts_from_empirical_distribution(
states=a_thursday,
params=params.loc["other_non_recurrent"],
on_weekends=True,
query=None,
seed=334,
)
expected = pd.Series(data=2, index=a_thursday.index)
assert_series_equal(res, expected, check_names=False, check_dtype=False)
def test_non_recurrent_other_contacts_no_random_with_sick(a_thursday):
params = pd.DataFrame.from_dict(
{
"category": ["other_non_recurrent"] * 3,
"subcategory": [
"all",
"symptomatic_multiplier",
"positive_test_multiplier",
],
"name": [
2,
"symptomatic_multiplier",
"positive_test_multiplier",
], # nr of contacts
"value": [1.0, 0.0, 0.0], # probability
}
).set_index(["category", "subcategory", "name"])
res = calculate_non_recurrent_contacts_from_empirical_distribution(
states=a_thursday,
params=params.loc["other_non_recurrent"],
on_weekends=True,
query=None,
seed=332,
)
expected = pd.Series(data=2, index=a_thursday.index)
expected[:20] = 0
assert_series_equal(res, expected, check_names=False, check_dtype=False)
def test_non_recurrent_other_contacts_random_with_sick(a_thursday):
np.random.seed(770)
params = pd.DataFrame.from_dict(
{
"category": ["other_non_recurrent"] * 4,
"subcategory": ["all"] * 2
+ ["symptomatic_multiplier", "positive_test_multiplier"],
"name": [
3,
2,
"symptomatic_multiplier",
"positive_test_multiplier",
], # nr of contacts
"value": [0.5, 0.5, 0.0, 0.0], # probability
}
).set_index(["category", "subcategory", "name"])
res = calculate_non_recurrent_contacts_from_empirical_distribution(
states=a_thursday,
params=params.loc["other_non_recurrent"],
on_weekends=True,
query=None,
seed=474,
)
assert (res[:20] == 0).all() # symptomatics
assert res[a_thursday["cd_symptoms_false"] < 0].isin([2, 3]).all()
# --------------------------------- General Functions ---------------------------------
def test_draw_nr_of_contacts_always_five(states):
dist = pd.DataFrame(
data=[[4, 0, "all"], [5, 1, "all"]], columns=["name", "value", "subcategory"]
).set_index(["subcategory", "name"])["value"]
pop = pd.Series(data=True, index=states.index)
res = _draw_nr_of_contacts(dist, pop, states, seed=939)
expected = pd.Series(5.0, index=states.index)
assert_series_equal(res, expected, check_dtype=False)
def test_draw_nr_of_contacts_mean_5(states):
# this relies on the law of large numbers
np.random.seed(3499)
dist = pd.DataFrame(
[[4, 0.5, "all"], [6, 0.5, "all"]], columns=["name", "value", "subcategory"]
).set_index(["subcategory", "name"])["value"]
pop = pd.Series(data=True, index=states.index)
res = _draw_nr_of_contacts(dist, pop, states, seed=939)
assert res.isin([4, 6]).all()
assert res.mean() == pytest.approx(5, 0.01)
def test_draw_nr_of_contacts_differ_btw_ages(states):
dist = pd.DataFrame.from_dict(
{"name": [0, 6], "value": [1, 1], "subcategory": ["10-19", "40-49"]}
).set_index(["subcategory", "name"])["value"]
pop = pd.Series(data=True, index=states.index)
res = _draw_nr_of_contacts(dist, pop, states, seed=939)
assert (res[states["age_group"] == "10-19"] == 0).all()
assert (res[states["age_group"] == "40-49"] == 6).all()
def test_draw_nr_of_contacts_differ_btw_ages_random(states):
np.random.seed(24)
dist = pd.DataFrame(
data=[
[0, 0.5, "10-19"],
[1, 0.5, "10-19"],
[6, 0.5, "40-49"],
[7, 0.5, "40-49"],
],
columns=["name", "value", "subcategory"],
).set_index(["subcategory", "name"])["value"]
pop = pd.Series(data=True, index=states.index)
res = _draw_nr_of_contacts(dist, pop, states, seed=24)
young = res[states["age_group"] == "10-19"]
old = res[states["age_group"] == "40-49"]
assert young.isin([0, 1]).all()
assert old.isin([6, 7]).all()
assert young.mean() == pytest.approx(0.5, 0.05)
assert old.mean() == pytest.approx(6.5, 0.05)
# ------------------------------------------------------------------------------------
def test_reduce_non_recurrent_contacts_on_condition(states):
nr_of_contacts = pd.Series(data=10, index=states.index)
states["symptomatic"] = [True, True, True] + [False] * (len(states) - 3)
multiplier = 0.5
states.loc[:1, "quarantine_compliance"] = 0.3
expected = pd.Series([10, 10, 0] + [10] * (len(states) - 3))
res = reduce_contacts_on_condition(
contacts=nr_of_contacts,
states=states,
multiplier=multiplier,
condition="symptomatic",
is_recurrent=False,
)
assert_series_equal(res, expected, check_dtype=False)
def test_reduce_recurrent_contacts_on_condition(states):
participating = pd.Series(data=True, index=states.index)
states["symptomatic"] = [True, True, True] + [False] * (len(states) - 3)
states.loc[:0, "quarantine_compliance"] = 0.3
multiplier = 0.5
res = reduce_contacts_on_condition(
contacts=participating,
states=states,
multiplier=multiplier,
condition="symptomatic",
is_recurrent=True,
)
expected = pd.Series([True, False, False] + [True] * (len(states) - 3))
assert_series_equal(res, expected, check_dtype=False)
# ------------------------------------------------------------------------------------
def test_meet_daily_other_contacts():
states = pd.DataFrame()
states["symptomatic"] = [False, False, False, True]
states["knows_infectious"] = [False, False, False, False]
states["knows_immune"] = False
states["cd_received_test_result_true"] = -20
states["daily_meeting_id"] = [-1, 2, 2, 2]
states["knows_currently_infected"] = states.eval(
"knows_infectious | (knows_immune & symptomatic) "
"| (knows_immune & (cd_received_test_result_true >= -13))"
)
states["quarantine_compliance"] = 1.0
params = pd.DataFrame()
params["value"] = [0.0, 0.0]
params["subcategory"] = ["symptomatic_multiplier", "positive_test_multiplier"]
params["name"] = params["subcategory"]
params = params.set_index(["subcategory", "name"])
res = meet_daily_other_contacts(
states, params, group_col_name="daily_meeting_id", seed=None
)
expected = | pd.Series([False, True, True, False]) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import Constants
# In[2]:
# Constants
INPUT_FILE_PATH = '.\\data\\sample\\sample_1.xlsx'
# In[5]:
# Import data file
df = pd.read_excel(INPUT_FILE_PATH)
df.head()
# In[10]:
result_df = pd.DataFrame()
for dept in Constants.DEPTS:
single_dept = df[df['Dept'] == dept]
filtered = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import logging
import numpy as np
import collections
import configparser
import shutil
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.dates as mdates
import requests
import io
from astropy.io import fits
from astropy.time import Time
from pathlib import Path
from matplotlib.backends.backend_pdf import PdfPages
import sphere
import sphere.utils as utils
import sphere.toolbox as toolbox
_log = logging.getLogger(__name__)
# WFS wavelength
wave_wfs = 500e-9
class Reduction(object):
'''
SPHERE/SPARTA dataset reduction class
The analysis and plotting code of this class was originally
developed by <NAME> (ESO/IPAG) and based on SAXO tools
from Jean-<NAME> (ONERA). See:
https://github.com/jmilou/sparta
for the code from <NAME>.
'''
##################################################
# Class variables
##################################################
# specify for each recipe which other recipes need to have been executed before
recipe_requirements = collections.OrderedDict([
('sort_files', []),
('sph_sparta_dtts', ['sort_files']),
('sph_sparta_wfs_parameters', ['sort_files']),
('sph_sparta_atmospheric_parameters', ['sort_files']),
('sph_query_databases', ['sort_files']),
('sph_sparta_plot', ['sort_files', 'sph_sparta_dtts', 'sph_sparta_wfs_parameters', 'sph_sparta_atmospheric_parameters']),
('sph_sparta_clean', [])
])
##################################################
# Constructor
##################################################
def __new__(cls, path, log_level='info', sphere_handler=None):
'''
Custom instantiation for the class
The customized instantiation enables to check that the
provided path is a valid reduction path. If not, None will be
returned for the reduction being created. Otherwise, an
instance is created and returned at the end.
Parameters
----------
path : str
Path to the directory containing the dataset
level : {'debug', 'info', 'warning', 'error', 'critical'}
The log level of the handler
sphere_handler : log handler
Higher-level SPHERE.Dataset log handler
'''
#
# make sure we are dealing with a proper reduction directory
#
# init path
path = Path(path).expanduser().resolve()
# zeroth-order reduction validation
raw = path / 'raw'
if not raw.exists():
_log.error('No raw/ subdirectory. {0} is not a valid reduction path'.format(path))
return None
else:
reduction = super(Reduction, cls).__new__(cls)
#
# basic init
#
# init path
reduction._path = utils.ReductionPath(path)
# instrument and mode
reduction._instrument = 'SPARTA'
#
# logging
#
logger = logging.getLogger(str(path))
logger.setLevel(log_level.upper())
if logger.hasHandlers():
            for hdlr in list(logger.handlers):
                logger.removeHandler(hdlr)
handler = logging.FileHandler(reduction._path.products / 'reduction.log', mode='w', encoding='utf-8')
formatter = logging.Formatter('%(asctime)s\t%(levelname)8s\t%(message)s')
formatter.default_msec_format = '%s.%03d'
handler.setFormatter(formatter)
logger.addHandler(handler)
if sphere_handler:
logger.addHandler(sphere_handler)
reduction._logger = logger
reduction._logger.info('Creating SPARTA reduction at path {}'.format(path))
#
# configuration
#
reduction._logger.debug('> read default configuration')
configfile = f'{Path(sphere.__file__).parent}/instruments/{reduction._instrument}.ini'
config = configparser.ConfigParser()
reduction._logger.debug('Read configuration')
config.read(configfile)
# reduction parameters
reduction._config = dict(config.items('reduction'))
for key, value in reduction._config.items():
try:
val = eval(value)
except NameError:
val = value
reduction._config[key] = val
#
# reduction and recipe status
#
reduction._status = sphere.INIT
reduction._recipes_status = collections.OrderedDict()
for recipe in reduction.recipe_requirements.keys():
reduction._update_recipe_status(recipe, sphere.NOTSET)
# reload any existing data frames
reduction._read_info()
reduction._logger.warning('#########################################################')
reduction._logger.warning('# WARNING! #')
reduction._logger.warning('# Support for SPARTA files is preliminary. The current #')
reduction._logger.warning('# format of product files may change in future versions #')
reduction._logger.warning('# of the pipeline until an appropriate format is found. #')
reduction._logger.warning('# Please do not blindly rely on the current format. #')
reduction._logger.warning('#########################################################')
#
# return instance
#
return reduction
##################################################
# Representation
##################################################
def __repr__(self):
return '<Reduction, instrument={}, path={}, log={}>'.format(self._instrument, self._path, self.loglevel)
    def __format__(self, format_spec):
        return self.__repr__()
##################################################
# Properties
##################################################
@property
def loglevel(self):
return logging.getLevelName(self._logger.level)
@loglevel.setter
def loglevel(self, level):
self._logger.setLevel(level.upper())
@property
def instrument(self):
return self._instrument
@property
def path(self):
return self._path
@property
def files_info(self):
return self._files_info
@property
def dtts_info(self):
return self._dtts_info
@property
def visloop_info(self):
return self._visloop_info
@property
def irloop_info(self):
return self._irloop_info
@property
def atmospheric_info(self):
return self._atmos_info
@property
def recipe_status(self):
return self._recipes_status
@property
def config(self):
return self._config
@property
def status(self):
return self._status
##################################################
# Private methods
##################################################
def _read_info(self):
'''
Read the files, calibs and frames information from disk
files_info : dataframe
The data frame with all the information on files
This function is not supposed to be called directly by the user.
'''
self._logger.info('Read existing reduction information')
# path
path = self.path
# files info
fname = path.preproc / 'files.csv'
if fname.exists():
self._logger.debug('> read files.csv')
files_info = pd.read_csv(fname, index_col=0)
# convert times
files_info['DATE-OBS'] = | pd.to_datetime(files_info['DATE-OBS'], utc=False) | pandas.to_datetime |
# -*- coding:utf-8 -*-
# =========================================================================== #
# Project : Data Mining #
# File : \mymain.py #
# Python : 3.9.1 #
# --------------------------------------------------------------------------- #
# Author : <NAME> #
# Company : nov8.ai #
# Email : <EMAIL> #
# URL : https://github.com/john-james-sf/Data-Mining/ #
# --------------------------------------------------------------------------- #
# Created : Tuesday, March 9th 2021, 12:24:24 am #
# Last Modified : Tuesday, March 9th 2021, 12:24:24 am #
# Modified By : <NAME> (<EMAIL>) #
# --------------------------------------------------------------------------- #
# License : BSD #
# Copyright (c) 2021 nov8.ai #
# =========================================================================== #
# =========================================================================== #
# 1. LIBRARIES #
# =========================================================================== #
#%%
# System and python libraries
from abc import ABC, abstractmethod
import datetime
import glob
import itertools
from joblib import dump, load
import os
import pickle
import time
import uuid
# Manipulating, analyzing and processing data
from collections import OrderedDict
import numpy as np
import pandas as pd
import scipy as sp
from scipy.stats.stats import pearsonr, f_oneway
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.compose import ColumnTransformer
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer, SimpleImputer
from sklearn.neighbors import LocalOutlierFactor
from sklearn.preprocessing import FunctionTransformer, StandardScaler
from sklearn.preprocessing import OneHotEncoder, PowerTransformer
from category_encoders import TargetEncoder, LeaveOneOutEncoder
# Feature and model selection and evaluation
from sklearn.feature_selection import RFECV, SelectKBest
from sklearn.feature_selection import VarianceThreshold, f_regression
from sklearn.metrics import make_scorer, mean_squared_error
from sklearn.model_selection import KFold
from sklearn.pipeline import make_pipeline, Pipeline, FeatureUnion
from sklearn.model_selection import GridSearchCV
# Regression based estimators
from sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet
# Tree-based estimators
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import AdaBoostRegressor, BaggingRegressor, ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.tree import DecisionTreeRegressor
# Visualizing data
import seaborn as sns
import matplotlib.pyplot as plt
from tabulate import tabulate
# Utilities
from utils import notify, PersistEstimator, comment, print_dict, print_dict_keys
# Data Source
from data import AmesData
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
pd.set_option('mode.chained_assignment', None)
# =========================================================================== #
# COLUMNS #
# =========================================================================== #
discrete = ["Year_Built","Year_Remod_Add","Bsmt_Full_Bath","Bsmt_Half_Bath",
"Full_Bath","Half_Bath","Bedroom_AbvGr","Kitchen_AbvGr","TotRms_AbvGrd",
"Fireplaces","Garage_Cars","Mo_Sold","Year_Sold", "Garage_Yr_Blt"]
continuous = ["Lot_Frontage","Lot_Area","Mas_Vnr_Area","BsmtFin_SF_1","BsmtFin_SF_2",
"Bsmt_Unf_SF","Total_Bsmt_SF","First_Flr_SF","Second_Flr_SF","Low_Qual_Fin_SF",
"Gr_Liv_Area","Garage_Area","Wood_Deck_SF","Open_Porch_SF","Enclosed_Porch",
"Three_season_porch","Screen_Porch","Pool_Area","Misc_Val"]
numeric = discrete + continuous
n_nominal_levels = 191
nominal = ['MS_SubClass', 'MS_Zoning', 'Street', 'Alley', 'Land_Contour', 'Lot_Config', 'Neighborhood',
'Condition_1', 'Condition_2', 'Bldg_Type', 'House_Style', 'Roof_Style', 'Roof_Matl',
'Exterior_1st', 'Exterior_2nd', 'Mas_Vnr_Type', 'Foundation', 'Heating', 'Central_Air',
'Garage_Type', 'Misc_Feature', 'Sale_Type', 'Sale_Condition']
ordinal = ['BsmtFin_Type_1', 'BsmtFin_Type_2', 'Bsmt_Cond', 'Bsmt_Exposure',
'Bsmt_Qual', 'Electrical', 'Exter_Cond', 'Exter_Qual', 'Fence', 'Fireplace_Qu',
'Functional', 'Garage_Cond', 'Garage_Finish', 'Garage_Qual', 'Heating_QC', 'Kitchen_Qual',
'Land_Slope', 'Lot_Shape', 'Overall_Cond', 'Overall_Qual', 'Paved_Drive', 'Pool_QC', 'Utilities']
pre_features = ['PID', 'MS_SubClass', 'MS_Zoning', 'Lot_Frontage', 'Lot_Area', 'Street',
'Alley', 'Lot_Shape', 'Land_Contour', 'Utilities', 'Lot_Config',
'Land_Slope', 'Neighborhood', 'Condition_1', 'Condition_2', 'Bldg_Type',
'House_Style', 'Overall_Qual', 'Overall_Cond', 'Year_Built',
'Year_Remod_Add', 'Roof_Style', 'Roof_Matl', 'Exterior_1st',
'Exterior_2nd', 'Mas_Vnr_Type', 'Mas_Vnr_Area', 'Exter_Qual',
'Exter_Cond', 'Foundation', 'Bsmt_Qual', 'Bsmt_Cond', 'Bsmt_Exposure',
'BsmtFin_Type_1', 'BsmtFin_SF_1', 'BsmtFin_Type_2', 'BsmtFin_SF_2',
'Bsmt_Unf_SF', 'Total_Bsmt_SF', 'Heating', 'Heating_QC', 'Central_Air',
'Electrical', 'First_Flr_SF', 'Second_Flr_SF', 'Low_Qual_Fin_SF',
'Gr_Liv_Area', 'Bsmt_Full_Bath', 'Bsmt_Half_Bath', 'Full_Bath',
'Half_Bath', 'Bedroom_AbvGr', 'Kitchen_AbvGr', 'Kitchen_Qual',
'TotRms_AbvGrd', 'Functional', 'Fireplaces', 'Fireplace_Qu',
'Garage_Type', 'Garage_Yr_Blt', 'Garage_Finish', 'Garage_Cars',
'Garage_Area', 'Garage_Qual', 'Garage_Cond', 'Paved_Drive',
'Wood_Deck_SF', 'Open_Porch_SF', 'Enclosed_Porch', 'Three_season_porch',
'Screen_Porch', 'Pool_Area', 'Pool_QC', 'Fence', 'Misc_Feature',
'Misc_Val', 'Mo_Sold', 'Year_Sold', 'Sale_Type', 'Sale_Condition',
'Longitude', 'Latitude']
post_features = ['PID', 'MS_SubClass', 'MS_Zoning', 'Lot_Frontage', 'Lot_Area', 'Street',
'Alley', 'Lot_Shape', 'Land_Contour', 'Utilities', 'Lot_Config',
'Land_Slope', 'Neighborhood', 'Condition_1', 'Condition_2', 'Bldg_Type',
'House_Style', 'Overall_Qual', 'Overall_Cond', 'Year_Built',
'Year_Remod_Add', 'Roof_Style', 'Roof_Matl', 'Exterior_1st',
'Exterior_2nd', 'Mas_Vnr_Type', 'Mas_Vnr_Area', 'Exter_Qual',
'Exter_Cond', 'Foundation', 'Bsmt_Qual', 'Bsmt_Cond', 'Bsmt_Exposure',
'BsmtFin_Type_1', 'BsmtFin_SF_1', 'BsmtFin_Type_2', 'BsmtFin_SF_2',
'Bsmt_Unf_SF', 'Total_Bsmt_SF', 'Heating', 'Heating_QC', 'Central_Air',
'Electrical', 'First_Flr_SF', 'Second_Flr_SF', 'Low_Qual_Fin_SF',
'Gr_Liv_Area', 'Bsmt_Full_Bath', 'Bsmt_Half_Bath', 'Full_Bath',
'Half_Bath', 'Bedroom_AbvGr', 'Kitchen_AbvGr', 'Kitchen_Qual',
'TotRms_AbvGrd', 'Functional', 'Fireplaces', 'Fireplace_Qu',
'Garage_Type', 'Garage_Yr_Blt', 'Garage_Finish', 'Garage_Cars',
'Garage_Area', 'Garage_Qual', 'Garage_Cond', 'Paved_Drive',
'Wood_Deck_SF', 'Open_Porch_SF', 'Enclosed_Porch', 'Three_season_porch',
'Screen_Porch', 'Pool_Area', 'Pool_QC', 'Fence', 'Misc_Feature',
'Misc_Val', 'Mo_Sold', 'Year_Sold', 'Sale_Type', 'Sale_Condition',
'Age', 'Garage_Age']
# =========================================================================== #
# ORDINAL MAP #
# =========================================================================== #
ordinal_map = {'BsmtFin_Type_1': {'ALQ': 5, 'BLQ': 4, 'GLQ': 6, 'LwQ': 2, 'No_Basement': 0, 'Rec': 3, 'Unf': 1},
'BsmtFin_Type_2': {'ALQ': 5, 'BLQ': 4, 'GLQ': 6, 'LwQ': 2, 'No_Basement': 0, 'Rec': 3, 'Unf': 1},
'Bsmt_Cond': {'Excellent': 5, 'Fair': 2, 'Good': 4, 'No_Basement': 0, 'Poor': 1, 'Typical': 3},
'Bsmt_Exposure': {'Av': 3, 'Gd': 4, 'Mn': 2, 'No': 1, 'No_Basement': 0},
'Bsmt_Qual': {'Excellent': 5, 'Fair': 2, 'Good': 4, 'No_Basement': 0, 'Poor': 1, 'Typical': 3},
'Electrical': {'FuseA': 4, 'FuseF': 2, 'FuseP': 1, 'Mix': 0, 'SBrkr': 5, 'Unknown': 3},
'Exter_Cond': {'Excellent': 4, 'Fair': 1, 'Good': 3, 'Poor': 0, 'Typical': 2},
'Exter_Qual': {'Excellent': 4, 'Fair': 1, 'Good': 3, 'Poor': 0, 'Typical': 2},
'Fence': {'Good_Privacy': 4, 'Good_Wood': 2, 'Minimum_Privacy': 3, 'Minimum_Wood_Wire': 1,'No_Fence': 0},
'Fireplace_Qu': {'Excellent': 5, 'Fair': 2, 'Good': 4, 'No_Fireplace': 0, 'Poor': 1, 'Typical': 3},
'Functional': {'Maj1': 3, 'Maj2': 2, 'Min1': 5, 'Min2': 6, 'Mod': 4, 'Sal': 0, 'Sev': 1, 'Typ': 7},
'Garage_Cond': {'Excellent': 5, 'Fair': 2, 'Good': 4, 'No_Garage': 0, 'Poor': 1, 'Typical': 3},
'Garage_Finish': {'Fin': 3, 'No_Garage': 0, 'RFn': 2, 'Unf': 1},
'Garage_Qual': {'Excellent': 5, 'Fair': 2, 'Good': 4, 'No_Garage': 0, 'Poor': 1, 'Typical': 3},
'Heating_QC': {'Excellent': 4, 'Fair': 1, 'Good': 3, 'Poor': 0, 'Typical': 2},
'Kitchen_Qual': {'Excellent': 4, 'Fair': 1, 'Good': 3, 'Poor': 0, 'Typical': 2},
'Land_Slope': {'Gtl': 0, 'Mod': 1, 'Sev': 2},
'Lot_Shape': {'Irregular': 0, 'Moderately_Irregular': 1, 'Regular': 3, 'Slightly_Irregular': 2},
'Overall_Cond': {'Above_Average': 5, 'Average': 4,'Below_Average': 3,'Excellent': 8,'Fair': 2,
'Good': 6,'Poor': 1,'Very_Excellent': 9,'Very_Good': 7,'Very_Poor': 0},
'Overall_Qual': {'Above_Average': 5,'Average': 4,'Below_Average': 3,'Excellent': 8,'Fair': 2,
'Good': 6,'Poor': 1,'Very_Excellent': 9,'Very_Good': 7,'Very_Poor': 0},
'Paved_Drive': {'Dirt_Gravel': 0, 'Partial_Pavement': 1, 'Paved': 2},
'Pool_QC': {'Excellent': 4, 'Fair': 1, 'Good': 3, 'No_Pool': 0, 'Typical': 2},
'Utilities': {'AllPub': 2, 'NoSeWa': 0, 'NoSewr': 1}}
# =========================================================================== #
# ESTIMATORS #
# =========================================================================== #
model_groups = {
"Regressors": {
"Linear Regression": {
"Estimator": LinearRegression(),
"Parameters": {"normalize": [False],"n_jobs": [4],"copy_X": [True]}
},
"Lasso": {
"Estimator": Lasso(),
"Parameters": {
"alpha": [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 0.25, 0.50, 0.75, 1.0]}
},
"Ridge": {
"Estimator": Ridge(),
"Parameters": {
"alpha": [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 0.25, 0.50, 0.75, 1.0]}
},
"ElasticNet": {
"Estimator": ElasticNet(),
"Parameters": {
"alpha": [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 0.25, 0.50, 0.75, 1.0],
"l1_ratio": np.arange(0.0,1.0,0.1)}
}
},
"Ensembles": {
"Random Forest": {
"Estimator": RandomForestRegressor(),
"Parameters": {
"n_estimators": [50,100],
"max_depth": [2,3,4,5,6],
"criterion": ["mse"],
"min_samples_split": [0.005, 0.01, 0.05, 0.10],
"min_samples_leaf": [0.005, 0.01, 0.05, 0.10],
"max_features": ["auto"],
"n_jobs": [4]}
},
"AdaBoost": {
"Estimator": AdaBoostRegressor(),
"Parameters": {
"base_estimator": [DecisionTreeRegressor()],
"n_estimators": [50,100],
"learning_rate": [0.001, 0.01, 0.05, 0.1, 0.25, 0.50, 0.75, 1.0]}
},
"Bagging": {
"Estimator": BaggingRegressor(),
"Parameters": {
"base_estimator": [DecisionTreeRegressor()],
"n_estimators": [50,100],
"max_features": [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
"n_jobs": [4]}
},
"Extra Trees": {
"Estimator": ExtraTreesRegressor(),
"Parameters": {
"n_estimators": [50,100],
"max_depth": [2,3,4,5,6],
"min_samples_split": [0.005, 0.01, 0.05, 0.10],
"min_samples_leaf": [0.005, 0.01, 0.05, 0.10],
"max_features": ["auto"],
"n_jobs": [4]}
},
"Gradient Boosting": {
"Estimator": GradientBoostingRegressor(),
"Parameters": {
"learning_rate": [0.15,0.1,0.05,0.01,0.005,0.001],
"n_estimators": [50,100],
"max_depth": [2,3,4,5,6],
"criterion": ["mse"],
"min_samples_split": [0.005, 0.01, 0.05, 0.10],
"min_samples_leaf": [0.005, 0.01, 0.05, 0.10],
"max_features": ["auto"]}
},
"Histogram Gradient Boosting": {
"Estimator": HistGradientBoostingRegressor(),
"Parameters": {
"learning_rate": [0.15,0.1,0.05,0.01,0.005,0.001],
"max_depth": [2,3,4,5,6],
"min_samples_leaf": [0.005, 0.01, 0.05, 0.10]}
}
}
}
regressors = model_groups["Regressors"]
ensembles = model_groups["Ensembles"]
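# Illustrative sketch (not part of the original script): one way the model
# groups above could be consumed. The scoring string and CV settings are
# assumptions made for this example, not the author's configuration.
def _demo_grid_search(X, y, group=regressors, cv_folds=5):
    """Hypothetical helper: grid-search every estimator in a model group."""
    results = {}
    for name, spec in group.items():
        gscv = GridSearchCV(
            estimator=spec["Estimator"],
            param_grid=spec["Parameters"],
            scoring="neg_root_mean_squared_error",
            cv=KFold(n_splits=cv_folds, shuffle=True, random_state=0),
        )
        gscv.fit(X, y)
        results[name] = (gscv.best_score_, gscv.best_params_)
    return results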
# =========================================================================== #
# 0. BASE TRANSFORMER #
# =========================================================================== #
class BaseTransformer(ABC):
def __init__(self):
pass
def fit(self, X, y=None):
X_old = pd.DataFrame(data=X["X"], columns=X["Features"])
y_old = X["y"]
self._fit(X_old, y_old)
return self
def transform(self, X, y=None):
X_old = pd.DataFrame(data=X["X"], columns=X["Features"])
y_old = X["y"]
X_new = self._transform(X_old, y_old)
assert(len(X_new.columns) == len(X_old.columns)), f"Old new columns mismatch"
assert(X_new.isnull().sum().sum() == 0), f"Warning nulls in test after clean"
X["X"] = X_new
return X
# =========================================================================== #
# 1. BASE SELECTOR #
# =========================================================================== #
class BaseSelector(ABC):
def __init__(self):
pass
def fit(self, X, y=None):
X_old = pd.DataFrame(data=X["X"], columns=X["Features"])
y_old = X["y"]
self._fit(X_old, y_old)
return self
def transform(self, X, y=None):
X_old = pd.DataFrame(data=X["X"], columns=X["Features"])
y_old = X["y"]
X_new = self._transform(X_old, y_old)
assert(X_new.isnull().sum().sum() == 0), f"Warning nulls"
X["X"] = X_new
X["Features"] = X_new.columns
return X
# =========================================================================== #
# 2. DATA CLEANER #
# =========================================================================== #
class DataCleaner(BaseSelector):
def __init__(self):
pass
def _fit(self, X, y=None):
return X, y
def _transform(self, X, y=None):
notify.entering(__class__.__name__, "transform")
X["Garage_Yr_Blt"].replace(to_replace=2207, value=2007, inplace=True)
X = X.drop(columns=["Latitude", "Longitude"])
X = X.fillna(X.median())
notify.leaving(__class__.__name__, "transform")
return X
# =========================================================================== #
# 3. FEATURE ENGINEERING #
# =========================================================================== #
class FeatureEngineer(BaseSelector):
def __init__(self):
pass
def _fit(self, X, y=None):
return X, y
def _transform(self, X, y=None):
notify.entering(__class__.__name__, "transform")
# Add an age feature and remove year built
X["Age"] = X["Year_Sold"] - X["Year_Built"]
        X["Age"].fillna(X["Age"].median(), inplace=True)
# Add age feature for garage.
X["Garage_Age"] = X["Year_Sold"] - X["Garage_Yr_Blt"]
X["Garage_Age"].fillna(value=0,inplace=True)
notify.leaving(__class__.__name__, "transform")
return X
# =========================================================================== #
# 4. CONTINUOUS PREPROCESSING #
# =========================================================================== #
class ContinuousPreprocessor(BaseEstimator, TransformerMixin, BaseTransformer):
def __init__(self, continuous=continuous):
self._continuous = continuous
def _fit(self, X, y=None, **fit_params):
return self
def _transform(self, X, y=None, **transform_params):
notify.entering(__class__.__name__, "transform")
# Impute missing values as linear function of other features
imputer = IterativeImputer()
X[self._continuous] = imputer.fit_transform(X[self._continuous])
# Power transformation to make feature distributions closer to Guassian
power = PowerTransformer(method="yeo-johnson", standardize=False)
X[self._continuous] = power.fit_transform(X[self._continuous])
# Scale the features and standardize to zero mean unit variance
scaler = StandardScaler()
X[self._continuous] = scaler.fit_transform(X[self._continuous])
notify.leaving(__class__.__name__, "transform")
return X
# =========================================================================== #
# 5. DISCRETE PREPROCESSING #
# =========================================================================== #
class DiscretePreprocessor(BaseEstimator, TransformerMixin, BaseTransformer):
def __init__(self, strategy="most_frequent", discrete=discrete):
self._strategy = strategy
self._discrete = discrete
def _fit(self, X, y=None, **fit_params):
return self
def _transform(self, X, y=None, **transform_params):
notify.entering(__class__.__name__, "transform")
# Missing discrete variables will be imputed according to the strategy provided
# Default strategy is the mean.
imputer = SimpleImputer(strategy=self._strategy)
X[self._discrete] = imputer.fit_transform(X[self._discrete])
# Scale the features and standardize to zero mean unit variance
scaler = StandardScaler()
X[self._discrete] = scaler.fit_transform(X[self._discrete])
notify.leaving(__class__.__name__, "transform")
return X
# =========================================================================== #
# 6. ORDINAL PREPROCESSING #
# =========================================================================== #
class OrdinalPreprocessor(BaseEstimator, TransformerMixin, BaseTransformer):
def __init__(self, strategy="most_frequent", ordinal=ordinal,
ordinal_map=ordinal_map):
self._strategy = strategy
self._ordinal = ordinal
self._ordinal_map = ordinal_map
def _fit(self, X, y=None, **fit_params):
return self
def _transform(self, X, y=None, **transform_params):
notify.entering(__class__.__name__, "transform")
categorical = list(X.select_dtypes(include=["object"]).columns)
# Create imputer object
imputer = SimpleImputer(strategy=self._strategy)
# Perform imputation of categorical variables to most frequent
X[self._ordinal] = imputer.fit_transform(X[self._ordinal])
# Map levels to ordinal mappings
for variable, mappings in self._ordinal_map.items():
for k,v in mappings.items():
X[variable].replace({k:v}, inplace=True)
# Scale the features and standardize to zero mean unit variance
scaler = StandardScaler()
X[self._ordinal] = scaler.fit_transform(X[self._ordinal])
notify.leaving(__class__.__name__, "transform")
return X
# =========================================================================== #
# 7. TARGET LEAVE-ONE-OUT ENCODER #
# =========================================================================== #
class TargetEncoderLOO(TargetEncoder):
"""Leave-one-out target encoder.
Source: https://brendanhasz.github.io/2019/03/04/target-encoding
"""
def __init__(self, cols=nominal):
"""Leave-one-out target encoding for categorical features.
Parameters
----------
cols : list of str
Columns to target encode.
"""
self.cols = cols
def fit(self, X, y):
"""Fit leave-one-out target encoder to X and y
Parameters
----------
X : pandas DataFrame, shape [n_samples, n_columns]
DataFrame containing columns to target encode
y : pandas Series, shape = [n_samples]
Target values.
Returns
-------
self : encoder
Returns self.
"""
# Encode all categorical cols by default
if self.cols is None:
self.cols = [col for col in X
if str(X[col].dtype)=='object']
# Check columns are in X
for col in self.cols:
if col not in X:
raise ValueError('Column \''+col+'\' not in X')
# Encode each element of each column
self.sum_count = dict()
for col in self.cols:
self.sum_count[col] = dict()
uniques = X[col].unique()
for unique in uniques:
ix = X[col]==unique
count = X[X[col] == unique].shape[0]
singleton = "N" if (count > 1) else "Y"
self.sum_count[col][unique] = \
(y[ix].sum(),ix.sum(), singleton)
# Return the fit object
return self
def transform(self, X, y=None):
"""Perform the target encoding transformation.
Uses leave-one-out target encoding for the training fold,
and uses normal target encoding for the test fold.
Parameters
----------
X : pandas DataFrame, shape [n_samples, n_columns]
DataFrame containing columns to encode
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
# Create output dataframe
Xo = X.copy()
# Use normal target encoding if this is test data
if y is None:
for col in self.sum_count.keys():
vals = np.full(X.shape[0], np.nan)
for cat, sum_count in self.sum_count[col].items():
vals[X[col]==cat] = sum_count[0]/sum_count[1]
Xo[col] = vals
# LOO target encode each column
else:
for col in self.sum_count.keys():
vals = np.full(X.shape[0], np.nan)
for cat, sum_count in self.sum_count[col].items():
ix = X[col]==cat
if sum_count[2] == "Y":
vals[ix] = sum_count[0]/sum_count[1]
else:
vals[ix] = (sum_count[0]-y[ix])/(sum_count[1]-1)
Xo[col] = vals
# Return encoded DataFrame
return Xo
def fit_transform(self, X, y=None):
"""Fit and transform the data via target encoding.
Parameters
----------
X : pandas DataFrame, shape [n_samples, n_columns]
DataFrame containing columns to encode
y : pandas Series, shape = [n_samples]
Target values (required!).
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
return self.fit(X, y).transform(X, y)
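# Minimal usage sketch (illustration only, not part of the original module):
# leave-one-out encoding replaces each training category with the mean target
# of the *other* rows in that category, which limits target leakage.
def _demo_target_encoder_loo():
    """Hypothetical toy example of TargetEncoderLOO on a single nominal column."""
    X_toy = pd.DataFrame({"Neighborhood": ["A", "A", "A", "B", "B"]})
    y_toy = pd.Series([100.0, 200.0, 300.0, 50.0, 150.0])
    enc = TargetEncoderLOO(cols=["Neighborhood"])
    # Training fold: each row is encoded without its own target value,
    # e.g. the first "A" row becomes (200 + 300) / 2 = 250.
    train_encoded = enc.fit_transform(X_toy, y_toy)
    # Test fold (y is None): plain per-category means are used instead.
    test_encoded = enc.transform(pd.DataFrame({"Neighborhood": ["A", "B"]}))
    return train_encoded, test_encoded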
# =========================================================================== #
# 8. NOMINAL PREPROCESSING #
# =========================================================================== #
class NominalPreprocessor(BaseEstimator, TransformerMixin, BaseTransformer):
def __init__(self, encoder=TargetEncoderLOO(cols=nominal), nominal=nominal):
self._encoder = encoder
self._nominal = nominal
def _fit(self, X, y=None, **fit_params):
notify.entering(__class__.__name__, "fit")
notify.leaving(__class__.__name__, "fit")
return self
def _transform(self, X, y=None, **transform_params):
notify.entering(__class__.__name__, "transform")
        self._encoder.fit(X, y)
        X = self._encoder.transform(X, y)
        # Scale the features and standardize to zero mean unit variance
        scaler = StandardScaler()
        X[self._nominal] = scaler.fit_transform(X[self._nominal])
        #X = X.fillna(X.mean())
        notify.leaving(__class__.__name__, "transform")
        return X
def fit_transform(self, X,y=None):
self.fit(X,y)
return self.transform(X,y)
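# Illustrative sketch (an assumption about intended usage, not original code):
# the transformers above all exchange a dict {"X": frame, "y": target,
# "Features": columns}, so they can be chained in an sklearn Pipeline and
# driven with that dict instead of a bare DataFrame.
def _demo_preprocessing_pipeline(frame, target):
    """Hypothetical wiring of the preprocessing steps defined above."""
    package = {"X": frame, "y": target, "Features": frame.columns}
    steps = [
        ("clean", DataCleaner()),
        ("engineer", FeatureEngineer()),
        ("continuous", ContinuousPreprocessor()),
        ("discrete", DiscretePreprocessor()),
        ("ordinal", OrdinalPreprocessor()),
        ("nominal", NominalPreprocessor()),
    ]
    return Pipeline(steps=steps).fit_transform(package)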
# =========================================================================== #
# 9. BASE FILTER #
# =========================================================================== #
class BaseFilter(BaseSelector):
def __init__(self):
pass
def report(self, X, y=None):
classname = self.__class__.__name__
message = f"The following {len(self.features_removed_)} features were removed from the data.\n{self.features_removed_}."
comment.regarding(classname, message)
# =========================================================================== #
# 10. COLLINEARITY FILTER #
# =========================================================================== #
class CollinearityFilter(BaseFilter):
def __init__(self, features, threshold=0.65, alpha=0.05, numeric=numeric):
self._threshold = threshold
self._alpha = alpha
self._features = features
self._numeric = numeric
def _fit(self, X, y=None):
return X, y
def _transform(self, X, y=None):
        notify.entering(__class__.__name__, "_transform")
self.features_removed_ = []
correlations = pd.DataFrame()
all_columns = X.columns.tolist()
columns = list(set(X.columns.tolist()).intersection(self._numeric))
# Perform pairwise correlation coefficient calculations
for col_a, col_b in itertools.combinations(columns,2):
r, p = pearsonr(X[col_a], X[col_b])
cols = col_a + "__" + col_b
d = {"Columns": cols, "A": col_a, "B": col_b,"Correlation": r, "p-value": p}
df = pd.DataFrame(data=d, index=[0])
correlations = pd.concat((correlations, df), axis=0)
# Now compute correlation between features and target.
relevance = pd.DataFrame()
for column in columns:
r, p = pearsonr(X.loc[:,column], y)
d = {"Feature": column, "Correlation": r, "p-value": p}
df = pd.DataFrame(data=d, index=[0])
relevance = pd.concat((relevance,df), axis=0)
# Obtain observations above correlation threshold and below alpha
self.suspects_ = correlations[(correlations["Correlation"] >= self._threshold) & (correlations["p-value"] <= self._alpha)]
        if self.suspects_.shape[0] == 0:
            self.X_ = X
            return self.X_
# Iterate over suspects and determine column to remove based upon
# correlation with target
to_remove = []
for index, row in self.suspects_.iterrows():
a = np.abs(relevance[relevance["Feature"] == row["A"]]["Correlation"].values)
b = np.abs(relevance[relevance["Feature"] == row["B"]]["Correlation"].values)
if a > b:
to_remove.append(row["B"])
else:
to_remove.append(row["A"])
self.X_ = X.drop(columns=to_remove)
self.features_removed_ += to_remove
self._fit(self.X_,y)
        notify.leaving(__class__.__name__, "_transform")
return self.X_
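# Minimal sketch (illustration only, calling the private _transform directly):
# with two nearly collinear predictors the filter keeps whichever is more
# correlated with the target and records the dropped column in
# features_removed_. Column names reuse entries from the module-level
# `numeric` list so the intersection logic applies.
def _demo_collinearity_filter():
    """Hypothetical toy example of CollinearityFilter on synthetic data."""
    rng = np.random.RandomState(0)
    area = rng.normal(1500, 300, size=200)
    frame = pd.DataFrame({
        "Gr_Liv_Area": area,
        "First_Flr_SF": area * 0.9 + rng.normal(0, 10, size=200),
        "Lot_Area": rng.normal(9000, 1000, size=200),
    })
    target = frame["Gr_Liv_Area"] * 100 + rng.normal(0, 500, size=200)
    filt = CollinearityFilter(features=frame.columns, threshold=0.65)
    reduced = filt._transform(frame, target)
    return reduced.columns, filt.features_removed_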
# =========================================================================== #
# 11. ANOVA FILTER #
# =========================================================================== #
class AnovaFilter(BaseFilter):
    """Eliminates categorical predictors whose level means do not differ
    significantly with respect to the response (one-way ANOVA)."""
def __init__(self, alpha=0.05, ordinal=ordinal, nominal=nominal):
self._alpha = alpha
self._ordinal = ordinal
self._nominal = nominal
def _fit(self, X, y=None):
return X, y
def _transform(self, X, y=None):
        notify.entering(__class__.__name__, "_transform")
results = pd.DataFrame()
all_columns = X.columns.tolist()
categorical = self._ordinal + self._nominal
columns = list(set(X.columns.tolist()).intersection(categorical))
# Measure variance between predictor levels w.r.t. the response
self.remaining_ = pd.DataFrame()
self.features_removed_ = []
for column in columns:
f, p = f_oneway(X[column], y)
if p > self._alpha:
self.features_removed_.append(column)
else:
d = {"Feature": column, "F-statistic": f, "p-value": p}
df = pd.DataFrame(data=d, index=[0])
self.remaining_ = | pd.concat((self.remaining_, df), axis=0) | pandas.concat |
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
import seaborn as sns
from matplotlib import get_backend
from numba import jit
from scipy.integrate import odeint, solve_ivp
###############################################################################
#Globals
###############################################################################
#Refer for details: https://seaborn.pydata.org/tutorial/color_palettes.html
palette_types = {'color': lambda n_colors, **kwargs : sns.color_palette(n_colors=n_colors, **{**{'palette': 'muted'}, **kwargs}),
'light': lambda n_colors, **kwargs : sns.light_palette(n_colors=n_colors+2, **{**{'color':'steel'}, **kwargs})[2:],
'dark' : lambda n_colors, **kwargs : sns.dark_palette( n_colors=n_colors+2, **{**{'color':'steel'}, **kwargs})[2:],
'diverging': lambda n_colors, **kwargs : sns.diverging_palette(n=n_colors, **{**{'h_pos': 250, 'h_neg':15}, **kwargs}),
'cubehelix': lambda n_colors, **kwargs : sns.cubehelix_palette(n_colors=n_colors, **kwargs),
}
#Refer for details: https://xkcd.com/color/rgb/
all_colors = sns.colors.xkcd_rgb
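#Illustrative sketch (not part of the original module): each entry in
#palette_types is a factory keyed by palette family, so a list of n colours
#can be requested uniformly regardless of the underlying seaborn call.
def _demo_palettes(n_colors=4):
    '''Hypothetical helper showing how the palette factories are called.'''
    muted = palette_types['color'](n_colors)               # seaborn "muted" palette
    helix = palette_types['cubehelix'](n_colors, rot=0.4)  # cubehelix with a custom rotation
    return muted, helix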
###############################################################################
#Integration
###############################################################################
def solver(f, y0, tspan, **kwargs):
    '''Adapter exposing scipy.integrate.solve_ivp through an odeint-style
    call signature f(y, t, *args), returning the solution with time on axis 0.
    '''
    f_ = lambda t, y, *args: f(y, t, *args)
    r = solve_ivp(f_, [tspan[0], tspan[-1]], y0, t_eval=tspan, **kwargs)
    return r.y.T
def piecewise_integrate(function, init, tspan, params, model_num, scenario_num, modify_init=None, modify_params=None, solver_args={}, solver=odeint, overlap=True, args=()):
'''Piecewise integration function with scipy.integrate.odeint as default.
Can be changed using the solver argument.
Parameters
----------
function : function
A function for numerical integration that takes in the form f(states,
time, params, args). states, params and args are numpy arrays while time
is a float or numpy float. Will be integrated using scipy.integrate.odeint
init : numpy.array
Initial values of states for numerical integration.
tspan : list of numpy.array
Segments for piecewise integration where each segment is a numpy array of
time points in ascending order.
params : numpy.array
An array of parameter values.
model_num : int
Number of the model.
scenario_num : int
Number of the scenario.
modify_init : function, optional
Function for modifying initial values before integrating over a time
segment. The default is None.
modify_params : function, optional
Function for modifying parameter values before integrating over a time
segment. The default is None.
solver_args : dict, optional
Dictionary of keyword arguments for scipy.integrate.odeint. The default is {}.
solver : scipy.integrate.odeint, optional
The integrating function. The default is odeint. Do not touch unless you
know what you are doing.
overlap : bool, optional
Avoids double counting between segments. In general, False is required
only for plotting. The default is True.
args : tuple, optional
A tuple of arguments. The default is ().
Returns
-------
y_model : numpy.array
An array of values from numerical integration.
t_model : numpy.array
An array of time points.
'''
solver_args1 = solver_args if solver_args else {}
tspan_ = tspan[0]
init_ = modify_init(init_values=init, params=params, model_num=model_num, scenario_num=scenario_num, segment=0) if modify_init else init
params_ = modify_params(init_values=init_, params=params, model_num=model_num, scenario_num=scenario_num, segment=0) if modify_params else params
y_model = solver(function, init_, tspan_, args=tuple([params_]) + args, **solver_args1)
t_model = tspan[0]
for segment in range(1, len(tspan)):
tspan_ = tspan[segment]
init_ = modify_init(init_values=y_model[-1], params=params, model_num=model_num, scenario_num=scenario_num, segment=segment) if modify_init else y_model[-1]
params_ = modify_params(init_values=init_, params=params, model_num=model_num, scenario_num=scenario_num, segment=segment) if modify_params else params
y_model_ = solver(function, init_, tspan_, args=tuple([params_]) + args, **solver_args1)
y_model = np.concatenate((y_model, y_model_), axis=0) if overlap else np.concatenate((y_model[:-1], y_model_), axis=0)
        t_model = np.concatenate((t_model, tspan_), axis=0) if overlap else np.concatenate((t_model[:-1], tspan_), axis=0)
return y_model, t_model
#Templates for modify_init
def modify_init(init_values, params, model_num, scenario_num, segment):
'''
Return a new np.array of initial values. For safety, DO NOT MODIFY IN PLACE.
:meta private:
'''
new_init = init_values.copy()
return new_init
def modify_params(init_values, params, model_num, scenario_num, segment):
'''
Return a new np.array of initial values. For safety, DO NOT MODIFY IN PLACE.
:meta private:
'''
new_params = params.copy()
return new_params
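#Illustrative sketch (not part of the original module): a one-state decay
#model integrated over two segments, with a hypothetical modify_params that
#doubles the decay rate from the second segment onwards. All names below are
#assumptions for the example, not original code.
def _demo_piecewise_decay():
    '''Hypothetical usage of piecewise_integrate with a parameter switch.'''
    def decay(y, t, params):
        return -params[0]*y

    def double_rate(init_values, params, model_num, scenario_num, segment):
        new_params = params.copy()
        if segment >= 1:
            new_params[0] *= 2
        return new_params

    tspan = [np.linspace(0, 5, 21), np.linspace(5, 10, 21)]
    y_model, t_model = piecewise_integrate(decay,
                                           init=np.array([1.0]),
                                           tspan=tspan,
                                           params=np.array([0.3]),
                                           model_num=1,
                                           scenario_num=1,
                                           modify_params=double_rate)
    return y_model, t_model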
###############################################################################
#Multi-Model Integration
###############################################################################
def integrate_models(models, params, *extra_variables, args=(), mode='np', overlap=True, multiply=True):
'''Integrates models with params
Parameters
----------
models : dict
A dictionary of model data structures.
params : pandas.DataFrame
A DataFrame of parameter values for integration.
*extra_variables : function
Additional functions for evaluating the integrated results.
args : tuple, optional
Additional arguments for the functions to be integrated. The default is ().
    mode : {'np', 'df'}, optional
        Use 'np' if the functions in extra_variables are meant to work with numpy
        arrays and 'df' if the functions are meant to work with DataFrames. The
        default is 'np'.
    overlap : bool, optional
Whether or not to include the overlapping points between time segments. The default is True.
multiply : bool, optional
Permutes parameters and scenarios if True and vice versa. The default is True.
Returns
-------
dict
A dictionary of the integrated results.
dict
A dictionary of the calculated extra variables.
'''
print('Simulating models')
y_models = {}
e_models = {v: {model_num: {scenario_num: {} for scenario_num in models[model_num]['init']} for model_num in models} for v in extra_variables}
if type(params) == dict:
try:
params_ = | pd.DataFrame(params) | pandas.DataFrame |
'''
This method uses these features
['dow', 'year', 'month', 'day_of_week', 'holiday_flg', 'min_visitors', 'mean_visitors', 'median_visitors', 'max_visitors', 'count_observations', 'air_genre_name', 'air_area_name', 'latitude', 'longitude', 'rs1_x', 'rv1_x', 'rs2_x', 'rv2_x', 'rs1_y', 'rv1_y', 'rs2_y', 'rv2_y', 'total_reserv_sum', 'total_reserv_mean', 'total_reserv_dt_diff_mean']
RMSE GradientBoostingRegressor: 0.501477019571
RMSE KNeighborsRegressor: 0.421517079307
'''
import glob, re
import numpy as np
import pandas as pd
from sklearn import *
from datetime import datetime
def RMSLE(y, pred):
return metrics.mean_squared_error(y, pred)**0.5
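# Note: despite its name, this helper returns plain RMSE of its arguments; it
# yields RMSLE only when both arrays are already log-transformed, e.g.
# RMSLE(np.log1p(y_true), np.log1p(y_pred)) (an assumption about intended use).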
data = {
'tra': pd.read_csv('./data/air_visit_data.csv'),
'as': pd.read_csv('./data/air_store_info.csv'),
'hs': pd.read_csv('./data/hpg_store_info.csv'),
'ar': pd.read_csv('./data/air_reserve.csv'),
'hr': pd.read_csv('./data/hpg_reserve.csv'),
'id': pd.read_csv('./data/store_id_relation.csv'),
'tes': pd.read_csv('./data/sample_submission.csv'),
'hol': pd.read_csv('./data/date_info.csv').rename(columns={'calendar_date':'visit_date'})
}
# add 'air_store_id' to the last of data['hr']
data['hr'] = pd.merge(data['hr'], data['id'], how='inner', on=['hpg_store_id'])
for df in ['ar', 'hr']:
# get year, month, day, get rid of time
data[df]['visit_datetime'] = pd.to_datetime(data[df]['visit_datetime'])
data[df]['visit_datetime'] = data[df]['visit_datetime'].dt.date
data[df]['reserve_datetime'] = pd.to_datetime(data[df]['reserve_datetime'])
data[df]['reserve_datetime'] = data[df]['reserve_datetime'].dt.date
data[df]['reserve_datetime_diff'] = data[df].apply(lambda r: (r['visit_datetime'] - r['reserve_datetime']).days,
axis=1)
tmp1 = data[df].groupby(['air_store_id', 'visit_datetime'], as_index=False)[
['reserve_datetime_diff', 'reserve_visitors']].sum().rename(
columns={'visit_datetime': 'visit_date', 'reserve_datetime_diff': 'rs1', 'reserve_visitors': 'rv1'})
tmp2 = data[df].groupby(['air_store_id', 'visit_datetime'], as_index=False)[
['reserve_datetime_diff', 'reserve_visitors']].mean().rename(
columns={'visit_datetime': 'visit_date', 'reserve_datetime_diff': 'rs2', 'reserve_visitors': 'rv2'})
data[df] = pd.merge(tmp1, tmp2, how='inner', on=['air_store_id', 'visit_date'])
data['tra']['visit_date'] = pd.to_datetime(data['tra']['visit_date'])
data['tra']['dow'] = data['tra']['visit_date'].dt.dayofweek
data['tra']['year'] = data['tra']['visit_date'].dt.year
data['tra']['month'] = data['tra']['visit_date'].dt.month
data['tra']['visit_date'] = data['tra']['visit_date'].dt.date
data['tes']['visit_date'] = data['tes']['id'].map(lambda x: str(x).split('_')[2])
data['tes']['air_store_id'] = data['tes']['id'].map(lambda x: '_'.join(x.split('_')[:2]))
data['tes']['visit_date'] = pd.to_datetime(data['tes']['visit_date'])
data['tes']['dow'] = data['tes']['visit_date'].dt.dayofweek
data['tes']['year'] = data['tes']['visit_date'].dt.year
data['tes']['month'] = data['tes']['visit_date'].dt.month
data['tes']['visit_date'] = data['tes']['visit_date'].dt.date
unique_stores = data['tes']['air_store_id'].unique()
# count week
stores = pd.concat([pd.DataFrame({'air_store_id': unique_stores, 'dow': [i]*len(unique_stores)}) for i in range(7)], axis=0, ignore_index=True).reset_index(drop=True)
tmp = data['tra'].groupby(['air_store_id','dow'], as_index=False)['visitors'].min().rename(columns={'visitors':'week_min_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id','dow'])
tmp = data['tra'].groupby(['air_store_id','dow'], as_index=False)['visitors'].mean().rename(columns={'visitors':'week_mean_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id','dow'])
tmp = data['tra'].groupby(['air_store_id','dow'], as_index=False)['visitors'].median().rename(columns={'visitors':'week_median_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id','dow'])
tmp = data['tra'].groupby(['air_store_id','dow'], as_index=False)['visitors'].max().rename(columns={'visitors':'week_max_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id','dow'])
tmp = data['tra'].groupby(['air_store_id','dow'], as_index=False)['visitors'].count().rename(columns={'visitors':'week_count_observations'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id','dow'])
# count all
tmp = data['tra'].groupby(['air_store_id'], as_index=False)['visitors'].min().rename(columns={'visitors':'all_min_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id'])
tmp = data['tra'].groupby(['air_store_id'], as_index=False)['visitors'].mean().rename(columns={'visitors':'all_mean_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id'])
tmp = data['tra'].groupby(['air_store_id'], as_index=False)['visitors'].median().rename(columns={'visitors':'all_median_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id'])
tmp = data['tra'].groupby(['air_store_id'], as_index=False)['visitors'].max().rename(columns={'visitors':'all_max_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id'])
tmp = data['tra'].groupby(['air_store_id'], as_index=False)['visitors'].count().rename(columns={'visitors':'all_count_observations'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id'])
# count year
stores1 = pd.concat([pd.DataFrame({'air_store_id': unique_stores})], axis=0, ignore_index=True).reset_index(drop=True)
data2016 = data['tra'][data['tra']['year'].isin([2016])]
data2017 = data['tra'][data['tra']['year'].isin([2017])]
# count 2016
tmp = data2016.groupby(['air_store_id','year'], as_index=False)['visitors'].min().rename(columns={'visitors':'2016_min_visitors'})
stores1 = pd.merge(stores1, tmp, how='left', on=['air_store_id'])
tmp = data2016.groupby(['air_store_id','year'], as_index=False)['visitors'].mean().rename(columns={'visitors':'2016_mean_visitors'})
stores1 = pd.merge(stores1, tmp, how='left', on=['air_store_id'])
tmp = data2016.groupby(['air_store_id','year'], as_index=False)['visitors'].median().rename(columns={'visitors':'2016_median_visitors'})
stores1 = pd.merge(stores1, tmp, how='left', on=['air_store_id'])
tmp = data2016.groupby(['air_store_id','year'], as_index=False)['visitors'].max().rename(columns={'visitors':'2016_max_visitors'})
stores1 = pd.merge(stores1, tmp, how='left', on=['air_store_id'])
tmp = data2016.groupby(['air_store_id','year'], as_index=False)['visitors'].count().rename(columns={'visitors':'2016_count_observations'})
stores1 = pd.merge(stores1, tmp, how='left', on=['air_store_id'])
# count 2017
tmp = data2017.groupby(['air_store_id','year'], as_index=False)['visitors'].min().rename(columns={'visitors':'2017_min_visitors'})
stores1 = pd.merge(stores1, tmp, how='left', on=['air_store_id'])
tmp = data2017.groupby(['air_store_id','year'], as_index=False)['visitors'].mean().rename(columns={'visitors':'2017_mean_visitors'})
stores1 = pd.merge(stores1, tmp, how='left', on=['air_store_id'])
tmp = data2017.groupby(['air_store_id','year'], as_index=False)['visitors'].median().rename(columns={'visitors':'2017_median_visitors'})
stores1 = pd.merge(stores1, tmp, how='left', on=['air_store_id'])
tmp = data2017.groupby(['air_store_id','year'], as_index=False)['visitors'].max().rename(columns={'visitors':'2017_max_visitors'})
stores1 = pd.merge(stores1, tmp, how='left', on=['air_store_id'])
tmp = data2017.groupby(['air_store_id','year'], as_index=False)['visitors'].count().rename(columns={'visitors':'2017_count_observations'})
stores1 = pd.merge(stores1, tmp, how='left', on=['air_store_id'])
stores = pd.merge(stores, stores1, how='left', on=['air_store_id'])
stores = pd.merge(stores, data['as'], how='left', on=['air_store_id'])
lbl = preprocessing.LabelEncoder()
stores['air_genre_name'] = lbl.fit_transform(stores['air_genre_name'])
stores['air_area_name'] = lbl.fit_transform(stores['air_area_name'])
data['hol']['visit_date'] = pd.to_datetime(data['hol']['visit_date'])
data['hol']['day_of_week'] = lbl.fit_transform(data['hol']['day_of_week'])
data['hol']['visit_date'] = data['hol']['visit_date'].dt.date
train = pd.merge(data['tra'], data['hol'], how='left', on=['visit_date'])
test = pd.merge(data['tes'], data['hol'], how='left', on=['visit_date'])
train = pd.merge(train, stores, how='left', on=['air_store_id','dow'])
test = pd.merge(test, stores, how='left', on=['air_store_id','dow'])
for df in ['ar','hr']:
# data[df].to_csv(df + '.csv')
train = | pd.merge(train, data[df], how='left', on=['air_store_id','visit_date']) | pandas.merge |
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
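# Minimal sketch (illustration only, not part of the original test suite):
# three knowledge-date updates for two sids, forward-filled through a short
# window ending at `end_date`. All dates and values are made up.
def _demo_expected_df():
    tuples = [
        (0, 10.0, pd.Timestamp('2015-01-05')),
        (0, 11.0, pd.Timestamp('2015-01-07')),
        (1, 20.0, pd.Timestamp('2015-01-06')),
    ]
    return create_expected_df_for_factor_compute(
        start_date=pd.Timestamp('2015-01-05'),
        sids=[0, 1],
        tuples=tuples,
        end_date=pd.Timestamp('2015-01-09'),
    )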
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
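    # For reference, the mapping handed to `make_loader` above is keyed by
    # the raw column names rather than by the BoundColumns; with the default
    # `make_columns` it is simply:
    #
    #     {'event_date': 'event_date',
    #      'fiscal_quarter': 'fiscal_quarter',
    #      'fiscal_year': 'fiscal_year',
    #      'estimate': 'estimate'}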
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
    test_load_one_day()
        Tests that we load the expected estimates for a single day when
        multiple estimate columns are requested.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the previous quarter loader returns the expected estimates
    for a single day when multiple estimate columns are requested.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the next quarter loader returns the expected estimates
    for a single day when multiple estimate columns are requested.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'),
'estimate1': 2.,
'estimate2': 4.,
FISCAL_QUARTER_FIELD_NAME: 2.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
dummy_df = pd.DataFrame({SID_FIELD_NAME: 0},
columns=[SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
'estimate'],
index=[0])
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1,
bad_dataset2,
good_dataset)
for c in dataset.columns}
p = Pipeline(columns)
with self.assertRaises(ValueError) as e:
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2")
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with self.assertRaises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = ["split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof"]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(itertools.product(
(NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader),
))
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
with self.assertRaises(ValueError):
loader(dummy_df,
{column.name: val for column, val in
columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"))
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp('2015-01-28')
q1_knowledge_dates = [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-04'),
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-11')]
q2_knowledge_dates = [pd.Timestamp('2015-01-14'),
pd.Timestamp('2015-01-17'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-23')]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')] # One day late
q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early
pd.Timestamp('2015-01-26')]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates +
cls.q2_knowledge_dates,
4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (q1e1 < q1e2 and
q2e1 < q2e2 and
# All estimates are < Q2's event, so just constrain Q1
# estimates.
q1e1 < cls.q1_release_dates[0] and
q1e2 < cls.q1_release_dates[0]):
sid_estimates.append(cls.create_estimates_df(q1e1,
q1e2,
q2e1,
q2e2,
sid))
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates +
sid_releases).reset_index(drop=True)
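    # For example, the permutation (q1e1, q1e2, q2e1, q2e2) =
    # (2015-01-01, 2015-01-04, 2015-01-14, 2015-01-17) is kept: the Q1
    # estimates are ordered and both precede the earliest Q1 release date
    # (2015-01-13), and the Q2 estimates are ordered.  A permutation such as
    # (2015-01-14, 2015-01-01, ...) is dropped because its first Q1 estimate
    # is neither before its second nor before the Q1 release.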
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
'estimate': [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid
})
@classmethod
def create_estimates_df(cls,
q1e1,
q1e2,
q2e1,
q2e2,
sid):
return pd.DataFrame({
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
'estimate': [.1, .2, .3, .4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
})
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert_true(sid_estimates.isnull().all().all())
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[self.get_expected_estimate(
q1_knowledge[q1_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
q2_knowledge[q2_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
date.tz_localize(None),
).set_index([[date]]) for date in sid_estimates.index],
axis=0)
assert_equal(all_expected[sid_estimates.columns],
sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
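    # Worked example of the selection above, using the fixture dates: if the
    # latest Q1 knowledge for a sid says the release is on 2015-01-13, then
    # on 2015-01-12 that release is still >= the simulation date, so the Q1
    # row is the expected 'next' estimate.  Once 2015-01-13 has passed, Q2's
    # row (if known) becomes the expected value, and an empty row indexed at
    # the simulation date is expected when neither applies.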
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateLoaderTestCase(NextEstimate):
"""
    Run the same tests as NextEstimate, but using a BlazeNextEstimatesLoader.
"""
@classmethod
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q2_knowledge.iloc[-1:]
elif (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
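    # Mirror image of the 'next' case above: on 2015-01-12 neither release
    # has happened yet, so no previous estimate is expected; from 2015-01-13
    # the Q1 row is expected, and once Q2's release (2015-01-26 in the
    # fixtures) has passed, the Q2 row takes over.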
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateLoaderTestCase(PreviousEstimate):
"""
    Run the same tests as PreviousEstimate, but using a
    BlazePreviousEstimatesLoader.
"""
@classmethod
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
        Runs a Pipeline that requests estimates for multiple quarters out and
        checks that the returned columns contain data for the correct number
        of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate': [1., 2.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(columns=[cls.columns[col] + '1'
for col in cls.columns] +
[cls.columns[col] + '2'
for col in cls.columns],
index=cls.trading_days)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ('1', '2')
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(
expected[expected_name]
)
else:
expected[expected_name] = expected[
expected_name
].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
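    # The expected frame therefore carries one suffixed column per requested
    # horizon, e.g. 'estimate1' and 'event_date1' for one quarter out and
    # 'estimate2' and 'event_date2' for two quarters out, matching the column
    # names built in `test_multiple_qtrs_requested` below.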
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge([{c.name + '1': c.latest for c in dataset1.columns},
{c.name + '2': c.latest for c in dataset2.columns}])
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + '1' for col in self.columns]
q2_columns = [col.name + '2' for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(sorted(np.array(q1_columns + q2_columns)),
sorted(results.columns.values))
assert_equal(self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1))
class NextEstimateMultipleQuarters(
WithEstimateMultipleQuarters, ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'),
raw_name + '1'
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'),
raw_name + '1'
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ['estimate', 'event_date']:
expected.loc[
pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'),
col_name + '2'
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-09'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 2
expected.loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 3
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-20'),
FISCAL_YEAR_FIELD_NAME + '2'
] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateMultipleQuarters(NextEstimateMultipleQuarters):
@classmethod
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class PreviousEstimateMultipleQuarters(
WithEstimateMultipleQuarters,
ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-19')
] = cls.events[raw_name].iloc[0]
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ['estimate', 'event_date']:
expected[col_name + '2'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[col_name].iloc[0]
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 4
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 2014
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 1
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateMultipleQuarters(PreviousEstimateMultipleQuarters):
@classmethod
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13')] * 2,
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-20')],
'estimate': [11., 12., 21.] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6
})
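    # Note the asymmetry encoded above: sid 0's Q1 event date is first
    # reported as 2015-01-12 and later pushed back to 2015-01-13, while sid
    # 1's Q1 event date is first reported as 2015-01-13 and later pulled
    # forward to 2015-01-12.  This lets the test below exercise both
    # directions of event-date revision.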
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError('assert_compute')
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=pd.Timestamp('2015-01-13', tz='utc'),
# last event date we have
end_date=pd.Timestamp('2015-01-14', tz='utc'),
)
class PreviousVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousVaryingNumEstimates(PreviousVaryingNumEstimates):
@classmethod
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class NextVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextVaryingNumEstimates(NextVaryingNumEstimates):
@classmethod
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
    Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
        A dictionary mapping the number of quarters out to snapshots of how
        the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp('2015-02-10')
window_test_start_date = pd.Timestamp('2015-01-05')
critical_dates = [pd.Timestamp('2015-01-09', tz='utc'),
pd.Timestamp('2015-01-15', tz='utc'),
pd.Timestamp('2015-01-20', tz='utc'),
pd.Timestamp('2015-01-26', tz='utc'),
pd.Timestamp('2015-02-05', tz='utc'),
pd.Timestamp('2015-02-10', tz='utc')]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-02-10'),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp('2015-01-18')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-04-01')],
'estimate': [100., 101.] + [200., 201.] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-22'), pd.Timestamp('2015-01-22'),
pd.Timestamp('2015-02-05'), pd.Timestamp('2015-02-05')],
'estimate': [110., 111.] + [310., 311.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10
})
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-07'),
cls.window_test_start_date,
pd.Timestamp('2015-01-17')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10')],
'estimate': [120., 121.] + [220., 221.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20
})
concatted = pd.concat([sid_0_timeline,
sid_10_timeline,
sid_20_timeline]).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [sid for i in range(len(sids) - 1)
for sid in range(sids[i], sids[i+1])] + [sids[-1]]
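    # With event sids of 0, 10, and 20, this expands to every integer sid
    # from 0 through 20, so sids 1-9 and 11-19 exist in the asset finder but
    # have no estimates data of their own.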
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids()
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(self,
start_date,
num_announcements_out):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date) -
self.trading_days.get_loc(self.window_test_start_date) + 1
)
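        # For example, for the 2015-01-09 start date the window runs from
        # 2015-01-05 (the window test start date) through 2015-01-09, which
        # is five sessions assuming no holidays fall in that span, so
        # window_len would be 5.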
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = timelines[
num_announcements_out
].loc[today].reindex(
trading_days[:today_idx + 1]
).values
timeline_start_idx = (len(today_timeline) - window_len)
assert_almost_equal(estimate,
today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp('2015-02-10', tz='utc'),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-21')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111, pd.Timestamp('2015-01-22')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 221, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-02-09')] +
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateWindows(PreviousEstimateWindows):
@classmethod
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(bz.data(events), columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
    @classmethod
    def make_loader(cls, events, columns):
        return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-09')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-20')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-01-22')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 310, pd.Timestamp('2015-01-09')),
(10, 311, pd.Timestamp('2015-01-15')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-02-10')
)
])
twoq_next = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-11')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-16')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-01-20')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-02-10')]
)
return {
1: oneq_next,
2: twoq_next
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateWindows(NextEstimateWindows):
@classmethod
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(bz.data(events), columns)
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
    Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp('2015-01-14')
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-09'),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp('2015-01-20')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20')],
'estimate': [130., 131., 230., 231.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30
})
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
        # split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [140., 240.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40
})
        # An extra sid to test all splits on or before the
        # split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [150., 250.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50
})
return pd.concat([
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
])
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
        # split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame({
SID_FIELD_NAME: 0,
'ratio': (-1., 2., 3., 4., 5., 6., 7., 100),
'effective_date': (pd.Timestamp('2014-01-01'), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp('2015-01-07'),
# Split before Q1 event
pd.Timestamp('2015-01-09'),
# Split before Q1 event
pd.Timestamp('2015-01-13'),
# Split before Q1 event
pd.Timestamp('2015-01-15'),
# Split before Q1 event
pd.Timestamp('2015-01-18'),
# Split after Q1 event and before Q2 event
pd.Timestamp('2015-01-30'),
# Filter out - this is after our date index
pd.Timestamp('2016-01-01'))
})
sid_10_splits = pd.DataFrame({
SID_FIELD_NAME: 10,
'ratio': (.2, .3),
'effective_date': (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp('2015-01-07'),
# Apply a single split before Q1 event.
pd.Timestamp('2015-01-20')),
})
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame({
SID_FIELD_NAME: 20,
'ratio': (.4, .5, .6, .7, .8, .9,),
'effective_date': (
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18'),
pd.Timestamp('2015-01-30')),
})
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame({
SID_FIELD_NAME: 30,
'ratio': (8, 9, 10, 11, 12),
'effective_date': (
# Split before the event and before the
# split-asof-date.
pd.Timestamp('2015-01-07'),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp('2015-01-09'),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18')),
})
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame({
SID_FIELD_NAME: 40,
'ratio': (13, 14),
'effective_date': (
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-22')
)
})
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame({
SID_FIELD_NAME: 50,
'ratio': (15, 16),
'effective_date': (
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')
)
})
return pd.concat([
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
])
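    # Worked example of the un-apply/re-apply behaviour these splits are
    # designed to exercise (the values appear in the expected timelines
    # below): sid 50's splits (ratio 15 on 2015-01-13 and 16 on 2015-01-14)
    # all land on or before the split-adjusted-asof-date, so in the previous
    # case its raw estimate of 150 is expected to show up as
    # 150 * 1/15 * 1/16 before 2015-01-13, as 150 * 1/16 on 2015-01-13, and
    # as 150 from 2015-01-14 onward.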
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows,
ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-12')
]),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150. * 1 / 16, pd.Timestamp('2015-01-09')),
], pd.Timestamp('2015-01-13')),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))
], pd.Timestamp('2015-01-14')),
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-01-21')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-01-29')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-30', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 221*.8*.9, pd.Timestamp('2015-02-10')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 240.*13*14, pd.Timestamp('2015-02-10')),
(50, 250., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-19')] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11*12, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-02-09')] +
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-02-10')),
(30, 131*11*12, pd.Timestamp('2015-01-20')),
(40, 140. * 13 * 14, pd.Timestamp('2015-02-10')),
(50, 150., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousWithSplitAdjustedWindows(PreviousWithSplitAdjustedWindows):
@classmethod
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousSplitAdjustedEstimatesLoader(
bz.data(events),
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
class NextWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100*1/4, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120*5/3, cls.window_test_start_date),
(20, 121*5/3, pd.Timestamp('2015-01-07')),
(30, 130*1/10, cls.window_test_start_date),
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140, pd.Timestamp('2015-01-09')),
(50, 150.*1/15*1/16, pd.Timestamp('2015-01-09'))],
pd.Timestamp('2015-01-09')
),
cls.create_expected_df_for_factor_compute(
[(0, 100*1/4, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120*5/3, cls.window_test_start_date),
(20, 121*5/3, pd.Timestamp('2015-01-07')),
(30, 230*1/10, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250.*1/15*1/16, pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-12')
),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07')),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250.*1/16, pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-13')
),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07')),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-14')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100*5, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120*.7, cls.window_test_start_date),
(20, 121*.7, pd.Timestamp('2015-01-07')),
(30, 230*11, cls.window_test_start_date),
(40, 240, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100*5*6, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110*.3, pd.Timestamp('2015-01-09')),
(10, 111*.3, pd.Timestamp('2015-01-12')),
(20, 120*.7*.8, cls.window_test_start_date),
(20, 121*.7*.8, pd.Timestamp('2015-01-07')),
(30, 230*11*12, cls.window_test_start_date),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 240*13, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 200 * 5 * 6, pd.Timestamp('2015-01-12')),
(10, 110 * .3, pd.Timestamp('2015-01-09')),
(10, 111 * .3, pd.Timestamp('2015-01-12')),
(20, 220 * .7 * .8, cls.window_test_start_date),
(20, 221 * .8, pd.Timestamp('2015-01-17')),
(40, 240 * 13, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-21')
),
cls.create_expected_df_for_factor_compute(
[(0, 200 * 5 * 6, pd.Timestamp('2015-01-12')),
(10, 110 * .3, pd.Timestamp('2015-01-09')),
(10, 111 * .3, pd.Timestamp('2015-01-12')),
(20, 220 * .7 * .8, cls.window_test_start_date),
(20, 221 * .8, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-22')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6, pd.Timestamp('2015-01-12')),
(10, 310*.3, pd.Timestamp('2015-01-09')),
(10, 311*.3, pd.Timestamp('2015-01-15')),
(20, 220*.7*.8, cls.window_test_start_date),
(20, 221*.8, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-01-29')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(10, 310*.3, pd.Timestamp('2015-01-09')),
(10, 311*.3, pd.Timestamp('2015-01-15')),
(20, 220*.7*.8*.9, cls.window_test_start_date),
(20, 221*.8*.9, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-30', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7*.8*.9, cls.window_test_start_date),
(20, 221*.8*.9, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7*.8*.9, cls.window_test_start_date),
(20, 221*.8*.9, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-02-10')
)
])
twoq_next = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220*5/3, cls.window_test_start_date),
(30, 230*1/10, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
(50, np.NaN, cls.window_test_start_date)],
pd.Timestamp('2015-01-09')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, 200*1/4, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*5/3, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
pd.Timestamp('2015-01-12')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-13', '2015-01-14')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200*5, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-15', '2015-01-16')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200*5*6, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7*.8, cls.window_test_start_date),
(20, 221*.8, pd.Timestamp('2015-01-17')),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
pd.Timestamp('2015-01-20')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-02-10')]
)
return {
1: oneq_next,
2: twoq_next
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextWithSplitAdjustedWindows(NextWithSplitAdjustedWindows):
@classmethod
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextSplitAdjustedEstimatesLoader(
bz.data(events),
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
class WithSplitAdjustedMultipleEstimateColumns(WithEstimates):
"""
ZiplineTestCase mixin for having multiple estimate columns that are
split-adjusted to make sure that adjustments are applied correctly.
Attributes
----------
test_start_date : pd.Timestamp
The start date of the test.
test_end_date : pd.Timestamp
        The end date of the test.
split_adjusted_asof : pd.Timestamp
The split-adjusted-asof-date of the data used in the test, to be used
to create all loaders of test classes that subclass this mixin.
Methods
-------
make_expected_timelines_1q_out -> dict[pd.Timestamp -> dict[str ->
np.array]]
The expected array of results for each date of the date range for
each column. Only for 1 quarter out.
make_expected_timelines_2q_out -> dict[pd.Timestamp -> dict[str ->
np.array]]
The expected array of results for each date of the date range. For 2
quarters out, so only for the column that is requested to be loaded
with 2 quarters out.
Tests
-----
test_adjustments_with_multiple_adjusted_columns
Tests that if you have multiple columns, we still split-adjust
correctly.
test_multiple_datasets_different_num_announcements
Tests that if you have multiple datasets that ask for a different
number of quarters out, and each asks for a different estimates column,
we still split-adjust correctly.
"""
END_DATE = pd.Timestamp('2015-02-10')
test_start_date = pd.Timestamp('2015-01-06', tz='utc')
test_end_date = pd.Timestamp('2015-01-12', tz='utc')
split_adjusted_asof = pd.Timestamp('2015-01-08')
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
sid_0_events = pd.DataFrame({
# We only want a stale KD here so that adjustments
# will be applied.
TS_FIELD_NAME: [pd.Timestamp('2015-01-05'),
pd.Timestamp('2015-01-05')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12')],
'estimate1': [1100., 1200.],
'estimate2': [2100., 2200.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# This is just an extra sid to make sure that we apply adjustments
# correctly for multiple columns when we have multiple sids.
sid_1_events = pd.DataFrame({
# We only want a stale KD here so that adjustments
# will be applied.
TS_FIELD_NAME: [pd.Timestamp('2015-01-05'),
pd.Timestamp('2015-01-05')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-08'),
pd.Timestamp('2015-01-11')],
'estimate1': [1110., 1210.],
'estimate2': [2110., 2210.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 1,
})
return pd.concat([sid_0_events, sid_1_events])
@classmethod
def make_splits_data(cls):
sid_0_splits = pd.DataFrame({
SID_FIELD_NAME: 0,
'ratio': (.3, 3.),
'effective_date': (pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-09')),
})
sid_1_splits = pd.DataFrame({
SID_FIELD_NAME: 1,
'ratio': (.4, 4.),
'effective_date': (pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-09')),
})
return pd.concat([sid_0_splits, sid_1_splits])
@classmethod
def make_expected_timelines_1q_out(cls):
return {}
@classmethod
def make_expected_timelines_2q_out(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(
WithSplitAdjustedMultipleEstimateColumns, cls
).init_class_fixtures()
cls.timelines_1q_out = cls.make_expected_timelines_1q_out()
cls.timelines_2q_out = cls.make_expected_timelines_2q_out()
def test_adjustments_with_multiple_adjusted_columns(self):
dataset = MultipleColumnsQuartersEstimates(1)
timelines = self.timelines_1q_out
window_len = 3
class SomeFactor(CustomFactor):
inputs = [dataset.estimate1, dataset.estimate2]
window_length = window_len
def compute(self, today, assets, out, estimate1, estimate2):
assert_almost_equal(estimate1, timelines[today]['estimate1'])
assert_almost_equal(estimate2, timelines[today]['estimate2'])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=self.test_start_date,
# last event date we have
end_date=self.test_end_date,
)
def test_multiple_datasets_different_num_announcements(self):
dataset1 = MultipleColumnsQuartersEstimates(1)
dataset2 = MultipleColumnsQuartersEstimates(2)
timelines_1q_out = self.timelines_1q_out
timelines_2q_out = self.timelines_2q_out
window_len = 3
class SomeFactor1(CustomFactor):
inputs = [dataset1.estimate1]
window_length = window_len
def compute(self, today, assets, out, estimate1):
assert_almost_equal(
estimate1, timelines_1q_out[today]['estimate1']
)
class SomeFactor2(CustomFactor):
inputs = [dataset2.estimate2]
window_length = window_len
def compute(self, today, assets, out, estimate2):
assert_almost_equal(
estimate2, timelines_2q_out[today]['estimate2']
)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est1': SomeFactor1(), 'est2': SomeFactor2()}),
start_date=self.test_start_date,
# last event date we have
end_date=self.test_end_date,
)
class PreviousWithSplitAdjustedMultipleEstimateColumns(
WithSplitAdjustedMultipleEstimateColumns, ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate1', 'estimate2'],
split_adjusted_asof=cls.split_adjusted_asof,
)
@classmethod
def make_expected_timelines_1q_out(cls):
return {
pd.Timestamp('2015-01-06', tz='utc'): {
'estimate1': np.array([[np.NaN, np.NaN]] * 3),
'estimate2': np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp('2015-01-07', tz='utc'): {
'estimate1': np.array([[np.NaN, np.NaN]] * 3),
'estimate2': np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp('2015-01-08', tz='utc'): {
'estimate1': np.array([[np.NaN, np.NaN]] * 2 +
[[np.NaN, 1110.]]),
'estimate2': np.array([[np.NaN, np.NaN]] * 2 +
[[np.NaN, 2110.]])
},
pd.Timestamp('2015-01-09', tz='utc'): {
'estimate1': np.array([[np.NaN, np.NaN]] +
[[np.NaN, 1110. * 4]] +
[[1100 * 3., 1110. * 4]]),
'estimate2': np.array([[np.NaN, np.NaN]] +
[[np.NaN, 2110. * 4]] +
[[2100 * 3., 2110. * 4]])
},
pd.Timestamp('2015-01-12', tz='utc'): {
'estimate1': np.array([[np.NaN, np.NaN]] * 2 +
[[1200 * 3., 1210. * 4]]),
'estimate2': np.array([[np.NaN, np.NaN]] * 2 +
[[2200 * 3., 2210. * 4]])
}
}
@classmethod
def make_expected_timelines_2q_out(cls):
return {
pd.Timestamp('2015-01-06', tz='utc'): {
'estimate2': np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp('2015-01-07', tz='utc'): {
'estimate2': np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp('2015-01-08', tz='utc'): {
'estimate2': np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp('2015-01-09', tz='utc'): {
'estimate2': np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp('2015-01-12', tz='utc'): {
'estimate2': np.array([[np.NaN, np.NaN]] * 2 +
[[2100 * 3., 2110. * 4]])
}
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousWithMultipleEstimateColumns(
PreviousWithSplitAdjustedMultipleEstimateColumns
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousSplitAdjustedEstimatesLoader(
bz.data(events),
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate1', 'estimate2'],
split_adjusted_asof=cls.split_adjusted_asof,
)
class NextWithSplitAdjustedMultipleEstimateColumns(
WithSplitAdjustedMultipleEstimateColumns, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate1', 'estimate2'],
split_adjusted_asof=cls.split_adjusted_asof,
)
@classmethod
def make_expected_timelines_1q_out(cls):
return {
pd.Timestamp('2015-01-06', tz='utc'): {
'estimate1': np.array([[np.NaN, np.NaN]] +
[[1100. * 1/.3, 1110. * 1/.4]] * 2),
'estimate2': np.array([[np.NaN, np.NaN]] +
[[2100. * 1/.3, 2110. * 1/.4]] * 2),
},
pd.Timestamp('2015-01-07', tz='utc'): {
'estimate1': np.array([[1100., 1110.]] * 3),
'estimate2': np.array([[2100., 2110.]] * 3)
},
pd.Timestamp('2015-01-08', tz='utc'): {
'estimate1': np.array([[1100., 1110.]] * 3),
'estimate2': np.array([[2100., 2110.]] * 3)
},
pd.Timestamp('2015-01-09', tz='utc'): {
'estimate1': np.array([[1100 * 3., 1210. * 4]] * 3),
'estimate2': np.array([[2100 * 3., 2210. * 4]] * 3)
},
pd.Timestamp('2015-01-12', tz='utc'): {
'estimate1': np.array([[1200 * 3., np.NaN]] * 3),
'estimate2': np.array([[2200 * 3., np.NaN]] * 3)
}
}
@classmethod
def make_expected_timelines_2q_out(cls):
return {
pd.Timestamp('2015-01-06', tz='utc'): {
'estimate2': np.array([[np.NaN, np.NaN]] +
[[2200 * 1/.3, 2210. * 1/.4]] * 2)
},
pd.Timestamp('2015-01-07', tz='utc'): {
'estimate2': np.array([[2200., 2210.]] * 3)
},
pd.Timestamp('2015-01-08', tz='utc'): {
'estimate2': np.array([[2200, 2210.]] * 3)
},
pd.Timestamp('2015-01-09', tz='utc'): {
'estimate2': np.array([[2200 * 3., np.NaN]] * 3)
},
pd.Timestamp('2015-01-12', tz='utc'): {
'estimate2': np.array([[np.NaN, np.NaN]] * 3)
}
}
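# Reading the expected arrays above (hedged summary, comments only): each row of a window is
# one trading day and each column is one sid (sid 0, then sid 1). With split ratios (.3, 3.)
# for sid 0, (.4, 4.) for sid 1, and a split_adjusted_asof of 2015-01-08, only the second
# ratio of each pair takes effect after the asof date, which appears to be why post-split
# values show up as 1100 * 3. / 2100 * 3. for sid 0 and 1110. * 4 / 2110. * 4 for sid 1,
# while next-quarter values observed before the asof date are scaled by 1/.3 and 1/.4 instead.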
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextWithMultipleEstimateColumns(
NextWithSplitAdjustedMultipleEstimateColumns
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextSplitAdjustedEstimatesLoader(
bz.data(events),
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate1', 'estimate2'],
split_adjusted_asof=cls.split_adjusted_asof,
)
class WithAdjustmentBoundaries(WithEstimates):
"""
ZiplineTestCase mixin providing class-level attributes, methods,
and a test to make sure that when the split-adjusted-asof-date is not
strictly within the date index, we can still apply adjustments correctly.
Attributes
----------
split_adjusted_before_start : pd.Timestamp
A split-adjusted-asof-date before the start date of the test.
split_adjusted_after_end : pd.Timestamp
        A split-adjusted-asof-date after the end date of the test.
split_adjusted_asof_dates : list of tuples of pd.Timestamp
All the split-adjusted-asof-dates over which we want to parameterize
the test.
Methods
-------
make_expected_out -> dict[pd.Timestamp -> pd.DataFrame]
A dictionary of the expected output of the pipeline at each of the
dates of interest.
"""
START_DATE = pd.Timestamp('2015-01-04')
# We want to run the pipeline starting from `START_DATE`, but the
# pipeline results will start from the next day, which is
# `test_start_date`.
test_start_date = pd.Timestamp('2015-01-05')
END_DATE = test_end_date = pd.Timestamp('2015-01-12')
split_adjusted_before_start = (
test_start_date - timedelta(days=1)
)
split_adjusted_after_end = (
test_end_date + timedelta(days=1)
)
# Must parametrize over this because there can only be 1 such date for
# each set of data.
split_adjusted_asof_dates = [(test_start_date,),
(test_end_date,),
(split_adjusted_before_start,),
(split_adjusted_after_end,)]
@classmethod
def init_class_fixtures(cls):
super(WithAdjustmentBoundaries, cls).init_class_fixtures()
cls.s0 = cls.asset_finder.retrieve_asset(0)
cls.s1 = cls.asset_finder.retrieve_asset(1)
cls.s2 = cls.asset_finder.retrieve_asset(2)
cls.s3 = cls.asset_finder.retrieve_asset(3)
cls.s4 = cls.asset_finder.retrieve_asset(4)
cls.expected = cls.make_expected_out()
@classmethod
def make_events(cls):
# We can create a sid for each configuration of dates for KDs, events,
# and splits. For this test we don't care about overwrites so we only
# test 1 quarter.
sid_0_timeline = pd.DataFrame({
# KD on first date of index
TS_FIELD_NAME: cls.test_start_date,
            EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-09'),
""""Interface between the JS Download script and the Database storage"""
import os
from datetime import datetime as dt
import time
import json
import argparse
from Naked.toolshed.shell import execute_js, muterun_js
import pandas as pd
from database import instance as db
from tools import exp_to_int
blockSeries = 5000
attemptsThreshold = 10
logsSeparate = False
parseToInt = False
tempFilename = 'data/temp.json'
dataDownloaderScript = '--max-old-space-size=16384 data-downloader.js'
def loadRawData(filepath):
start = time.time()
try:
with open(filepath) as json_data:
loadedData = json.load(json_data)
print("Loading the data took "+str(time.time() - start)+" seconds")
return loadedData
except FileNotFoundError:
return None
def convertTimestamp(x):
if 'date' in x:
key = 'date'
elif 'time' in x:
key = 'time'
else:
raise ValueError('Unsupported timestamp format in given data %s.' % x.keys())
x['date'] = dt.utcfromtimestamp(x.pop(key, None)) #remove the old key, convert to date and replace it with 'date'
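# Illustrative usage (hypothetical helper; the sample tick is made up): convertTimestamp
# mutates the tick in place, replacing the raw epoch key with a UTC datetime under 'date'.
def _example_convert_timestamp():
    tick = {'time': 1515151515, 'open': 1.0}
    convertTimestamp(tick)
    return tick  # -> {'open': 1.0, 'date': datetime.datetime(2018, 1, 5, 11, 25, 15)}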
def parseInt(data):
if type(data) == dict:
for key in data: #parse to int
if type(data[key]) == str:
try:
base = 10
if data[key].startswith('0x'):
base = 16
data[key] = int(data[key], base=base)
except ValueError:
pass
elif type(data) == list:
for i, val in enumerate(data):
if type(val) == str:
try:
base = 10
if val.startswith('0x'):
base = 16
data[i] = int(val, base=base)
except ValueError:
pass
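# Illustrative usage (hypothetical helper; values are made up): parseInt converts decimal and
# hex strings in place and silently leaves anything that cannot be parsed.
def _example_parse_int():
    payload = {'blockNumber': '0x10', 'gasUsed': '21000', 'miner': 'not-a-number'}
    parseInt(payload)
    return payload  # -> {'blockNumber': 16, 'gasUsed': 21000, 'miner': 'not-a-number'}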
def processRawCourseData(data):
start = time.time()
for x in data:
convertTimestamp(x)
def downloadCourse():
callDataDownloaderCourse(tempFilename)
data = loadRawData(tempFilename) #get it
os.remove(tempFilename)
print("Downloaded data with length "+str(len(data))+" ticks") #debug
processRawCourseData(data) #process a bit to make it suitable for storage
    df = pd.DataFrame(data)
# Copyright 2019 <NAME>.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Plot Service will make use of appropriately decorated functions in this module.
import datetime
import logging
import re
import time
from collections import namedtuple
from enum import auto
from numbers import Real
from dateutil import tz
import cachetools.func
import numpy as np
import pandas as pd
from pandas import Series
from pandas.tseries.holiday import Holiday, AbstractHolidayCalendar, USMemorialDay, USLaborDay, USThanksgivingDay, \
nearest_workday
from gs_quant.api.gs.assets import GsIdType
from gs_quant.api.gs.data import GsDataApi
from gs_quant.api.gs.data import QueryType
from gs_quant.data.core import DataContext
from gs_quant.data.fields import Fields
from gs_quant.datetime.gscalendar import GsCalendar
from gs_quant.datetime.point import relative_days_add
from gs_quant.errors import MqTypeError, MqValueError
from gs_quant.markets.securities import *
from gs_quant.markets.securities import Asset, AssetIdentifier, SecurityMaster
from gs_quant.target.common import AssetClass, FieldFilterMap, AssetType, Currency
from gs_quant.timeseries.helper import log_return, plot_measure
GENERIC_DATE = Union[datetime.date, str]
TD_ONE = datetime.timedelta(days=1)
_logger = logging.getLogger(__name__)
MeasureDependency: namedtuple = namedtuple("MeasureDependency", ["id_provider", "query_type"])
# TODO: get NERC Calendar from SecDB
class NercCalendar(AbstractHolidayCalendar):
rules = [
Holiday('New Years Day', month=1, day=1, observance=nearest_workday),
USMemorialDay,
Holiday('July 4th', month=7, day=4, observance=nearest_workday),
USLaborDay,
USThanksgivingDay,
Holiday('Christmas', month=12, day=25, observance=nearest_workday)
]
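# Hedged usage sketch (comments only; not exercised elsewhere in this module): the NERC
# holiday calendar can be turned into a business-day offset in the usual pandas way.
#   from pandas.tseries.offsets import CustomBusinessDay
#   nerc_bd = CustomBusinessDay(calendar=NercCalendar())
#   pd.date_range('2019-07-03', periods=3, freq=nerc_bd)  # skips July 4th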
def _to_fx_strikes(strikes):
out = []
for strike in strikes:
if strike == 50:
out.append('ATMS')
elif strike < 50:
out.append(f'{round(strike)}DC')
else:
out.append(f'{round(abs(100 - strike))}DP')
return out
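# Worked examples for the helper above (comments only):
#   _to_fx_strikes([25, 75, 50]) -> ['25DC', '25DP', 'ATMS']
#   i.e. strikes below 50 map to delta calls, above 50 to delta puts, and 50 to at-the-money spot.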
class SkewReference(Enum):
DELTA = 'delta'
NORMALIZED = 'normalized'
SPOT = 'spot'
FORWARD = 'forward'
class VolReference(Enum):
DELTA_CALL = 'delta_call'
DELTA_PUT = 'delta_put'
DELTA_NEUTRAL = 'delta_neutral'
NORMALIZED = 'normalized'
SPOT = 'spot'
FORWARD = 'forward'
class VolSmileReference(Enum):
SPOT = 'spot'
FORWARD = 'forward'
class EdrDataReference(Enum):
DELTA_CALL = 'delta_call'
DELTA_PUT = 'delta_put'
FORWARD = 'forward'
class ForeCastHorizon(Enum):
THREE_MONTH = '3m'
SIX_MONTH = '6m'
ONE_YEAR = '1y'
EOY1 = 'EOY1'
EOY2 = 'EOY2'
EOY3 = 'EOY3'
EOY4 = 'EOY4'
class BenchmarkType(Enum):
LIBOR = 'LIBOR'
EURIBOR = 'EURIBOR'
STIBOR = 'STIBOR'
OIS = 'OIS'
class RatesConversionType(Enum):
DEFAULT_BENCHMARK_RATE = auto()
INFLATION_BENCHMARK_RATE = auto()
CROSS_CURRENCY_BASIS = auto()
CURRENCY_TO_DEFAULT_RATE_BENCHMARK = {
'USD': 'USD-LIBOR-BBA',
'EUR': 'EUR-EURIBOR-Telerate',
'GBP': 'GBP-LIBOR-BBA',
'JPY': 'JPY-LIBOR-BBA'
}
CURRENCY_TO_INFLATION_RATE_BENCHMARK = {
'GBP': 'CPI-UKRPI',
'EUR': 'CPI-CPXTEMU'
}
CROSS_TO_CROSS_CURRENCY_BASIS = {
'JPYUSD': 'USD-3m/JPY-3m',
'USDJPY': 'USD-3m/JPY-3m',
'USDEUR': 'EUR-3m/USD-3m',
'EURUSD': 'EUR-3m/USD-3m',
'USDGBP': 'GBP-3m/USD-3m',
'GBPUSD': 'GBP-3m/USD-3m'
}
def cross_stored_direction_for_fx_vol(asset_id: str) -> str:
result_id = asset_id
try:
asset = SecurityMaster.get_asset(asset_id, AssetIdentifier.MARQUEE_ID)
if asset.asset_class is AssetClass.FX:
bbid = asset.get_identifier(AssetIdentifier.BLOOMBERG_ID)
if bbid is not None:
legit_usd_cross = str.startswith(bbid, "USD") and not str.endswith(bbid, ("EUR", "GBP", "NZD", "AUD"))
legit_eur_cross = str.startswith(bbid, "EUR")
legit_jpy_cross = str.endswith(bbid, "JPY") and not str.startswith(bbid, ("KRW", "IDR", "CLP", "COP"))
odd_cross = bbid in ("EURUSD", "GBPUSD", "NZDUSD", "AUDUSD", "JPYKRW", "JPYIDR", "JPYCLP", "JPYCOP")
if not legit_usd_cross and not legit_eur_cross and not legit_jpy_cross and not odd_cross:
cross = bbid[3:] + bbid[:3]
cross_asset = SecurityMaster.get_asset(cross, AssetIdentifier.BLOOMBERG_ID)
result_id = cross_asset.get_marquee_id()
except TypeError:
result_id = asset_id
return result_id
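# Hedged illustration: the helper below is hypothetical and only restates the re-ordering rule
# above on a raw Bloomberg cross string, without the SecurityMaster lookups.
def _illustrate_fx_vol_direction(bbid: str) -> str:
    legit_usd_cross = bbid.startswith("USD") and not bbid.endswith(("EUR", "GBP", "NZD", "AUD"))
    legit_eur_cross = bbid.startswith("EUR")
    legit_jpy_cross = bbid.endswith("JPY") and not bbid.startswith(("KRW", "IDR", "CLP", "COP"))
    odd_cross = bbid in ("EURUSD", "GBPUSD", "NZDUSD", "AUDUSD", "JPYKRW", "JPYIDR", "JPYCLP", "JPYCOP")
    if legit_usd_cross or legit_eur_cross or legit_jpy_cross or odd_cross:
        return bbid
    return bbid[3:] + bbid[:3]
# e.g. _illustrate_fx_vol_direction('CADUSD') -> 'USDCAD'; 'EURUSD' and 'USDJPY' are unchanged.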
def cross_to_usd_based_cross(asset_id: str) -> str:
result_id = asset_id
try:
asset = SecurityMaster.get_asset(asset_id, AssetIdentifier.MARQUEE_ID)
if asset.asset_class is AssetClass.FX:
bbid = asset.get_identifier(AssetIdentifier.BLOOMBERG_ID)
if bbid is not None and not str.startswith(bbid, "USD"):
cross = bbid[3:] + bbid[:3]
cross_asset = SecurityMaster.get_asset(cross, AssetIdentifier.BLOOMBERG_ID)
result_id = cross_asset.get_marquee_id()
except TypeError:
result_id = asset_id
return result_id
def currency_to_default_benchmark_rate(asset_id: str) -> str:
try:
asset = SecurityMaster.get_asset(asset_id, AssetIdentifier.MARQUEE_ID)
result = convert_asset_for_rates_data_set(asset, RatesConversionType.DEFAULT_BENCHMARK_RATE)
except TypeError:
result = asset_id
return result
def currency_to_inflation_benchmark_rate(asset_id: str) -> str:
try:
asset = SecurityMaster.get_asset(asset_id, AssetIdentifier.MARQUEE_ID)
result = convert_asset_for_rates_data_set(asset, RatesConversionType.INFLATION_BENCHMARK_RATE)
except TypeError:
result = asset_id
return result
def cross_to_basis(asset_id: str) -> str:
try:
asset = SecurityMaster.get_asset(asset_id, AssetIdentifier.MARQUEE_ID)
result = convert_asset_for_rates_data_set(asset, RatesConversionType.CROSS_CURRENCY_BASIS)
except TypeError:
result = asset_id
return result
def convert_asset_for_rates_data_set(from_asset: Asset, c_type: RatesConversionType) -> str:
try:
bbid = from_asset.get_identifier(AssetIdentifier.BLOOMBERG_ID)
if bbid is None:
return from_asset.get_marquee_id()
if c_type is RatesConversionType.DEFAULT_BENCHMARK_RATE:
to_asset = CURRENCY_TO_DEFAULT_RATE_BENCHMARK[bbid]
elif c_type is RatesConversionType.INFLATION_BENCHMARK_RATE:
to_asset = CURRENCY_TO_INFLATION_RATE_BENCHMARK[bbid]
else:
to_asset = CROSS_TO_CROSS_CURRENCY_BASIS[bbid]
return GsAssetApi.map_identifiers(GsIdType.mdapi, GsIdType.id, [to_asset])[to_asset]
except KeyError:
        logging.info(f'Unsupported currency or cross {bbid}')
        raise MqValueError(f'Unsupported currency or cross {bbid}')
def _get_custom_bd(exchange):
from pandas.tseries.offsets import CustomBusinessDay
calendar = GsCalendar.get(exchange).business_day_calendar()
return CustomBusinessDay(calendar=calendar)
@log_return(_logger, 'trying pricing dates')
def _range_from_pricing_date(exchange, pricing_date: Optional[GENERIC_DATE] = None):
if isinstance(pricing_date, datetime.date):
return pricing_date, pricing_date
today = pd.Timestamp.today().normalize()
if pricing_date is None:
t1 = today - _get_custom_bd(exchange)
return t1, t1
assert isinstance(pricing_date, str)
matcher = re.fullmatch('(\\d+)b', pricing_date)
if matcher:
start = end = today - _get_custom_bd(exchange) * int(matcher.group(1))
else:
end = today - datetime.timedelta(days=relative_days_add(pricing_date, True))
start = end - _get_custom_bd(exchange)
return start, end
def _to_offset(tenor: str) -> pd.DateOffset:
import re
matcher = re.fullmatch('(\\d+)([dwmy])', tenor)
if not matcher:
raise ValueError('invalid tenor ' + tenor)
ab = matcher.group(2)
if ab == 'd':
name = 'days'
elif ab == 'w':
name = 'weeks'
elif ab == 'm':
name = 'months'
else:
assert ab == 'y'
name = 'years'
kwarg = {name: int(matcher.group(1))}
return pd.DateOffset(**kwarg)
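# Worked examples for the tenor parser above (comments only):
#   _to_offset('5d') -> pd.DateOffset(days=5)
#   _to_offset('2w') -> pd.DateOffset(weeks=2)
#   _to_offset('3m') -> pd.DateOffset(months=3)
#   _to_offset('1y') -> pd.DateOffset(years=1)
#   anything else, e.g. '3x' or 'm3', raises ValueError.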
def _market_data_timed(q):
start = time.perf_counter()
df = GsDataApi.get_market_data(q)
_logger.debug('market data query ran in %.3f ms', (time.perf_counter() - start) * 1000)
return df
@plot_measure((AssetClass.FX, AssetClass.Equity), None, [MeasureDependency(
id_provider=cross_stored_direction_for_fx_vol, query_type=QueryType.IMPLIED_VOLATILITY)])
def skew(asset: Asset, tenor: str, strike_reference: SkewReference, distance: Real, *, location: str = 'NYC',
source: str = None, real_time: bool = False) -> Series:
"""
Difference in implied volatility of equidistant out-of-the-money put and call options.
:param asset: asset object loaded from security master
:param tenor: relative date representation of expiration date e.g. 1m
:param strike_reference: reference for strike level (for equities)
:param distance: distance from at-the-money option
:param location: location at which a price fixing has been taken (for FX assets)
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: skew curve
"""
if real_time:
raise MqValueError('real-time skew not supported')
if strike_reference in (SkewReference.DELTA, None):
b = 50
elif strike_reference == SkewReference.NORMALIZED:
b = 0
else:
b = 100
kwargs = {}
if strike_reference in (SkewReference.DELTA, None):
# using delta call strikes so X DP is represented as (100 - X) DC
q_strikes = [100 - distance, distance, b]
else:
q_strikes = [b - distance, b + distance, b]
asset_id = asset.get_marquee_id()
if asset.asset_class == AssetClass.FX:
asset_id = cross_stored_direction_for_fx_vol(asset_id)
q_strikes = _to_fx_strikes(q_strikes)
kwargs['location'] = location
column = 'deltaStrike' # should use SkewReference.DELTA for FX
else:
assert asset.asset_class == AssetClass.Equity
if not strike_reference:
raise MqTypeError('strike reference required for equities')
if strike_reference != SkewReference.NORMALIZED:
q_strikes = [x / 100 for x in q_strikes]
kwargs['strikeReference'] = strike_reference.value
column = 'relativeStrike'
kwargs[column] = q_strikes
_logger.debug('where tenor=%s and %s', tenor, kwargs)
where = FieldFilterMap(tenor=tenor, **kwargs)
q = GsDataApi.build_market_data_query([asset_id], QueryType.IMPLIED_VOLATILITY, where=where, source=source)
_logger.debug('q %s', q)
df = _market_data_timed(q)
if df.empty:
return pd.Series()
curves = {k: v for k, v in df.groupby(column)}
if len(curves) < 3:
raise MqValueError('skew not available for given inputs')
series = [curves[qs]['impliedVolatility'] for qs in q_strikes]
return (series[0] - series[1]) / series[2]
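# Hedged usage sketch (comments only; the asset lookup is illustrative, not a tested call):
#   spx = SecurityMaster.get_asset('SPX', AssetIdentifier.BLOOMBERG_ID)
#   skew(spx, '1m', SkewReference.DELTA, 25)
# With delta strikes and distance 25 this returns (25-delta-put vol - 25-delta-call vol) / ATM vol.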
@plot_measure((AssetClass.Equity, AssetClass.Commod, AssetClass.FX,), None,
[MeasureDependency(id_provider=cross_stored_direction_for_fx_vol,
query_type=QueryType.IMPLIED_VOLATILITY)])
def implied_volatility(asset: Asset, tenor: str, strike_reference: VolReference, relative_strike: Real = None, *,
source: str = None, real_time: bool = False) -> Series:
"""
Volatility of an asset implied by observations of market prices.
:param asset: asset object loaded from security master
:param tenor: relative date representation of expiration date e.g. 1m
:param strike_reference: reference for strike level
:param relative_strike: strike relative to reference
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: implied volatility curve
"""
if relative_strike is None and strike_reference != VolReference.DELTA_NEUTRAL:
raise MqValueError('Relative strike must be provided if your strike reference is not delta_neutral')
if asset.asset_class == AssetClass.FX:
if strike_reference == VolReference.DELTA_NEUTRAL:
delta_strike = 'DN'
elif strike_reference == VolReference.DELTA_CALL:
delta_strike = f'{relative_strike}DC'
elif strike_reference == VolReference.DELTA_PUT:
delta_strike = f'{relative_strike}DP'
elif strike_reference == VolReference.FORWARD:
if relative_strike == 100:
delta_strike = 'ATMF'
else:
raise MqValueError('Relative strike must be 100 for Forward strike reference')
elif strike_reference == VolReference.SPOT:
if relative_strike == 100:
delta_strike = 'ATMS'
else:
raise MqValueError('Relative strike must be 100 for Spot strike reference')
else:
raise MqValueError('strikeReference: ' + strike_reference.value + ' not supported for FX')
asset_id = cross_stored_direction_for_fx_vol(asset.get_marquee_id())
_logger.debug('where tenor=%s, deltaStrike=%s, location=NYC', tenor, delta_strike)
q = GsDataApi.build_market_data_query(
[asset_id],
QueryType.IMPLIED_VOLATILITY,
where=FieldFilterMap(tenor=tenor, deltaStrike=delta_strike, location='NYC'),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
else:
if strike_reference == VolReference.DELTA_NEUTRAL:
raise NotImplementedError('delta_neutral strike reference is not supported for equities.')
if strike_reference == VolReference.DELTA_PUT:
relative_strike = abs(100 - relative_strike)
relative_strike = relative_strike if strike_reference == VolReference.NORMALIZED else relative_strike / 100
ref_string = "delta" if strike_reference in (VolReference.DELTA_CALL,
VolReference.DELTA_PUT) else strike_reference.value
_logger.debug('where tenor=%s, strikeReference=%s, relativeStrike=%s', tenor, ref_string, relative_strike)
where = FieldFilterMap(tenor=tenor, strikeReference=ref_string, relativeStrike=relative_strike)
q = GsDataApi.build_market_data_query([asset.get_marquee_id()], QueryType.IMPLIED_VOLATILITY,
where=where, source=source, real_time=real_time)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['impliedVolatility']
@plot_measure((AssetClass.Equity,), (AssetType.Index, AssetType.ETF,), [QueryType.IMPLIED_CORRELATION])
def implied_correlation(asset: Asset, tenor: str, strike_reference: EdrDataReference, relative_strike: Real, *,
source: str = None, real_time: bool = False) -> Series:
"""
Correlation of an asset implied by observations of market prices.
:param asset: asset object loaded from security master
:param tenor: relative date representation of expiration date e.g. 1m
:param strike_reference: reference for strike level
:param relative_strike: strike relative to reference
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: implied correlation curve
"""
if real_time:
raise NotImplementedError('realtime implied_correlation not implemented')
if strike_reference == EdrDataReference.DELTA_PUT:
relative_strike = abs(100 - relative_strike)
relative_strike = relative_strike / 100
delta_types = (EdrDataReference.DELTA_CALL, EdrDataReference.DELTA_PUT)
strike_ref = "delta" if strike_reference in delta_types else strike_reference.value
_logger.debug('where tenor=%s, strikeReference=%s, relativeStrike=%s', tenor, strike_ref, relative_strike)
mqid = asset.get_marquee_id()
where = FieldFilterMap(tenor=tenor, strikeReference=strike_ref, relativeStrike=relative_strike)
q = GsDataApi.build_market_data_query([mqid], QueryType.IMPLIED_CORRELATION, where=where, source=source,
real_time=real_time)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['impliedCorrelation']
@plot_measure((AssetClass.Equity,), (AssetType.Index, AssetType.ETF,), [QueryType.AVERAGE_IMPLIED_VOLATILITY])
def average_implied_volatility(asset: Asset, tenor: str, strike_reference: EdrDataReference, relative_strike: Real, *,
source: str = None, real_time: bool = False) -> Series:
"""
Historic weighted average implied volatility for the underlying assets of an equity index.
:param asset: asset object loaded from security master
:param tenor: relative date representation of expiration date e.g. 1m
:param strike_reference: reference for strike level
:param relative_strike: strike relative to reference
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: average implied volatility curve
"""
if real_time:
raise NotImplementedError('realtime average_implied_volatility not implemented')
if strike_reference == EdrDataReference.DELTA_PUT:
relative_strike = abs(100 - relative_strike)
relative_strike = relative_strike / 100
delta_types = (EdrDataReference.DELTA_CALL, EdrDataReference.DELTA_PUT)
strike_ref = "delta" if strike_reference in delta_types else strike_reference.value
_logger.debug('where tenor=%s, strikeReference=%s, relativeStrike=%s', tenor, strike_ref, relative_strike)
mqid = asset.get_marquee_id()
where = FieldFilterMap(tenor=tenor, strikeReference=strike_ref, relativeStrike=relative_strike)
q = GsDataApi.build_market_data_query([mqid], QueryType.AVERAGE_IMPLIED_VOLATILITY,
where=where, source=source, real_time=real_time)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['averageImpliedVolatility']
@plot_measure((AssetClass.Equity,), (AssetType.Index, AssetType.ETF,), [QueryType.AVERAGE_IMPLIED_VARIANCE])
def average_implied_variance(asset: Asset, tenor: str, strike_reference: EdrDataReference, relative_strike: Real, *,
source: str = None, real_time: bool = False) -> Series:
"""
Historic weighted average implied variance for the underlying assets of an equity index.
:param asset: asset object loaded from security master
:param tenor: relative date representation of expiration date e.g. 1m
:param strike_reference: reference for strike level
:param relative_strike: strike relative to reference
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: average implied variance curve
"""
if real_time:
raise NotImplementedError('realtime average_implied_variance not implemented')
if strike_reference == EdrDataReference.DELTA_PUT:
relative_strike = abs(100 - relative_strike)
relative_strike = relative_strike / 100
delta_types = (EdrDataReference.DELTA_CALL, EdrDataReference.DELTA_PUT)
strike_ref = "delta" if strike_reference in delta_types else strike_reference.value
_logger.debug('where tenor=%s, strikeReference=%s, relativeStrike=%s', tenor, strike_ref, relative_strike)
mqid = asset.get_marquee_id()
where = FieldFilterMap(tenor=tenor, strikeReference=strike_ref, relativeStrike=relative_strike)
q = GsDataApi.build_market_data_query([mqid], QueryType.AVERAGE_IMPLIED_VARIANCE, where=where, source=source,
real_time=real_time)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['averageImpliedVariance']
@plot_measure((AssetClass.Cash,), (AssetType.Currency,), [QueryType.SWAP_RATE])
def swap_rate(asset: Asset, tenor: str, benchmark_type: BenchmarkType = None, floating_index: str = None,
*, source: str = None, real_time: bool = False) -> Series:
"""
GS end-of-day Fixed-Floating interest rate swap (IRS) curves across major currencies.
:param asset: asset object loaded from security master
:param tenor: relative date representation of expiration date e.g. 1m
:param benchmark_type: benchmark type e.g. LIBOR
:param floating_index: floating index rate
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: swap rate curve
"""
if real_time:
raise NotImplementedError('realtime swap_rate not implemented')
currency = asset.get_identifier(AssetIdentifier.BLOOMBERG_ID)
currency = Currency(currency)
# default benchmark types
if benchmark_type is None:
if currency == Currency.EUR:
benchmark_type = BenchmarkType.EURIBOR
elif currency == Currency.SEK:
benchmark_type = BenchmarkType.STIBOR
else:
benchmark_type = BenchmarkType.LIBOR
over_nights = [BenchmarkType.OIS]
# default floating index
if floating_index is None:
if benchmark_type in over_nights:
floating_index = '1d'
else:
if currency in [Currency.USD]:
floating_index = '3m'
elif currency in [Currency.GBP, Currency.EUR, Currency.CHF, Currency.SEK]:
floating_index = '6m'
mdapi_divider = " " if benchmark_type in over_nights else "-"
mdapi_floating_index = BenchmarkType.OIS.value if benchmark_type is BenchmarkType.OIS else floating_index
mdapi = currency.value + mdapi_divider + mdapi_floating_index
rate_mqid = GsAssetApi.map_identifiers(GsIdType.mdapi, GsIdType.id, [mdapi])[mdapi]
_logger.debug('where tenor=%s, floatingIndex=%s', tenor, floating_index)
q = GsDataApi.build_market_data_query(
[rate_mqid],
QueryType.SWAP_RATE,
where=FieldFilterMap(tenor=tenor),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['swapRate']
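# Worked example of the defaulting logic above (comments only):
#   EUR asset, no benchmark/index given -> EURIBOR with a 6m floating index -> mdapi 'EUR-6m'
#   USD asset with BenchmarkType.OIS    -> floating index '1d'              -> mdapi 'USD OIS'
# The mdapi string is then mapped to a Marquee id before querying SWAP_RATE.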
@plot_measure((AssetClass.Cash,), (AssetType.Currency,),
[MeasureDependency(id_provider=currency_to_default_benchmark_rate, query_type=QueryType.SWAPTION_VOL)])
def swaption_vol(asset: Asset, expiration_tenor: str, termination_tenor: str, relative_strike: float,
*, source: str = None, real_time: bool = False) -> Series:
"""
GS end-of-day implied normal volatility for swaption vol matrices.
:param asset: asset object loaded from security master
:param expiration_tenor: relative date representation of expiration date on the option e.g. 3m
:param termination_tenor: relative date representation of the instrument's expiration date e.g. 1y
:param relative_strike: strike level relative to at the money e.g. 10
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: swaption implied normal volatility curve
"""
if real_time:
raise NotImplementedError('realtime swaption_vol not implemented')
rate_benchmark_mqid = convert_asset_for_rates_data_set(asset, RatesConversionType.DEFAULT_BENCHMARK_RATE)
_logger.debug('where expiry=%s, tenor=%s, strike=%s', expiration_tenor, termination_tenor, relative_strike)
q = GsDataApi.build_market_data_query(
[rate_benchmark_mqid],
QueryType.SWAPTION_VOL,
where=FieldFilterMap(expiry=expiration_tenor, tenor=termination_tenor, strike=relative_strike),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['swaptionVol']
@plot_measure((AssetClass.Cash,), (AssetType.Currency,),
[MeasureDependency(id_provider=currency_to_default_benchmark_rate, query_type=QueryType.ATM_FWD_RATE)])
def swaption_atm_fwd_rate(asset: Asset, expiration_tenor: str, termination_tenor: str, *, source: str = None,
real_time: bool = False) -> Series:
"""
GS end-of-day at-the-money forward rate for swaption vol matrices.
:param asset: asset object loaded from security master
:param expiration_tenor: relative date representation of expiration date on the option e.g. 3m
:param termination_tenor: relative date representation of the instrument's expiration date e.g. 1y
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: swaption at-the-money forward rate curve
"""
if real_time:
raise NotImplementedError('realtime swaption_atm_fwd_rate not implemented')
rate_benchmark_mqid = convert_asset_for_rates_data_set(asset, RatesConversionType.DEFAULT_BENCHMARK_RATE)
_logger.debug('where expiry=%s, tenor=%s', expiration_tenor, termination_tenor)
q = GsDataApi.build_market_data_query(
[rate_benchmark_mqid],
QueryType.ATM_FWD_RATE,
where=FieldFilterMap(expiry=expiration_tenor, tenor=termination_tenor, strike=0),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['atmFwdRate']
@plot_measure((AssetClass.Cash,), (AssetType.Currency,),
[MeasureDependency(id_provider=currency_to_default_benchmark_rate, query_type=QueryType.MIDCURVE_VOL)])
def midcurve_vol(asset: Asset, expiration_tenor: str, forward_tenor: str, termination_tenor: str,
relative_strike: float,
*, source: str = None, real_time: bool = False) -> Series:
"""
GS end-of-day implied normal volatility for midcurve vol matrices.
:param asset: asset object loaded from security master
:param expiration_tenor: relative date representation of expiration date on the option e.g. 3m
:param forward_tenor: relative date representation of swap's start date after option expiry e.g. 2y
:param termination_tenor: relative date representation of the instrument's expiration date e.g. 1y
:param relative_strike: strike level relative to at the money e.g. 10
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: midcurve implied normal volatility curve
"""
if real_time:
raise NotImplementedError('realtime midcurve_vol not implemented')
_logger.debug('where expiry=%s, forwardTenor=%s, tenor=%s, strike=%s', expiration_tenor, forward_tenor,
termination_tenor, relative_strike)
rate_benchmark_mqid = convert_asset_for_rates_data_set(asset, RatesConversionType.DEFAULT_BENCHMARK_RATE)
q = GsDataApi.build_market_data_query(
[rate_benchmark_mqid],
QueryType.MIDCURVE_VOL,
where=FieldFilterMap(expiry=expiration_tenor, forwardTenor=forward_tenor, tenor=termination_tenor,
strike=relative_strike),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['midcurveVol']
@plot_measure((AssetClass.Cash,), (AssetType.Currency,),
[MeasureDependency(id_provider=currency_to_default_benchmark_rate,
query_type=QueryType.MIDCURVE_ATM_FWD_RATE)])
def midcurve_atm_fwd_rate(asset: Asset, expiration_tenor: str, forward_tenor: str, termination_tenor: str,
*, source: str = None, real_time: bool = False) -> Series:
"""
GS end-of-day at-the-money forward rate for midcurve vol matrices.
:param asset: asset object loaded from security master
:param expiration_tenor: relative date representation of expiration date on the option e.g. 3m
:param forward_tenor: relative date representation of swap's start date after option expiry e.g. 2y
:param termination_tenor: relative date representation of the instrument's expiration date e.g. 1y
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: midcurve atm forward rate curve
"""
if real_time:
raise NotImplementedError('realtime midcurve_atm_fwd_rate not implemented')
q = GsDataApi.build_market_data_query(
[convert_asset_for_rates_data_set(asset, RatesConversionType.DEFAULT_BENCHMARK_RATE)],
QueryType.MIDCURVE_ATM_FWD_RATE,
where=FieldFilterMap(expiry=expiration_tenor, forwardTenor=forward_tenor, tenor=termination_tenor, strike=0),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['midcurveAtmFwdRate']
@plot_measure((AssetClass.Cash,), (AssetType.Currency,),
[MeasureDependency(id_provider=currency_to_default_benchmark_rate, query_type=QueryType.CAP_FLOOR_VOL)])
def cap_floor_vol(asset: Asset, expiration_tenor: str, relative_strike: float, *, source: str = None,
real_time: bool = False) -> Series:
"""
GS end-of-day implied normal volatility for cap and floor vol matrices.
:param asset: asset object loaded from security master
:param expiration_tenor: relative date representation of expiration date on the option e.g. 3m
:param relative_strike: strike level relative to at the money e.g. 10
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: cap and floor implied normal volatility curve
"""
if real_time:
raise NotImplementedError('realtime cap_floor_vol not implemented')
rate_benchmark_mqid = convert_asset_for_rates_data_set(asset, RatesConversionType.DEFAULT_BENCHMARK_RATE)
_logger.debug('where expiry=%s, strike=%s', expiration_tenor, relative_strike)
q = GsDataApi.build_market_data_query(
[rate_benchmark_mqid],
QueryType.CAP_FLOOR_VOL,
where=FieldFilterMap(expiry=expiration_tenor, strike=relative_strike),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['capFloorVol']
@plot_measure((AssetClass.Cash,), (AssetType.Currency,),
[MeasureDependency(id_provider=currency_to_default_benchmark_rate,
query_type=QueryType.CAP_FLOOR_ATM_FWD_RATE)])
def cap_floor_atm_fwd_rate(asset: Asset, expiration_tenor: str, *, source: str = None,
real_time: bool = False) -> Series:
"""
GS end-of-day at-the-money forward rate for cap and floor matrices.
:param asset: asset object loaded from security master
:param expiration_tenor: relative date representation of expiration date on the option e.g. 3m
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: cap and floor atm forward rate curve
"""
if real_time:
raise NotImplementedError('realtime cap_floor_atm_fwd_rate not implemented')
q = GsDataApi.build_market_data_query(
[convert_asset_for_rates_data_set(asset, RatesConversionType.DEFAULT_BENCHMARK_RATE)],
QueryType.CAP_FLOOR_ATM_FWD_RATE,
where=FieldFilterMap(expiry=expiration_tenor, strike=0),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['capFloorAtmFwdRate']
@plot_measure((AssetClass.Cash,), (AssetType.Currency,),
[MeasureDependency(id_provider=currency_to_default_benchmark_rate,
query_type=QueryType.SPREAD_OPTION_VOL)])
def spread_option_vol(asset: Asset, expiration_tenor: str, long_tenor: str, short_tenor: str, relative_strike: float,
*, source: str = None, real_time: bool = False) -> Series:
"""
GS end-of-day implied normal volatility for spread option vol matrices.
:param asset: asset object loaded from security master
:param expiration_tenor: relative date representation of expiration date on the option e.g. 3m
    :param long_tenor: relative date representation of the longer rate tenor in the spread e.g. 10y
    :param short_tenor: relative date representation of the shorter rate tenor in the spread e.g. 2y
:param relative_strike: strike level relative to at the money e.g. 10
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: spread option implied normal volatility curve
"""
if real_time:
raise NotImplementedError('realtime spread_option_vol not implemented')
rate_benchmark_mqid = convert_asset_for_rates_data_set(asset, RatesConversionType.DEFAULT_BENCHMARK_RATE)
_logger.debug('where expiry=%s, longTenor=%s, shortTenor=%s, strike=%s', expiration_tenor, long_tenor, short_tenor,
relative_strike)
q = GsDataApi.build_market_data_query(
[rate_benchmark_mqid],
QueryType.SPREAD_OPTION_VOL,
where=FieldFilterMap(expiry=expiration_tenor, longTenor=long_tenor, shortTenor=short_tenor,
strike=relative_strike),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['spreadOptionVol']
@plot_measure((AssetClass.Cash,), (AssetType.Currency,),
[MeasureDependency(id_provider=currency_to_default_benchmark_rate,
query_type=QueryType.SPREAD_OPTION_ATM_FWD_RATE)])
def spread_option_atm_fwd_rate(asset: Asset, expiration_tenor: str, long_tenor: str, short_tenor: str,
*, source: str = None, real_time: bool = False) -> Series:
"""
GS end-of-day At-the-money forward rate for spread option vol matrices.
:param asset: asset object loaded from security master
:param expiration_tenor: relative date representation of expiration date on the option e.g. 3m
    :param long_tenor: relative date representation of the longer rate tenor in the spread e.g. 10y
    :param short_tenor: relative date representation of the shorter rate tenor in the spread e.g. 2y
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: spread option at-the-money forward rate curve
"""
if real_time:
raise NotImplementedError('realtime spread_option_atm_fwd_rate not implemented')
q = GsDataApi.build_market_data_query(
[convert_asset_for_rates_data_set(asset, RatesConversionType.DEFAULT_BENCHMARK_RATE)],
QueryType.SPREAD_OPTION_ATM_FWD_RATE,
where=FieldFilterMap(expiry=expiration_tenor, longTenor=long_tenor, shortTenor=short_tenor, strike=0),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['spreadOptionAtmFwdRate']
@plot_measure((AssetClass.Cash,), (AssetType.Currency,),
[MeasureDependency(id_provider=currency_to_inflation_benchmark_rate,
query_type=QueryType.INFLATION_SWAP_RATE)])
def zc_inflation_swap_rate(asset: Asset, termination_tenor: str, *, source: str = None,
real_time: bool = False) -> Series:
"""
GS end-of-day zero coupon inflation swap break-even rate.
:param asset: asset object loaded from security master
:param termination_tenor: relative date representation of the instrument's expiration date e.g. 1y
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: zero coupon inflation swap break-even rate curve
"""
if real_time:
raise NotImplementedError('realtime zc_inflation_swap_rate not implemented')
infl_rate_benchmark_mqid = convert_asset_for_rates_data_set(asset, RatesConversionType.INFLATION_BENCHMARK_RATE)
_logger.debug('where tenor=%s', termination_tenor)
q = GsDataApi.build_market_data_query(
[infl_rate_benchmark_mqid],
QueryType.INFLATION_SWAP_RATE,
where=FieldFilterMap(tenor=termination_tenor),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['inflationSwapRate']
@plot_measure((AssetClass.FX,), (AssetType.Cross,),
[MeasureDependency(id_provider=cross_to_basis, query_type=QueryType.BASIS)])
def basis(asset: Asset, termination_tenor: str, *, source: str = None, real_time: bool = False) -> Series:
"""
GS end-of-day cross-currency basis swap spread.
:param asset: asset object loaded from security master
:param termination_tenor: relative date representation of the instrument's expiration date e.g. 1y
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: cross-currency basis swap spread curve
"""
if real_time:
raise NotImplementedError('realtime basis not implemented')
basis_mqid = convert_asset_for_rates_data_set(asset, RatesConversionType.CROSS_CURRENCY_BASIS)
_logger.debug('where tenor=%s', termination_tenor)
q = GsDataApi.build_market_data_query(
[basis_mqid],
QueryType.BASIS,
where=FieldFilterMap(tenor=termination_tenor),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['basis']
@plot_measure((AssetClass.FX,), (AssetType.Cross,), [MeasureDependency(
id_provider=cross_to_usd_based_cross, query_type=QueryType.FORECAST)])
def forecast(asset: Asset, forecast_horizon: str, *, source: str = None, real_time: bool = False) -> Series:
"""
GS end-of-day FX forecasts made by Global Investment Research (GIR) macro analysts.
:param asset: asset object loaded from security master
:param forecast_horizon: relative period of time to forecast e.g. 1y
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: FX forecast curve
"""
if real_time:
raise NotImplementedError('realtime forecast not implemented')
cross_mqid = asset.get_marquee_id()
usd_based_cross_mqid = cross_to_usd_based_cross(cross_mqid)
horizon = '12m' if forecast_horizon == '1y' else forecast_horizon
q = GsDataApi.build_market_data_query(
[usd_based_cross_mqid],
QueryType.FORECAST,
where=FieldFilterMap(relativePeriod=horizon),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
series = Series() if df.empty else df['forecast']
if cross_mqid != usd_based_cross_mqid:
series = 1 / series
return series
@plot_measure((AssetClass.Equity, AssetClass.Commod), None, [QueryType.IMPLIED_VOLATILITY])
def vol_term(asset: Asset, strike_reference: SkewReference, relative_strike: Real,
pricing_date: Optional[GENERIC_DATE] = None, *, source: str = None, real_time: bool = False) -> pd.Series:
"""
Volatility term structure. Uses most recent date available if pricing_date is not provided.
:param asset: asset object loaded from security master
:param strike_reference: reference for strike level
:param relative_strike: strike relative to reference
:param pricing_date: YYYY-MM-DD or relative days before today e.g. 1d, 1m, 1y
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: volatility term structure
"""
if real_time:
        raise NotImplementedError('realtime vol_term not implemented')  # TODO
if strike_reference != SkewReference.NORMALIZED:
relative_strike /= 100
start, end = _range_from_pricing_date(asset.exchange, pricing_date)
with DataContext(start, end):
_logger.debug('where strikeReference=%s, relativeStrike=%s', strike_reference.value, relative_strike)
where = FieldFilterMap(strikeReference=strike_reference.value, relativeStrike=relative_strike)
q = GsDataApi.build_market_data_query([asset.get_marquee_id()], QueryType.IMPLIED_VOLATILITY, where=where,
source=source,
real_time=real_time)
_logger.debug('q %s', q)
df = _market_data_timed(q)
if df.empty:
return pd.Series()
latest = df.index.max()
_logger.info('selected pricing date %s', latest)
df = df.loc[latest]
cbd = _get_custom_bd(asset.exchange)
df = df.assign(expirationDate=df.index + df['tenor'].map(_to_offset) + cbd - cbd)
df = df.set_index('expirationDate')
df.sort_index(inplace=True)
df = df.loc[DataContext.current.start_date: DataContext.current.end_date]
return df['impliedVolatility'] if not df.empty else pd.Series()
@plot_measure((AssetClass.Equity,), None, [QueryType.IMPLIED_VOLATILITY])
def vol_smile(asset: Asset, tenor: str, strike_reference: VolSmileReference,
pricing_date: Optional[GENERIC_DATE] = None,
*, source: str = None, real_time: bool = False) -> Series:
"""
Volatility smile of an asset implied by observations of market prices.
:param asset: asset object loaded from security master
:param tenor: relative date representation of expiration date e.g. 1m
:param strike_reference: reference for strike level
:param pricing_date: YYYY-MM-DD or relative days before today e.g. 1d, 1m, 1y
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: implied volatility smile
"""
if real_time:
raise NotImplementedError('realtime vol_smile not implemented')
mqid = asset.get_marquee_id()
start, end = _range_from_pricing_date(asset.exchange, pricing_date)
with DataContext(start, end):
q = GsDataApi.build_market_data_query(
[mqid],
QueryType.IMPLIED_VOLATILITY,
where=FieldFilterMap(tenor=tenor, strikeReference=strike_reference.value),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
if df.empty:
        return Series()
latest = df.index.max()
_logger.info('selected pricing date %s', latest)
df = df.loc[latest]
vols = df['impliedVolatility'].values
strikes = df['relativeStrike'].values
return Series(vols, index=strikes)
@plot_measure((AssetClass.Equity, AssetClass.Commod), None, [QueryType.FORWARD])
def fwd_term(asset: Asset, pricing_date: Optional[GENERIC_DATE] = None, *, source: str = None,
real_time: bool = False) -> pd.Series:
"""
Forward term structure. Uses most recent date available if pricing_date is not provided.
:param asset: asset object loaded from security master
:param pricing_date: YYYY-MM-DD or relative days before today e.g. 1d, 1m, 1y
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: forward term structure
"""
if real_time:
raise NotImplementedError('realtime forward term not implemented') # TODO
start, end = _range_from_pricing_date(asset.exchange, pricing_date)
with DataContext(start, end):
where = FieldFilterMap(strikeReference='forward', relativeStrike=1)
q = GsDataApi.build_market_data_query([asset.get_marquee_id()], QueryType.FORWARD, where=where, source=source,
real_time=real_time)
_logger.debug('q %s', q)
df = _market_data_timed(q)
if df.empty:
return pd.Series()
latest = df.index.max()
_logger.info('selected pricing date %s', latest)
df = df.loc[latest]
cbd = _get_custom_bd(asset.exchange)
df.loc[:, 'expirationDate'] = df.index + df['tenor'].map(_to_offset) + cbd - cbd
df = df.set_index('expirationDate')
df.sort_index(inplace=True)
df = df.loc[DataContext.current.start_date: DataContext.current.end_date]
return df['forward'] if not df.empty else pd.Series()
@cachetools.func.ttl_cache() # fine as long as availability is not different between users
def _var_swap_tenors(asset: Asset):
from gs_quant.session import GsSession
aid = asset.get_marquee_id()
body = GsSession.current._get(f"/data/markets/{aid}/availability")
for r in body['data']:
if r['dataField'] == Fields.VAR_SWAP.value:
for f in r['filteredFields']:
if f['field'] == Fields.TENOR.value:
return f['values']
raise MqValueError("var swap is not available for " + aid)
def _tenor_to_month(relative_date: str) -> int:
matcher = re.fullmatch('([1-9]\\d*)([my])', relative_date)
if matcher:
mag = int(matcher.group(1))
return mag if matcher.group(2) == 'm' else mag * 12
raise MqValueError('invalid input: relative date must be in months or years')
def _month_to_tenor(months: int) -> str:
return f'{months//12}y' if months % 12 == 0 else f'{months}m'
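# Worked examples for the two tenor helpers above (comments only):
#   _tenor_to_month('9m') -> 9      _tenor_to_month('2y') -> 24
#   _month_to_tenor(24)   -> '2y'   _month_to_tenor(14)   -> '14m'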
@plot_measure((AssetClass.Equity, AssetClass.Commod), None, [QueryType.VAR_SWAP])
def var_term(asset: Asset, pricing_date: Optional[str] = None, forward_start_date: Optional[str] = None,
*, source: str = None, real_time: bool = False) -> pd.Series:
"""
Variance swap term structure. Uses most recent date available if pricing_date is not provided.
:param asset: asset object loaded from security master
:param pricing_date: relative days before today e.g. 3d, 2m, 1y
:param forward_start_date: forward start date e.g. 2m, 1y; defaults to none
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: variance swap term structure
"""
if not (pricing_date is None or isinstance(pricing_date, str)):
raise MqTypeError('pricing_date should be a relative date')
start, end = _range_from_pricing_date(asset.exchange, pricing_date)
with DataContext(start, end):
if forward_start_date:
tenors = _var_swap_tenors(asset)
sub_frames = []
for t in tenors:
diff = _tenor_to_month(t) - _tenor_to_month(forward_start_date)
if diff < 1:
continue
t1 = _month_to_tenor(diff)
c = var_swap(asset, t1, forward_start_date, source=source, real_time=real_time).to_frame()
if not c.empty:
c['tenor'] = t1
sub_frames.append(c)
df = pd.concat(sub_frames)
else:
q = GsDataApi.build_market_data_query([asset.get_marquee_id()], QueryType.VAR_SWAP,
source=source, real_time=real_time)
_logger.debug('q %s', q)
df = _market_data_timed(q)
if df.empty:
return pd.Series()
latest = df.index.max()
_logger.info('selected pricing date %s', latest)
df = df.loc[latest]
cbd = _get_custom_bd(asset.exchange)
df.loc[:, Fields.EXPIRATION_DATE.value] = df.index + df[Fields.TENOR.value].map(_to_offset) + cbd - cbd
df = df.set_index(Fields.EXPIRATION_DATE.value)
df.sort_index(inplace=True)
df = df.loc[DataContext.current.start_date: DataContext.current.end_date]
return df[Fields.VAR_SWAP.value] if not df.empty else pd.Series()
@plot_measure((AssetClass.Equity, AssetClass.Commod,), None, [QueryType.VAR_SWAP])
def var_swap(asset: Asset, tenor: str, forward_start_date: Optional[str] = None,
*, source: str = None, real_time: bool = False) -> Series:
"""
Strike such that the price of an uncapped variance swap on the underlying index is zero at inception. If
forward start date is provided, then the result is a forward starting variance swap.
:param asset: asset object loaded from security master
:param tenor: relative date representation of expiration date e.g. 1m
:param forward_start_date: forward start date e.g. 2m, 1y; defaults to none
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: implied volatility curve
"""
if forward_start_date is None:
_logger.debug('where tenor=%s', tenor)
where = FieldFilterMap(tenor=tenor)
q = GsDataApi.build_market_data_query([asset.get_marquee_id()], QueryType.VAR_SWAP,
where=where, source=source, real_time=real_time)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df[Fields.VAR_SWAP.value]
else:
if not isinstance(forward_start_date, str):
raise MqTypeError('forward_start_date must be a relative date')
x = _tenor_to_month(tenor)
y = _tenor_to_month(forward_start_date)
z = x + y
yt = _month_to_tenor(y)
zt = _month_to_tenor(z)
tenors = _var_swap_tenors(asset)
if yt not in tenors or zt not in tenors:
return Series()
_logger.debug('where tenor=%s', f'{yt},{zt}')
where = FieldFilterMap(tenor=[yt, zt])
q = GsDataApi.build_market_data_query([asset.get_marquee_id()], QueryType.VAR_SWAP,
where=where, source=source, real_time=real_time)
_logger.debug('q %s', q)
df = _market_data_timed(q)
if df.empty:
return Series()
grouped = df.groupby(Fields.TENOR.value)
try:
yg = grouped.get_group(yt)[Fields.VAR_SWAP.value]
zg = grouped.get_group(zt)[Fields.VAR_SWAP.value]
except KeyError:
_logger.debug('no data for one or more tenors')
            return Series()
import nibabel as nib
from nilearn import surface
from nilearn import plotting
from sklearn.utils import Bunch
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
import pandas as pd
from scipy.sparse import load_npz
from scipy.sparse.csgraph import connected_components
import os
import re
from pathlib import Path
# define standard structures (for 3T HCP-like data)
struct = Bunch()
struct.cortex_left = slice(0,29696)
struct.cortex_right = slice(29696,59412)
struct.cortex = slice(0,59412)
struct.subcortical = slice(59412,None)
struct.accumbens_left = slice(59412,59547)
struct.accumbens_right = slice(59547,59687)
struct.amygdala_left = slice(59687,60002)
struct.amygdala_right = slice(60002,60334)
struct.brainStem = slice(60334,63806)
struct.caudate_left = slice(63806,64534)
struct.caudate_right = slice(64534,65289)
struct.cerebellum_left = slice(65289,73998)
struct.cerebellum_right = slice(73998,83142)
struct.diencephalon_left = slice(83142,83848)
struct.diencephalon_right = slice(83848,84560)
struct.hippocampus_left = slice(84560,85324)
struct.hippocampus_right = slice(85324,86119)
struct.pallidum_left = slice(86119,86416)
struct.pallidum_right = slice(86416,86676)
struct.putamen_left = slice(86676,87736)
struct.putamen_right = slice(87736,88746)
struct.thalamus_left = slice(88746,90034)
struct.thalamus_right = slice(90034,None)
# The fMRI data are not defined on all 32492 vertices of the 32k surface meshes
# Hence we need to record the mapping between the cortex grayordinates from fMRI
# and the vertices of the 32k surface meshes.
# This information is kept in vertex_info
#
# for a standard 3T HCP style fMRI image get_HCP_vertex_info(img) should coincide with vertex_info
def _make_vertex_info(grayl, grayr, num_meshl, num_meshr):
vertex_info = Bunch()
vertex_info.grayl = grayl
vertex_info.grayr = grayr
vertex_info.num_meshl = num_meshl
vertex_info.num_meshr = num_meshr
return vertex_info
PKGDATA = Path(__file__).parent / 'data'
vertex_data = np.load(PKGDATA / 'fMRI_vertex_info_32k.npz')
vertex_info = _make_vertex_info(vertex_data['grayl'], vertex_data['grayr'], int(vertex_data['num_meshl']), int(vertex_data['num_meshr']))
def get_HCP_vertex_info(img):
"""
Extracts information about the relation of indices in the fMRI data to the surface meshes and the left/right cortex.
Use only for meshes different from the 32k standard one, which is loaded by default.
"""
assert isinstance(img, nib.cifti2.cifti2.Cifti2Image)
map1 = img.header.get_index_map(1)
bms = list(map1.brain_models)
grayl = np.array(bms[0].vertex_indices)
grayr = np.array(bms[1].vertex_indices)
num_meshl = bms[0].surface_number_of_vertices
num_meshr = bms[1].surface_number_of_vertices
return _make_vertex_info(grayl, grayr, num_meshl, num_meshr)
# The following three functions take a 1D array of fMRI grayordinates
# and return the array on the left- right- or both surface meshes
def left_cortex_data(arr, fill=0, vertex_info=vertex_info):
"""
Takes a 1D array of fMRI grayordinates and returns the values on the vertices of the left cortex mesh, which is necessary for surface visualization.
The unused vertices are filled with a constant (zero by default).
"""
out = np.zeros(vertex_info.num_meshl)
out[:] = fill
out[vertex_info.grayl] = arr[:len(vertex_info.grayl)]
return out
def right_cortex_data(arr, fill=0, vertex_info=vertex_info):
"""
Takes a 1D array of fMRI grayordinates and returns the values on the vertices of the right cortex mesh, which is necessary for surface visualization.
The unused vertices are filled with a constant (zero by default).
"""
out = np.zeros(vertex_info.num_meshr)
out[:] = fill
if len(arr) == len(vertex_info.grayr):
# means arr is already just the right cortex
out[vertex_info.grayr] = arr
else:
out[vertex_info.grayr] = arr[len(vertex_info.grayl):len(vertex_info.grayl) + len(vertex_info.grayr)]
return out
def cortex_data(arr, fill=0, vertex_info=vertex_info):
"""
Takes a 1D array of fMRI grayordinates and returns the values on the vertices of the full cortex mesh, which is necessary for surface visualization.
The unused vertices are filled with a constant (zero by default).
"""
dataL = left_cortex_data(arr, fill=fill, vertex_info=vertex_info)
dataR = right_cortex_data(arr, fill=fill, vertex_info=vertex_info)
return np.hstack((dataL, dataR))
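# Illustrative sketch (assumption: 'PATH/native.dtseries.nii' is a hypothetical
# CIFTI file on a non-standard mesh). For such data, derive the mapping from the
# image itself and pass it explicitly:
#     img = nib.load('PATH/native.dtseries.nii')
#     vinfo = get_HCP_vertex_info(img)
#     surf_map = cortex_data(img.get_fdata()[0], vertex_info=vinfo)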
# utility function for making a mesh for both hemispheres
# used internally by load_surfaces
def combine_meshes(meshL, meshR):
"""
Combines left and right meshes into a single mesh for both hemispheres.
"""
coordL, facesL = meshL
coordR, facesR = meshR
coord = np.vstack((coordL, coordR))
faces = np.vstack((facesL, facesR+len(coordL)))
return coord, faces
# loads all available surface meshes
def load_surfaces(example_filename=None, filename_sulc=None):
"""
Loads all available surface meshes and sulcal depth file.
Combines the left and right hemispheres into joint meshes for the whole brain.
With no arguments loads the HCP S1200 group average meshes.
If loading subject-specific meshes, it is enough to specify a single `example_filename` of one of the
`white|midthickness|pial|inflated|very_inflated` types, e.g.
```
mesh = load_surfaces(example_filename='PATH/sub-44.L.pial.32k_fs_LR.surf.gii')
```
The function will load all available surfaces from that location.
"""
if example_filename is None:
filename_pattern = str(PKGDATA / 'S1200.{}.{}_MSMAll.32k_fs_LR.surf.gii')
else:
filename_pattern = re.sub(r'\.(L|R)\.', '.{}.', example_filename)
filename_pattern = re.sub('white|midthickness|pial|inflated|very_inflated', '{}', filename_pattern)
flatsphere_pattern = str(PKGDATA / 'S1200.{}.{}.32k_fs_LR.surf.gii')
meshes = Bunch()
for variant in ['white', 'midthickness', 'pial', 'inflated', 'very_inflated', 'flat' , 'sphere']:
count = 0
for hemisphere, hemisphere_name in [('L', 'left'), ('R', 'right')]:
if variant in ['flat' , 'sphere']:
filename = flatsphere_pattern.format(hemisphere, variant)
else:
filename = filename_pattern.format(hemisphere, variant)
if os.path.exists(filename):
coord, faces = surface.load_surf_mesh(filename)
if variant=='flat':
coordnew = np.zeros_like(coord)
coordnew[:, 1] = coord[:, 0]
coordnew[:, 2] = coord[:, 1]
coordnew[:, 0] = 0
coord = coordnew
meshes[variant+'_'+hemisphere_name] = coord, faces
count += 1
else:
print('Cannot find', filename)
if count==2:
if variant == 'flat':
coordl, facesl = meshes['flat_left']
coordr, facesr = meshes['flat_right']
coordlnew = coordl.copy()
coordlnew[:, 1] = coordl[:, 1] - 250.0
coordrnew = coordr.copy()
coordrnew[:, 1] = coordr[:, 1] + 250.0
meshes['flat'] = combine_meshes( (coordlnew, facesl), (coordrnew, facesr) )
else:
meshes[variant] = combine_meshes(meshes[variant+'_left'], meshes[variant+'_right'])
if filename_sulc is None:
filename_sulc = filename_pattern.format('XX','XX').replace('XX.XX', 'sulc').replace('surf.gii','dscalar.nii')
if os.path.exists(filename_sulc):
sulc_data = - nib.load(filename_sulc).get_fdata()[0]
if len(sulc_data)==59412:
# this happens for HCP S1200 group average data
sulc_data = cortex_data(sulc_data)
meshes['sulc'] = sulc_data
num = len(meshes.sulc)
meshes['sulc_left'] = meshes.sulc[:num//2]
meshes['sulc_right'] = meshes.sulc[num//2:]
else:
print('Cannot load file {} with sulcal depth data'.format(filename_sulc))
return meshes
mesh = load_surfaces()
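# Illustrative usage sketch (assumption: `X` is a hypothetical 1D grayordinate
# array, e.g. one frame of an fMRI run). Any of the loaded meshes can serve as
# the geometry, with the sulcal depth map as the background:
#     view = plotting.view_surf(mesh.inflated, cortex_data(X),
#                               bg_map=mesh.sulc, threshold=1.0)
#     view.open_in_browser()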
# parcellations
def _load_hcp_parcellation(variant=None):
allowed = ['mmp', 'ca_network', 'ca_parcels', 'yeo7', 'yeo17', 'standard']
if variant not in allowed:
print('argument should be one of ' + ','.join(allowed))
return
if variant=='standard':
parcnpz = np.load(PKGDATA / 'standard.npz')
if variant=='mmp':
parcnpz = np.load(PKGDATA / 'mmp_1.0.npz')
if variant=='ca_network':
parcnpz = np.load(PKGDATA / 'ca_network_1.1.npz')
if variant=='ca_parcels':
parcnpz = np.load(PKGDATA / 'ca_parcels_1.1.npz')
if variant=='yeo7':
parcnpz = np.load(PKGDATA / 'yeo7.npz')
if variant=='yeo17':
parcnpz = np.load(PKGDATA / 'yeo17.npz')
parcellation = Bunch()
parcellation.ids = parcnpz['ids']
parcellation.map_all = parcnpz['map_all']
labels = parcnpz['labels']
labelsdict = dict()
rgba = parcnpz['rgba']
rgbadict = dict()
for i, k in enumerate(parcellation.ids):
labelsdict[k] = labels[i]
rgbadict[k] = rgba[i]
parcellation.labels = labelsdict
parcellation.rgba = rgbadict
i = 0
nontrivial_ids = []
for k in parcellation.ids:
if k!=0:
nontrivial_ids.append(k)
i += 1
parcellation.nontrivial_ids = np.array(nontrivial_ids)
return parcellation
# predefined parcellations
mmp = _load_hcp_parcellation('mmp')
ca_network = _load_hcp_parcellation('ca_network')
ca_parcels = _load_hcp_parcellation('ca_parcels')
yeo7 = _load_hcp_parcellation('yeo7')
yeo17 = _load_hcp_parcellation('yeo17')
standard = _load_hcp_parcellation('standard')
def view_parcellation(meshLR, parcellation):
"""
View the given parcellation on an a whole brain surface mesh.
"""
# for some parcellations the numerical ids need not be consecutive
cortex_map = cortex_data(parcellation.map_all)
ids = np.unique(cortex_map)
normalized_cortex_map = np.zeros_like(cortex_map)
rgba = np.zeros((len(ids), 4))
for i in range(len(ids)):
ind = cortex_map==ids[i]
normalized_cortex_map[ind] = i
rgba[i,:] = parcellation.rgba[ids[i]]
cmap = matplotlib.colors.ListedColormap(rgba)
return plotting.view_surf(meshLR, normalized_cortex_map, symmetric_cmap=False, cmap=cmap)
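# Example, using only objects defined in this module:
#     view_parcellation(mesh.inflated, yeo7)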
def parcellation_labels(parcellation):
"""
Displays the names of the ROIs in a parcellation together with their color coding and corresponding numeric ids.
"""
n = len(parcellation.ids)
ncols = 4
nrows = n // ncols + 1
dpi = 72
h = 12
dh = 6
H = h + dh
Y = (nrows + 1) * H
fig_height = Y / dpi
fig, ax = plt.subplots(figsize=(18, fig_height))
X, _ = fig.get_dpi() * fig.get_size_inches()
w = X/ncols
for i in range(n):
k = parcellation.ids[i]
label = parcellation.labels[k]
if label == '':
label = 'None'
name = '{} ({})'.format(label, k)
col = i // nrows
row = i % nrows
y = Y - (row * H) - H
xi = w * (col + 0.05)
xf = w * (col + 0.25)
xt = w * (col + 0.3)
ax.text(xt, y + h/2 , name, fontsize=h, horizontalalignment='left', verticalalignment='center')
ax.add_patch(mpatches.Rectangle((xi, y), xf-xi, h ,linewidth=1,edgecolor='k',facecolor=parcellation.rgba[k]))
ax.set_xlim(0, X)
ax.set_ylim(0, Y)
ax.set_axis_off()
plt.subplots_adjust(left=0, right=1, top=1, bottom=0, hspace=0, wspace=0)
def parcellate(X, parcellation, method=np.mean):
"""
Parcellates the data into ROIs using `method` (mean by default). Ignores the unassigned grayordinates with id=0.
Works both for time-series 2D data and snapshot 1D data.
"""
n = np.sum(parcellation.ids!=0)
if X.ndim==2:
Xp = np.zeros((len(X), n), dtype=X.dtype)
else:
Xp = np.zeros(n, dtype=X.dtype)
i = 0
for k in parcellation.ids:
if k!=0:
if X.ndim==2:
Xp[:, i] = method(X[:, parcellation.map_all==k], axis=1)
else:
Xp[i] = method(X[parcellation.map_all==k])
i += 1
return Xp
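# Illustrative usage sketch (assumption: 'PATH/run.dtseries.nii' is a hypothetical
# CIFTI time series; shapes depend on the data):
#     Xn = nib.load('PATH/run.dtseries.nii').get_fdata()   # (timepoints, grayordinates)
#     Xp = parcellate(Xn, mmp)                             # one column per nontrivial ROI
#     fc = np.corrcoef(Xp.T)                               # parcellated connectivity matrix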
def unparcellate(Xp, parcellation):
"""
Takes as input time-series (2D) or snapshot (1D) parcellated data.
Creates full grayordinate data, with each grayordinate set to the value of its parcel in the parcellated data.
Can be useful for visualization.
"""
n = len(parcellation.map_all)
if Xp.ndim==2:
X = np.zeros((len(Xp), n), dtype=Xp.dtype)
else:
X = np.zeros(n, dtype=Xp.dtype)
i = 0
for k in parcellation.ids:
if k!=0:
if Xp.ndim==2:
X[:, parcellation.map_all==k] = Xp[:,i][:,np.newaxis]
else:
X[parcellation.map_all==k] = Xp[i]
i += 1
return X
def mask(X, mask, fill=0):
"""
Takes 1D data `X` and a mask `mask`. Sets the exterior of mask to a constant (by default zero).
Can be useful for visualization.
"""
X_masked = np.zeros_like(X)
X_masked[:] = fill
X_masked[mask] = X[mask]
return X_masked
def ranking(Xp, parcellation, descending=True):
"""
Returns a DataFrame with the values of the 1D parcellated array sorted, together with the corresponding region labels and ids.
"""
ind = np.argsort(Xp)
if descending:
ind = ind[::-1]
labels = []
ids = []
for i in range(len(Xp)):
j = ind[i]
k = parcellation.nontrivial_ids[j]
labels.append(parcellation.labels[k])
ids.append(k)
return pd.DataFrame({'region': labels, 'id': ids, 'data': Xp[ind]})
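# Illustrative usage sketch (assumption: `Xp` is a 1D parcellated array, e.g. a
# group contrast reduced with `parcellate`):
#     ranking(Xp, mmp).head(10)   # ten regions with the largest values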
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
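# A minimal sketch (illustrative, not part of the original suite) of how a
# concrete subclass binds these shared tests to one engine; the real suite
# defines analogous TestCParser*/TestPythonParser classes further down:
#
#     class CParserExample(ParserTests, tm.TestCase):
#         def read_csv(self, *args, **kwds):
#             kwds = dict(kwds, engine='c')
#             return read_csv(*args, **kwds)
#
#         def read_table(self, *args, **kwds):
#             kwds = dict(kwds, engine='c')
#             return read_table(*args, **kwds)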
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when on chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
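# fall back to comparing hour/day/minute after converting the index to UTC when the tz identity check fails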
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
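# intentionally identical to convert_days; the test checks that both converter mappings produce the same frame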
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
"""Build daily-level feature sets, stitching together weather datasets and defining features.
"""
import numpy as np
import pandas as pd
import geopandas as gpd
from dask import dataframe as dd
from loguru import logger
from shapely.ops import nearest_points
from src.data.gfs.utils import grb2gdf
from src.conf import settings
start_year = 2017
end_year = 2019
OUTPUT_DIR = settings.DATA_DIR / "processed/training/"
if __name__ == "__main__":
df = pd.concat(
[
pd.read_parquet(settings.DATA_DIR / f"processed/caiso_hourly/{y}.parquet")
            for y in range(start_year, end_year + 1)
]
)
df.index = df.index.tz_convert("US/Pacific")
# Preprocessed hourly data is in MWh, so we can simply sum up to resample to days
df = df.groupby(pd.Grouper(freq="D")).sum()
df.reset_index(inplace=True)
# By construction, we are interested in Feb to May (inclusive)
season_filter = df["timestamp"].dt.month.isin(range(2, 6))
df = df[season_filter]
    # Flag whether the timestamp falls on a weekend (dt.weekday 5 and 6 are Saturday/Sunday)
df["is_weekday"] = df["timestamp"].dt.weekday.isin([5, 6])
# Integrate forecast data
gfs_data_files = (
settings.DATA_DIR
/ f"interim/gfs/ca/gfs_3_201[7-9][01][2-5]*_0000_{i*3:03}.parquet"
for i in range(5, 10)
)
    forecasts = list(gfs_data_files)
dayahead_weather = dd.read_parquet(forecasts).compute()
# Add UTC timezone and convert to US/Pacific
dayahead_weather["timestamp"] = (
dayahead_weather["valid_time"].dt.tz_localize("UTC").dt.tz_convert("US/Pacific")
)
dayahead_weather = grb2gdf(dayahead_weather)
# Include powerplant data
counties = gpd.read_file(
settings.DATA_DIR / "processed/geography/CA_Counties/CA_Counties_TIGER2016.shp"
)
weather_point_measurements = dayahead_weather["geometry"].geometry.unary_union
powerplants = pd.read_parquet(
settings.DATA_DIR / f"processed/geography/powerplants.parquet"
)
# Add geometry
powerplants = gpd.GeoDataFrame(
powerplants,
geometry=gpd.points_from_xy(powerplants["longitude"], powerplants["latitude"]),
crs="EPSG:4326",
)
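    # Snap each powerplant to its nearest weather-measurement point so county-level forecasts can be joined later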
powerplants["geometry"] = (
powerplants["geometry"]
.apply(lambda x: nearest_points(x, weather_point_measurements))
.str.get(1)
)
# In order to integrate powerplant data, we have to merge on the powerplant's closest county location.
powerplants = gpd.tools.sjoin(
powerplants.to_crs("EPSG:4326"),
counties[["GEOID", "geometry"]].to_crs("EPSG:4326"),
op="within",
how="left",
)
powerplants["online_date"] = powerplants["online_date"].dt.tz_localize("US/Pacific")
powerplants["retire_date"] = powerplants["retire_date"].dt.tz_localize("US/Pacific")
# Now group over GEOIDs, and sum up the capacity
# For each month, we have to only associate capacity for powerplants that were online.
weather_orig = dayahead_weather.copy()
capacities = {}
results = []
for date, weather_df in dayahead_weather.groupby(
pd.Grouper(key="timestamp", freq="MS"), as_index=False
):
if weather_df.empty:
logger.warning("Weather data for {date} is empty!", date=date)
continue
logger.debug("Assigning capacity for weather points as of {date}.", date=date)
valid_plants = (powerplants["online_date"] <= date) & (
powerplants["retire_date"].isnull() | (powerplants["retire_date"] > date)
)
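        # Keep only plants that were already online on this date and not yet retired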
valid_plants = powerplants[valid_plants]
county_mw = valid_plants.groupby("GEOID", as_index=False)["capacity_mw"].sum()
weather_df = weather_df.merge(county_mw, on="GEOID", how="left")
weather_df["capacity_mw"] = weather_df["capacity_mw"].fillna(0)
results.append(weather_df)
# Note that this is still on the original df grain as we did not aggregate the groupby!
dayahead_weather = pd.concat(results, ignore_index=True)
    # Roll up to the daily grain: average capacity per county per day, then sum across counties
daily_capacity = (
dayahead_weather.groupby(by=["GEOID", pd.Grouper(key="timestamp", freq="D")])[
"capacity_mw"
]
.mean()
.reset_index()
.groupby(by=pd.Grouper(key="timestamp", freq="D"))["capacity_mw"]
.sum()
)
county_level_dailies = dayahead_weather.groupby(
by=["GEOID", | pd.Grouper(key="timestamp", freq="D") | pandas.Grouper |
from ctypes import util
from logging.config import valid_ident
import time
import Common.ApiClient as ac
import Common.DatetimeUtility as utility
import MA.ExponentialMovingAverageStrategy as ema
from datetime import datetime
from dateutil import tz
import pandas as pd
ENDPOINT = "https://paper-api.alpaca.markets"
class TradingStrategy:
    def __init__(self, STOCK, API_KEY_ID, SECRET_KEY, model='ema'):
self.model = model
self.Datetime_Utility = utility.DatetimeUtility()
self.STOCK = STOCK
self.SELL_LIMIT_FACTOR = 1.01 # 1 percent margin
self.client = ac.ApiClient(
api_key_Id=API_KEY_ID, api_key_secret=SECRET_KEY)
# Get past one year closing data
self.df = self.get_past255_closing_prices()
if self.model.lower() == 'stl':
pass
else:
self.ema_instance = ema.ExponentialMovingAverageStrategy(df=self.df.copy(
deep=True), ticker=STOCK) # you can replace this with SimpleMovingAverage
trained_model, predicted = self.ema_instance.generate_train_model(
ticker=STOCK, plot=False)
self.trained_model = trained_model
self.trained_label = predicted
def update_strategy_model(self):
now = datetime.now(tz=tz.gettz('America/New_York'))
if self.Datetime_Utility.is_market_open_now(now):
date_str = now.date().strftime("%Y-%m-%d")
last_date_str = f'{date_str} 05:00:00+00:00'
last_price, volume = self.get_current_price()
# create a row with the current price as close price for updating the training models,
# the rest are filled with the same value but what we care is for close
data = {'open': last_price, 'high': last_price,
'low': last_price, 'close': last_price, 'volume': volume}
today_df = pd.DataFrame(
data, index=[pd.to_datetime(last_date_str)])
            idx = pd.to_datetime(today_df.index)
            today_df.set_index(idx, inplace=True)
today_df.index.name = "time"
# append the current price, so that we can predicated on updated model
df = self.df.copy()
df = | pd.concat([df, today_df]) | pandas.concat |
import inspect
import os
import warnings
from unittest.mock import MagicMock, patch
import numpy as np
import pandas as pd
import pytest
from evalml.exceptions import ValidationErrorCode
from evalml.model_understanding.graphs import visualize_decision_tree
from evalml.pipelines.components import ComponentBase
from evalml.utils.gen_utils import (
SEED_BOUNDS,
_rename_column_names_to_numeric,
are_datasets_separated_by_gap_time_index,
are_ts_parameters_valid_for_split,
classproperty,
contains_all_ts_parameters,
convert_to_seconds,
deprecate_arg,
get_importable_subclasses,
get_random_seed,
import_or_raise,
jupyter_check,
pad_with_nans,
save_plot,
validate_holdout_datasets,
)
@patch("importlib.import_module")
def test_import_or_raise_errors(dummy_importlib):
def _mock_import_function(library_str):
if library_str == "_evalml":
raise ImportError("Mock ImportError executed!")
if library_str == "attr_error_lib":
raise Exception("Mock Exception executed!")
dummy_importlib.side_effect = _mock_import_function
with pytest.raises(ImportError, match="Missing optional dependency '_evalml'"):
import_or_raise("_evalml")
with pytest.raises(
ImportError,
match="Missing optional dependency '_evalml'. Please use pip to install _evalml. Additional error message",
):
import_or_raise("_evalml", "Additional error message")
with pytest.raises(
Exception,
match="An exception occurred while trying to import `attr_error_lib`: Mock Exception executed!",
):
import_or_raise("attr_error_lib")
def test_import_or_raise_imports():
math = import_or_raise("math", "error message")
assert math.ceil(0.1) == 1
def test_convert_to_seconds():
assert convert_to_seconds("10 s") == 10
assert convert_to_seconds("10 sec") == 10
assert convert_to_seconds("10 second") == 10
assert convert_to_seconds("10 seconds") == 10
assert convert_to_seconds("10 m") == 600
assert convert_to_seconds("10 min") == 600
assert convert_to_seconds("10 minute") == 600
assert convert_to_seconds("10 minutes") == 600
assert convert_to_seconds("10 h") == 36000
assert convert_to_seconds("10 hr") == 36000
assert convert_to_seconds("10 hour") == 36000
assert convert_to_seconds("10 hours") == 36000
with pytest.raises(AssertionError, match="Invalid unit."):
convert_to_seconds("10 years")
def test_get_random_seed_rng():
def make_mock_random_state(return_value):
class MockRandomState(np.random.RandomState):
def __init__(self):
self.min_bound = None
self.max_bound = None
super().__init__()
def randint(self, min_bound, max_bound):
self.min_bound = min_bound
self.max_bound = max_bound
return return_value
return MockRandomState()
rng = make_mock_random_state(42)
assert get_random_seed(rng) == 42
assert rng.min_bound == SEED_BOUNDS.min_bound
assert rng.max_bound == SEED_BOUNDS.max_bound
def test_get_random_seed_int():
# ensure the invariant "min_bound < max_bound" is enforced
with pytest.raises(ValueError):
get_random_seed(0, min_bound=0, max_bound=0)
with pytest.raises(ValueError):
get_random_seed(0, min_bound=0, max_bound=-1)
# test default boundaries to show the provided value should modulate within the default range
assert get_random_seed(SEED_BOUNDS.max_bound - 2) == SEED_BOUNDS.max_bound - 2
assert get_random_seed(SEED_BOUNDS.max_bound - 1) == SEED_BOUNDS.max_bound - 1
assert get_random_seed(SEED_BOUNDS.max_bound) == SEED_BOUNDS.min_bound
assert get_random_seed(SEED_BOUNDS.max_bound + 1) == SEED_BOUNDS.min_bound + 1
assert get_random_seed(SEED_BOUNDS.max_bound + 2) == SEED_BOUNDS.min_bound + 2
assert get_random_seed(SEED_BOUNDS.min_bound - 2) == SEED_BOUNDS.max_bound - 2
assert get_random_seed(SEED_BOUNDS.min_bound - 1) == SEED_BOUNDS.max_bound - 1
assert get_random_seed(SEED_BOUNDS.min_bound) == SEED_BOUNDS.min_bound
assert get_random_seed(SEED_BOUNDS.min_bound + 1) == SEED_BOUNDS.min_bound + 1
assert get_random_seed(SEED_BOUNDS.min_bound + 2) == SEED_BOUNDS.min_bound + 2
# vectorize get_random_seed via a wrapper for easy evaluation
default_min_bound = (
inspect.signature(get_random_seed).parameters["min_bound"].default
)
default_max_bound = (
inspect.signature(get_random_seed).parameters["max_bound"].default
)
assert default_min_bound == SEED_BOUNDS.min_bound
assert default_max_bound == SEED_BOUNDS.max_bound
def get_random_seed_vec(
min_bound=None, max_bound=None
): # passing None for either means no value is provided to get_random_seed
def get_random_seed_wrapper(random_seed):
return get_random_seed(
random_seed,
min_bound=min_bound if min_bound is not None else default_min_bound,
max_bound=max_bound if max_bound is not None else default_max_bound,
)
return np.vectorize(get_random_seed_wrapper)
# ensure that regardless of the setting of min_bound and max_bound, the output of get_random_seed always stays
# between the min_bound (inclusive) and max_bound (exclusive), and wraps neatly around that range using modular arithmetic.
vals = np.arange(-100, 100)
def make_expected_values(vals, min_bound, max_bound):
return np.array(
[
i
if (min_bound <= i and i < max_bound)
else ((i - min_bound) % (max_bound - min_bound)) + min_bound
for i in vals
]
)
np.testing.assert_equal(
get_random_seed_vec(min_bound=None, max_bound=None)(vals),
make_expected_values(
vals, min_bound=SEED_BOUNDS.min_bound, max_bound=SEED_BOUNDS.max_bound
),
)
np.testing.assert_equal(
get_random_seed_vec(min_bound=None, max_bound=10)(vals),
make_expected_values(vals, min_bound=SEED_BOUNDS.min_bound, max_bound=10),
)
np.testing.assert_equal(
get_random_seed_vec(min_bound=-10, max_bound=None)(vals),
make_expected_values(vals, min_bound=-10, max_bound=SEED_BOUNDS.max_bound),
)
np.testing.assert_equal(
get_random_seed_vec(min_bound=0, max_bound=5)(vals),
make_expected_values(vals, min_bound=0, max_bound=5),
)
np.testing.assert_equal(
get_random_seed_vec(min_bound=-5, max_bound=0)(vals),
make_expected_values(vals, min_bound=-5, max_bound=0),
)
np.testing.assert_equal(
get_random_seed_vec(min_bound=-5, max_bound=5)(vals),
make_expected_values(vals, min_bound=-5, max_bound=5),
)
np.testing.assert_equal(
get_random_seed_vec(min_bound=5, max_bound=10)(vals),
make_expected_values(vals, min_bound=5, max_bound=10),
)
np.testing.assert_equal(
get_random_seed_vec(min_bound=-10, max_bound=-5)(vals),
make_expected_values(vals, min_bound=-10, max_bound=-5),
)
def test_class_property():
class MockClass:
name = "MockClass"
@classproperty
def caps_name(cls):
return cls.name.upper()
assert MockClass.caps_name == "MOCKCLASS"
def test_get_importable_subclasses_wont_get_custom_classes():
class ChildClass(ComponentBase):
pass
assert ChildClass not in get_importable_subclasses(ComponentBase)
@patch("importlib.import_module")
def test_import_or_warn_errors(dummy_importlib):
def _mock_import_function(library_str):
if library_str == "_evalml":
raise ImportError("Mock ImportError executed!")
if library_str == "attr_error_lib":
raise Exception("Mock Exception executed!")
dummy_importlib.side_effect = _mock_import_function
with pytest.warns(UserWarning, match="Missing optional dependency '_evalml'"):
import_or_raise("_evalml", warning=True)
with pytest.warns(
UserWarning,
match="Missing optional dependency '_evalml'. Please use pip to install _evalml. Additional error message",
):
import_or_raise("_evalml", "Additional error message", warning=True)
with pytest.warns(
UserWarning,
match="An exception occurred while trying to import `attr_error_lib`: Mock Exception executed!",
):
import_or_raise("attr_error_lib", warning=True)
@patch("evalml.utils.gen_utils.import_or_raise")
def test_jupyter_check_errors(mock_import_or_raise):
mock_import_or_raise.side_effect = ImportError
assert not jupyter_check()
mock_import_or_raise.side_effect = Exception
assert not jupyter_check()
@patch("evalml.utils.gen_utils.import_or_raise")
def test_jupyter_check(mock_import_or_raise):
mock_import_or_raise.return_value = MagicMock()
mock_import_or_raise().core.getipython.get_ipython.return_value = True
assert jupyter_check()
mock_import_or_raise().core.getipython.get_ipython.return_value = False
assert not jupyter_check()
mock_import_or_raise().core.getipython.get_ipython.return_value = None
assert not jupyter_check()
def _check_equality(data, expected, check_index_type=True):
if isinstance(data, pd.Series):
pd.testing.assert_series_equal(data, expected, check_index_type)
else:
pd.testing.assert_frame_equal(data, expected, check_index_type)
@pytest.mark.parametrize(
"data,num_to_pad,expected",
[
(pd.Series([1, 2, 3]), 1, pd.Series([np.nan, 1, 2, 3], dtype="float64")),
(pd.Series([1, 2, 3]), 0, pd.Series([1, 2, 3])),
(
pd.Series([1, 2, 3, 4], index=pd.date_range("2020-10-01", "2020-10-04")),
2,
pd.Series([np.nan, np.nan, 1, 2, 3, 4], dtype="float64"),
),
(
pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 5.0, 6.0]}),
0,
pd.DataFrame(
{
"a": | pd.Series([1.0, 2.0, 3.0], dtype="float64") | pandas.Series |
import pandas as pd
import heapq
import folium
from folium.plugins import HeatMap
from pandas.io.json import json_normalize
def load_uber_file(movement):
'''
Can read all kinds of uber movement csv files
:param movement: the uber movement csv file
:return: a cleaned dataframe contains travel times and movement ids
'''
assert isinstance(movement, str)
assert len(movement) != 0
return pd.read_csv(movement)
def extract_movement_id(la_map):
'''
By reading the map file, we can extract the coordinates of different uber movement ids
:param la_map: the Los Angeles map json file provided by Uber
:return: a dataframe contains each movement id and their coordinates
'''
assert isinstance(la_map, str)
assert len(la_map) != 0
map_LA = | pd.read_json(la_map, orient='records') | pandas.read_json |
import numpy as np
import pandas as pd
import pytest
@pytest.fixture(scope="module")
def df_vartypes():
data = {
"Name": ["tom", "nick", "krish", "jack"],
"City": ["London", "Manchester", "Liverpool", "Bristol"],
"Age": [20, 21, 19, 18],
"Marks": [0.9, 0.8, 0.7, 0.6],
"dob": | pd.date_range("2020-02-24", periods=4, freq="T") | pandas.date_range |
import json
import boto3
import pandas as pd
from pandas.io.json import json_normalize
from tqdm import tqdm
def ConvertBaselineJson(siteUuidList):
userUuidCards = pd.DataFrame()
for siteUuid in tqdm(siteUuidList):
SiteuserUuidCards = GetBaselineJson([siteUuid])
if len(SiteuserUuidCards) == 0:
continue
columns = [col for col in SiteuserUuidCards if col.startswith('cardIds')]
melted = SiteuserUuidCards[columns + ['userUuid','enrollDate']].melt(id_vars=['userUuid','enrollDate'])
melted = melted.drop(columns=['variable'])
melted = melted.dropna(subset=['value'])
melted['cardType'] = melted.value.apply(lambda x:x.split('-')[0])
melted['cardFirstSix'] = melted.value.apply(lambda x:x.split('-')[1])
melted['cardLastFour'] = melted.value.apply(lambda x:x.split('-')[2])
melted['siteUuid'] = siteUuid
melted['merchantUuid'] = GetSiteInfo(siteUuid)['merchantUuid']
melted = melted.rename(columns = {'value':'cardId'})
userUuidCards = userUuidCards.append(melted,sort=False)
return userUuidCards
def flatten_json(y):
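    # Recursively flatten nested dicts/lists into a single-level dict with underscore-joined keys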
out = {}
def flatten(x, name=''):
if type(x) is dict:
for a in x:
flatten(x[a], name + a + '_')
elif type(x) is list:
i = 0
for a in x:
flatten(a, name + str(i) + '_')
i += 1
else:
out[name[:-1]] = x
flatten(y)
return out
def GetAllVisibleSites():
"""
Return a list of sites
"""
import pandas as pd
SiteInfo = pd.read_csv('/Users/alessandroorfei/PycharmProjects/aggregate-incremental/resources/gas_merchant_service.csv')
SiteInfo = SiteInfo[(SiteInfo.visibility == "DEFAULT")].copy()
return list(SiteInfo['siteUuid'])
def GetBaselineJson(siteUuidList):
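    # Read each site's baseline.json from S3 and flatten its userBaselines into one DataFrame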
s3 = boto3.resource('s3')
if type(siteUuidList) != list:
siteUuidList = [siteUuidList]
AllSites = pd.DataFrame()
for siteUuid in tqdm(siteUuidList):
merchantUuid = GetSiteInfo(siteUuid)['merchantUuid']
content_object = s3.Object('data.upside-services.com',
'service-station/' + merchantUuid + '/' + siteUuid + '/data/analysis/baseline.json')
file_content = content_object.get()['Body'].read().decode('utf-8')
d = json.loads(file_content)
SiteuserUuidCards = pd.DataFrame()
for user in range(0, len(d['userBaselines'])):
d_flat = flatten_json(d['userBaselines'][user])
dat = json_normalize(d_flat)
SiteuserUuidCards['siteUuid'] = siteUuid
SiteuserUuidCards = SiteuserUuidCards.append(dat, ignore_index=True, sort=False)
AllSites = AllSites.append(SiteuserUuidCards, sort=False)
return AllSites
def GetIncremental(siteUuidList, StartDate, EndDate, userUuidList=[]):
"""
This function returns a dataframe of Incremental data for a siteUuid
parameters:
siteUuidList: site identifiers. e.g. ['e30a6caa-efdd-4d5d-92ad-010d1d158a35']
StartDate: string date, e.g. "2018-04-01"
EndDate: string date, e.g. "2018-10-31"
returns:
DataFrame with Incremental date converted to datetime
"""
import os
import pandas as pd
os.system('pip2 install --upgrade runbookcli')
os.chdir('/Users/alessandroorfei/Desktop/')
if type(siteUuidList) != list:
siteUuidList = [siteUuidList]
Incremental = pd.DataFrame()
for siteUuid in tqdm(siteUuidList):
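        # Shell out to the runbook CLI to download this site's incremental CSV, then load and date-filter it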
incremental_downloader = 'runbook get_incremental prod ' + 'incremental_' + siteUuid + '.csv --sites ' + siteUuid
print(incremental_downloader)
os.system(incremental_downloader)
SiteIncremental = pd.read_csv('/Users/alessandroorfei/Desktop/' + 'incremental_' + siteUuid + '.csv')
SiteIncremental['date'] = pd.to_datetime(SiteIncremental.date)
SiteIncremental = SiteIncremental[(SiteIncremental.date >= pd.to_datetime(StartDate))
& (SiteIncremental.date <= | pd.to_datetime(EndDate) | pandas.to_datetime |
import numpy as np
import pandas as pd
###############################################################################
#Non-Standard Imports
###############################################################################
import dunlin._utils_model.ode_classes as umo
import dunlin.standardfile as stf
###############################################################################
#Main Instantiation Algorithm
###############################################################################
def read_file(*filenames, **kwargs):
dun_data = stf.read_file(*filenames)
models = make_models(dun_data, **kwargs)
return dun_data, models
def make_models(dun_data, _check_sub=True):
models = {section['model_key'] : Model(**section) for section in dun_data if 'model_key' in section}
if _check_sub:
        for model in models.values():
            model._check_sub(model.model_key)
return models
###############################################################################
#Dunlin Model
###############################################################################
class Model:
'''
This is the front-end class for representing a model.
'''
#Hierarchy management
_cache = {}
_sub = {}
#Attribute management
_checkkw = True
_locked = ['model_key', 'rxns', 'vrbs', 'funcs', 'rts']
_df = ['states', 'params']
_kw = {'int_args' : {'method' : 'LSODA'},
'sim_args' : {},
'optim_args' : {},
'strike_goldd_args' : {},
}
###############################################################################
#Hierarchy Tracking
###############################################################################
@staticmethod
def _find_submodels(model_data):
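        # Scan reaction definitions for 'submodel' references and record (submodel_key, n_substates, n_subparams)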
rxns = model_data['rxns']
subs = []
if not rxns:
return subs
for rxn_args in rxns.values():
if hasattr(rxn_args, 'items'):
sub = rxn_args.get('submodel')
if sub:
sub = [sub, len(rxn_args['substates']), len(rxn_args['subparams'])]
subs.append(sub)
elif 'submodel ' in rxn_args.get('rxn', ''):
sub = [rxn_args['rxn'][9:], len(rxn_args['substates']), len(rxn_args['subparams'])]
subs.append(sub)
else:
if 'submodel ' in rxn_args[0]:
sub = rxn_args[0][9:], len(rxn_args[1]), len(rxn_args[2])
subs.append(sub)
return subs
@classmethod
def _check_sub(cls, model_key, _super=()):
if model_key in _super:
raise SubmodelRecursionError(*_super, model_key)
subs = cls._sub[model_key]
for (sub, y_args, p_args) in subs:
#Check if submodel exists
if sub not in cls._cache:
raise MissingSubmodelError(model_key, sub)
#Check number of substates and subparams
if len(cls._cache[sub].get_state_names()) != y_args:
raise SubmodelLenError(model_key, sub, 'states(y)')
elif len(cls._cache[sub].get_param_names()) != p_args:
raise SubmodelLenError(model_key, sub, 'params(p)')
cls._check_sub(sub, _super=_super+(model_key,))
###############################################################################
#Instantiation
###############################################################################
def __init__(self, model_key, states, params,
rxns=None, vrbs=None, funcs=None, rts=None,
exvs=None, events=None, tspan=None,
**kwargs
):
#Set the locked attributes using the super method
        tspan_ = {} if tspan is None else tspan
super().__setattr__('model_key', model_key)
super().__setattr__('_states_tuple', tuple(states.keys()))
super().__setattr__('_params_tuple', tuple(params.keys()))
super().__setattr__('rxns', rxns )
super().__setattr__('vrbs', vrbs )
super().__setattr__('funcs', funcs )
super().__setattr__('rts', rts )
super().__setattr__('exvs', exvs )
super().__setattr__('events', events )
super().__setattr__('tspan', tspan_ )
#Set property based attributes
self.states = states
self.params = params
#Set analysis settings
for k, v in {**self._kw, **kwargs}.items():
if k not in self._kw and self._checkkw:
msg = f'Attempted to instantiate Model with invalid attribute: {k}'
raise AttributeError(msg)
super().__setattr__(k, v)
#Check types
if any([type(x) != str for x in self._states_tuple]):
raise NameError('States can only have strings as names.')
if any([type(x) != str for x in self._params_tuple]):
raise NameError('Params can only have strings as names.')
#Prepare dict to create functions
model_data = self.to_dict()
#Create functions
super().__setattr__('ode', umo.ODEModel(**model_data))
#Set mode
self._mode = 'ode'
#Track model and submodels
self._sub[model_key] = self._find_submodels(model_data)
self._cache[model_key] = self
def new(self, **kwargs):
args = self.to_dict()
args = {**args, **kwargs}
return type(self)(**args)
###############################################################################
#Attribute Management
###############################################################################
def _df2dict(self, attr, value):
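        # Coerce dict/DataFrame/Series input into an {index: values} dict ordered by this model's state/param names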
if type(value) in [dict, pd.DataFrame]:
df = pd.DataFrame(value)
elif type(value) == pd.Series:
df = pd.DataFrame(value).T
else:
            raise TypeError(f"Model object's '{attr}' attribute can be assigned using dict, DataFrame or Series.")
#Check values
if df.isnull().values.any():
raise ValueError('Missing or NaN values.')
#Extract values
keys = list(getattr(self, '_' + attr + '_tuple'))
try:
df = df[keys]
except KeyError:
raise ModelMismatchError(keys, df.keys())
#Save as dict
return dict(zip(df.index, df.values))
def _dict2df(self, attr):
dct = getattr(self, '_'+attr)
# df = pd.DataFrame(dct).from_dict(dct, 'index')
df = | pd.DataFrame(dct) | pandas.DataFrame |
# MLP
import csv
from itertools import islice
import random
import matplotlib.pyplot as plt
import numpy as np
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import KFold, train_test_split
import pandas as pd
from sklearn.utils import shuffle
import tensorflow as tf
def bit2attr(bitstr) -> list:
attr_vec = list()
for i in range(len(bitstr)):
attr_vec.append(int(bitstr[i]))
return attr_vec
def mean_relative_error(y_pred, y_test):
assert len(y_pred) == len(y_test)
mre = 0.0
for i in range(len(y_pred)):
mre = mre + abs((y_pred[i] - y_test[i]) / y_test[i])
mre = mre * 100/ len(y_pred)
return mre
Large_MRE_points = pd.DataFrame()
Large_MRE_X = []
Large_MRE_y_test = []
Large_MRE_y_pred = []
Large_MRE = []
'''
1) Data preprocessing
'''
# filepath = 'data/fp/sjn/R+B+Cmorgan_fp1202.csv'
filepath = 'data/database/22-01-29-descriptor-train.csv'
data = pd.read_csv(filepath, encoding='gb18030')
print(data.shape)
data = data.dropna()
print(data.shape)
data = shuffle(data)
data_x_df = data.drop(['label'], axis=1)
data_y_df = data[['label']]
# Min-max normalization
min_max_scaler_X = MinMaxScaler()
min_max_scaler_X.fit(data_x_df)
x_trans1 = min_max_scaler_X.transform(data_x_df)
min_max_scaler_y = MinMaxScaler()
min_max_scaler_y.fit(data_y_df)
y_trans1 = min_max_scaler_y.transform(data_y_df)
test_filepath = "data/database/22-01-29-descriptor-test-level-1.csv"
test_data = pd.read_csv(test_filepath, encoding='gb18030')
print('test data: ', test_data.shape)
test_data_x_df = test_data.drop(['label'], axis=1)
test_data_y_df = test_data[['label']]
x_trans1_test = min_max_scaler_X.transform(test_data_x_df)
y_trans1_test = min_max_scaler_y.transform(test_data_y_df)
'''
3) Build the model
'''
from keras.layers import MaxPooling1D, Conv1D, Dense, Flatten, Dropout
from keras import models, regularizers
from keras.optimizers import Adam, RMSprop, SGD
def buildModel():
model = models.Sequential()
l4 = Dense(512, activation='relu')
l5 = Dropout(rate=0.2)
l6 = Dense(128, activation='relu')
l7 = Dense(30, activation='relu')
l8 = Dense(1)
layers = [l4, l5, l6, l7, l8]
for i in range(len(layers)):
model.add(layers[i])
adam = Adam(lr=1e-3)
model.compile(optimizer=adam, loss='logcosh', metrics=['mae'])
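    # NOTE: the sklearn MLPRegressor below is constructed but never used; the Keras model above is what gets returned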
model_mlp = MLPRegressor(
hidden_layer_sizes=(512, 128, 32), activation='relu', solver='lbfgs', alpha=0.0001,
max_iter=5000,
random_state=1, tol=0.0001, verbose=False, warm_start=False)
return model
def scheduler(epoch, lr):
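    # Decay the learning rate by 10x every 500 epochs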
if epoch > 0 and epoch % 500 == 0:
return lr * 0.1
else:
return lr
'''
4) Train the model
'''
from sklearn import metrics
# n_split = 10
mlp_scores = []
MAEs = []
out_MAEs = []
in_y_test = []
in_y_pred = []
out_y_test = []
out_y_pred = []
X_train = x_trans1
y_train = y_trans1
callback = tf.keras.callbacks.LearningRateScheduler(scheduler, verbose=1)
model_mlp = buildModel()
model_mlp.fit(X_train, y_train, epochs=2000, verbose=1, callbacks=[callback])
# External validation
X_test = x_trans1_test
result = model_mlp.predict(x_trans1_test)
y_trans1_test = np.reshape(y_trans1_test, (-1, 1))
y_test = min_max_scaler_y.inverse_transform(y_trans1_test)
result = result.reshape(-1, 1)
result = min_max_scaler_y.inverse_transform(result)
mae = mean_relative_error(y_test, result)
out_MAEs.append(mae)
Large_MRE_X = [] ## Type of X_test??
Large_MRE_y_test = []
Large_MRE_y_pred = []
Large_MRE = []
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1]))
X_test = min_max_scaler_X.inverse_transform(X_test)
for idx in range(len(y_test)):
Large_MRE.append(mean_relative_error([result[idx]], [y_test[idx]])[0])
Large_MRE_y_test = list(np.reshape(y_test, (-1,)))
Large_MRE_y_pred = list(np.reshape(result, (-1,)))
temp = | pd.DataFrame(X_test) | pandas.DataFrame |
# *****************************************************************************
# Copyright (c) 2019, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import unittest
import pandas as pd
import numpy as np
from math import sqrt
import numba
import sdc
from sdc.tests.test_utils import (count_array_REPs, count_parfor_REPs,
count_parfor_OneDs, count_array_OneDs,
count_parfor_OneD_Vars, count_array_OneD_Vars,
dist_IR_contains)
from datetime import datetime
import random
class TestDate(unittest.TestCase):
@unittest.skip("needs support for boxing/unboxing DatetimeIndex")
def test_datetime_index_in(self):
def test_impl(dti):
return dti
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
dti = pd.DatetimeIndex(df['str_date'])
np.testing.assert_array_equal(hpat_func(dti).values, test_impl(dti).values)
def test_datetime_index(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).values
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_kw(self):
def test_impl(df):
return pd.DatetimeIndex(data=df['str_date']).values
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_arg(self):
def test_impl(A):
return A
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_datetime_getitem(self):
def test_impl(A):
return A[0]
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
self.assertEqual(hpat_func(A), test_impl(A))
def test_ts_map(self):
def test_impl(A):
return A.map(lambda x: x.hour)
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_ts_map_date(self):
def test_impl(A):
return A.map(lambda x: x.date())[0]
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_ts_map_date2(self):
def test_impl(df):
return df.apply(lambda row: row.dt_ind.date(), axis=1)[0]
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
df['dt_ind'] = pd.DatetimeIndex(df['str_date'])
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_ts_map_date_set(self):
def test_impl(df):
df['hpat_date'] = df.dt_ind.map(lambda x: x.date())
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
df['dt_ind'] = pd.DatetimeIndex(df['str_date'])
hpat_func(df)
df['pd_date'] = df.dt_ind.map(lambda x: x.date())
np.testing.assert_array_equal(df['hpat_date'], df['pd_date'])
def test_date_series_unbox(self):
def test_impl(A):
return A[0]
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series().map(lambda x: x.date())
self.assertEqual(hpat_func(A), test_impl(A))
def test_date_series_unbox2(self):
def test_impl(A):
return A[0]
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).map(lambda x: x.date())
self.assertEqual(hpat_func(A), test_impl(A))
def test_datetime_index_set(self):
def test_impl(df):
df['sdc'] = pd.DatetimeIndex(df['str_date']).values
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
hpat_func(df)
df['std'] = pd.DatetimeIndex(df['str_date'])
allequal = (df['std'].equals(df['sdc']))
self.assertTrue(allequal)
def test_timestamp(self):
def test_impl():
dt = datetime(2017, 4, 26)
ts = pd.Timestamp(dt)
return ts.day + ts.hour + ts.microsecond + ts.month + ts.nanosecond + ts.second + ts.year
hpat_func = sdc.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
def test_extract(self):
def test_impl(s):
return s.month
hpat_func = sdc.jit(test_impl)
ts = pd.Timestamp(datetime(2017, 4, 26).isoformat())
month = hpat_func(ts)
self.assertEqual(month, 4)
def test_timestamp_date(self):
def test_impl(s):
return s.date()
hpat_func = sdc.jit(test_impl)
ts = pd.Timestamp(datetime(2017, 4, 26).isoformat())
self.assertEqual(hpat_func(ts), test_impl(ts))
def test_datetimeindex_str_comp(self):
def test_impl(df):
return (df.A >= '2011-10-23').values
df = pd.DataFrame({'A': pd.DatetimeIndex(['2015-01-03', '2010-10-11'])})
hpat_func = sdc.jit(test_impl)
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetimeindex_str_comp2(self):
def test_impl(df):
return ('2011-10-23' <= df.A).values
df = pd.DataFrame({'A': pd.DatetimeIndex(['2015-01-03', '2010-10-11'])})
hpat_func = sdc.jit(test_impl)
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_df(self):
def test_impl(df):
df = pd.DataFrame({'A': pd.DatetimeIndex(df['str_date'])})
return df.A
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_date(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).date
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_max(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).max()
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
self.assertEqual(hpat_func(df), test_impl(df))
def test_datetime_index_min(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).min()
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
self.assertEqual(hpat_func(df), test_impl(df))
def test_datetime_index_timedelta_days(self):
def test_impl(df):
s = pd.DatetimeIndex(df['str_date'])
t = s - s.min()
return t.days
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_timedelta_seconds(self):
def test_impl(df):
s = pd.DatetimeIndex(df['str_date'])
t = s - s.min()
return t.seconds
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_timedelta_microseconds(self):
def test_impl(df):
s = pd.DatetimeIndex(df['str_date'])
t = s - s.min()
return t.microseconds
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_timedelta_nanoseconds(self):
def test_impl(df):
s = pd.DatetimeIndex(df['str_date'])
t = s - s.min()
return t.nanoseconds
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_ret(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date'])
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
pd.testing.assert_index_equal(hpat_func(df), test_impl(df),
check_names=False)
def test_datetime_index_year(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).year
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_month(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).month
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_day(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).day
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_hour(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).hour
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_minute(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).minute
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_second(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).second
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_microsecond(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).microsecond
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_nanosecond(self):
def test_impl(df):
return | pd.DatetimeIndex(df['str_date']) | pandas.DatetimeIndex |
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from ... import opcodes as OperandDef
from ...serialize import AnyField, StringField, ListField
from ...utils import recursive_tile
from ...tensor.base import sort
from ..utils import build_empty_df, build_empty_series
from ..core import DATAFRAME_TYPE, SERIES_TYPE
from ..operands import DataFrameOperand, DataFrameOperandMixin
class DataFrameAstype(DataFrameOperand, DataFrameOperandMixin):
_op_type_ = OperandDef.ASTYPE
_dtype_values = AnyField('dtype_values')
_errors = StringField('errors')
_category_cols = ListField('category_cols')
def __init__(self, dtype_values=None, errors=None, category_cols=None, output_types=None, **kw):
super().__init__(_dtype_values=dtype_values, _errors=errors, _category_cols=category_cols,
_output_types=output_types, **kw)
@property
def dtype_values(self):
return self._dtype_values
@property
def errors(self):
return self._errors
@property
def category_cols(self):
return self._category_cols
@classmethod
def _tile_one_chunk(cls, op):
c = op.inputs[0].chunks[0]
chunk_op = op.copy().reset_key()
chunk_params = op.outputs[0].params.copy()
chunk_params['index'] = c.index
out_chunks = [chunk_op.new_chunk([c], **chunk_params)]
new_op = op.copy()
return new_op.new_tileables(op.inputs, nsplits=op.inputs[0].nsplits,
chunks=out_chunks, **op.outputs[0].params.copy())
@classmethod
def _tile_series_index(cls, op):
in_series = op.inputs[0]
out = op.outputs[0]
unique_chunk = None
if op.dtype_values == 'category' and isinstance(op.dtype_values, str):
unique_chunk = recursive_tile(sort(in_series.unique())).chunks[0]
chunks = []
for c in in_series.chunks:
chunk_op = op.copy().reset_key()
params = c.params.copy()
params['dtype'] = out.dtype
if unique_chunk is not None:
chunk_op._category_cols = [in_series.name]
new_chunk = chunk_op.new_chunk([c, unique_chunk], **params)
else:
new_chunk = chunk_op.new_chunk([c], **params)
chunks.append(new_chunk)
new_op = op.copy()
return new_op.new_tileables(op.inputs, nsplits=in_series.nsplits,
chunks=chunks, **out.params.copy())
@classmethod
def _tile_dataframe(cls, op):
in_df = op.inputs[0]
out = op.outputs[0]
cum_nsplits = np.cumsum((0,) + in_df.nsplits[1])
out_chunks = []
if op.dtype_values == 'category':
# all columns need unique values
for c in in_df.chunks:
chunk_op = op.copy().reset_key()
params = c.params.copy()
dtypes = out.dtypes[cum_nsplits[c.index[1]]: cum_nsplits[c.index[1] + 1]]
params['dtypes'] = dtypes
chunk_op._category_cols = list(c.columns_value.to_pandas())
unique_chunks = []
for col in c.columns_value.to_pandas():
unique_chunks.append(recursive_tile(sort(in_df[col].unique())).chunks[0])
new_chunk = chunk_op.new_chunk([c] + unique_chunks, **params)
out_chunks.append(new_chunk)
elif isinstance(op.dtype_values, dict) and 'category' in op.dtype_values.values():
# some columns' types are category
category_cols = [c for c, v in op.dtype_values.items()
if isinstance(v, str) and v == 'category']
unique_chunks = dict((col, recursive_tile(sort(in_df[col].unique())).chunks[0])
for col in category_cols)
for c in in_df.chunks:
chunk_op = op.copy().reset_key()
params = c.params.copy()
dtypes = out.dtypes[cum_nsplits[c.index[1]]: cum_nsplits[c.index[1] + 1]]
params['dtypes'] = dtypes
chunk_category_cols = []
chunk_unique_chunks = []
for col in c.columns_value.to_pandas():
if col in category_cols:
chunk_category_cols.append(col)
chunk_unique_chunks.append(unique_chunks[col])
chunk_op._category_cols = chunk_category_cols
new_chunk = chunk_op.new_chunk([c] + chunk_unique_chunks, **params)
out_chunks.append(new_chunk)
else:
for c in in_df.chunks:
chunk_op = op.copy().reset_key()
params = c.params.copy()
dtypes = out.dtypes[cum_nsplits[c.index[1]]: cum_nsplits[c.index[1] + 1]]
params['dtypes'] = dtypes
new_chunk = chunk_op.new_chunk([c], **params)
out_chunks.append(new_chunk)
new_op = op.copy()
return new_op.new_dataframes(op.inputs, nsplits=in_df.nsplits,
chunks=out_chunks, **out.params.copy())
@classmethod
def tile(cls, op):
if len(op.inputs[0].chunks) == 1:
return cls._tile_one_chunk(op)
elif isinstance(op.inputs[0], DATAFRAME_TYPE):
return cls._tile_dataframe(op)
else:
return cls._tile_series_index(op)
@classmethod
def execute(cls, ctx, op):
in_data = ctx[op.inputs[0].key]
if not isinstance(op.dtype_values, dict):
if op.category_cols is not None:
uniques = [ctx[c.key] for c in op.inputs[1:]]
dtype = dict((col, CategoricalDtype(unique_values)) for
col, unique_values in zip(op.category_cols, uniques))
ctx[op.outputs[0].key] = in_data.astype(dtype, errors=op.errors)
elif isinstance(in_data, pd.Index):
ctx[op.outputs[0].key] = in_data.astype(op.dtype_values)
else:
ctx[op.outputs[0].key] = in_data.astype(op.dtype_values, errors=op.errors)
else:
selected_dtype = dict((k, v) for k, v in op.dtype_values.items()
if k in in_data.columns)
if op.category_cols is not None:
uniques = [ctx[c.key] for c in op.inputs[1:]]
for col, unique_values in zip(op.category_cols, uniques):
selected_dtype[col] = CategoricalDtype(unique_values)
ctx[op.outputs[0].key] = in_data.astype(selected_dtype, errors=op.errors)
def __call__(self, df):
if isinstance(df, DATAFRAME_TYPE):
empty_df = build_empty_df(df.dtypes)
new_df = empty_df.astype(self.dtype_values, errors=self.errors)
dtypes = []
for dt, new_dt in zip(df.dtypes, new_df.dtypes):
if new_dt != dt and isinstance(new_dt, CategoricalDtype):
dtypes.append(CategoricalDtype())
else:
dtypes.append(new_dt)
dtypes = | pd.Series(dtypes, index=new_df.dtypes.index) | pandas.Series |
'''
https://www.scitepress.org/Papers/2015/55519/55519.pdf
http://archive.ics.uci.edu/ml/datasets/Wine+Quality
'''
import os
import pandas as pd
from scipy.io import arff
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, OrdinalEncoder, StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.neural_network import MLPRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split, cross_val_score, KFold, GridSearchCV
from sklearn.metrics import mean_absolute_error
dataset_r = pd.read_csv(r'..\..\data\regression\wine-quality\winequality-red.csv', delimiter=';', ) # header=None, usecols=[3,6]
dataset_w = | pd.read_csv(r'..\..\data\regression\wine-quality\winequality-white.csv', delimiter=';') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Updating EDDI on a monthly basis.
Run this using crontab once a month to pull netcdf files from NOAA's PSD
FTP server, transform them to fit in the app, and either append them to an
existing file or build the data set from scratch. This also rebuilds each
percentile netcdf entirely because those are rank based.
For more information check Get_WWDT.py
Created on Fri Feb 10 14:33:38 2019
@author: User
"""
import calendar
import datetime as dt
import ftplib
from glob import glob
from netCDF4 import Dataset
import numpy as np
import os
from osgeo import gdal
import pandas as pd
import pathlib
import sys
from tqdm import tqdm
import xarray as xr
# Refactor all of this
pwd = str(pathlib.Path(__file__).parent.absolute())
data_path = os.path.join(pwd, "..")
sys.path.insert(0, data_path)
from functions import isInt, toNetCDF, toNetCDFAlbers, toNetCDFPercentile
# gdal.PushErrorHandler('CPLQuietErrorHandler')
os.environ['GDAL_PAM_ENABLED'] = 'NO'
# There are often missing epsg codes in the gcs.csv file, but proj4 works
proj = ('+proj=aea +lat_1=20 +lat_2=60 +lat_0=40 +lon_0=-96 +x_0=0 +y_0=0 ' +
'+ellps=GRS80 +datum=NAD83 +units=m no_defs')
# Get resolution from file call
try:
res = float(sys.argv[1])
except:
res = 0.25
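# Example invocation (illustrative; the cron entry itself is assumed):
#   python Get_EDDI.py 0.125   -> run at 1/8-degree resolution
#   python Get_EDDI.py         -> fall back to the 0.25-degree default above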
# In[] Data source and target directory
ftp_path = 'ftp://ftp.cdc.noaa.gov/Projects/EDDI/CONUS_archive/data'
temp_folder = os.path.join(data_path, 'data/droughtindices/netcdfs/eddi')
pc_folder = os.path.join(data_path, 'data/droughtindices/netcdfs/percentiles')
if not os.path.exists(temp_folder):
os.makedirs(temp_folder)
if not os.path.exists(pc_folder):
os.makedirs(pc_folder)
# In[] Index options
indices = ['eddi1', 'eddi2', 'eddi3', 'eddi4', 'eddi5', 'eddi6', 'eddi7',
'eddi8', 'eddi9', 'eddi10', 'eddi11', 'eddi12']
# In[] Define scraping routine
def getEDDI(scale, date, temp_folder, write=False):
'''
    These come out daily, but each represents the accumulated conditions of
    the prior 30 days. Since we want one value per month, we only download
    the last day of each month. I'm not sure whether it will be possible to
    append this directly to an existing netcdf or if we need to write to a
    file first.
'''
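    # Illustrative example of the file name pattern requested below: with
    # scale=1 and a date in April 2019, the target file is
    # 'EDDI_ETrs_01mn_20190430.asc', i.e. the 1-month EDDI grid for the last
    # day of that month.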
year = date.year
month = date.month
last_day = calendar.monthrange(year, month)[1]
if not write:
memory_file = []
def appendline(line):
memory_file.append(line)
try:
file_name = 'EDDI_ETrs_{:02d}mn_{}{:02d}{}.asc'.format(scale, year,
month, last_day)
ftp.retrlines('RETR ' + file_name, appendline)
except:
file_name = 'EDDI_ETrs_{:02d}mn_{}{:02d}{}.asc'.format(scale, year,
month, last_day-1)
ftp.retrlines('RETR ' + file_name, appendline)
return memory_file
else:
def writeline(line):
local_file.write(line + "\n")
local_file = open(os.path.join(temp_folder, 'eddi.asc'), 'w')
try:
file_name = 'EDDI_ETrs_{:02d}mn_{}{:02d}{}.asc'.format(scale, year,
month, last_day)
ftp.retrlines('RETR ' + file_name, writeline)
except:
file_name = 'EDDI_ETrs_{:02d}mn_{}{:02d}{}.asc'.format(scale, year,
month, last_day - 1)
ftp.retrlines('RETR ' + file_name, writeline)
local_file.close()
return os.path.join(temp_folder, 'eddi.asc')
# In[] Today's date, month, and year
todays_date = dt.datetime.today()
today = np.datetime64(todays_date)
print("##")
print("#####")
print("############")
print("#######################")
print("#######################################")
print("####################################################")
print("\nRunning Get_EDDI.py using a " + str(res) + " degree resolution:\n")
print(str(today) + '\n')
# In[] Get time series of currently available values
# Connect to FTP
ftp = ftplib.FTP('ftp.cdc.noaa.gov', 'anonymous', '<EMAIL>')
for index in indices:
ftp.cwd('/Projects/EDDI/CONUS_archive/data/')
print('\n' + index)
original_path = os.path.join(data_path, "data/droughtindices/netcdfs/",
index + '.nc')
albers_path = os.path.join(data_path, "data/droughtindices/netcdfs/albers",
index + '.nc')
percentile_path = os.path.join(data_path,
"data/droughtindices/netcdfs/percentiles",
index + '.nc')
scale = index[-2:]
scale = int("".join([s for s in scale if isInt(s)]))
# Delete existing contents of temporary folder
temps = glob(os.path.join(temp_folder, "*"))
for t in temps:
os.remove(t)
####### If we are only missing some dates #################################
if os.path.exists(original_path):
with xr.open_dataset(original_path) as data:
dates = pd.DatetimeIndex(data.time.data)
data.close()
# Extract dates
d1 = dates[0]
d2 = dates[-1]
# Get a list of the dates already in the netcdf file
existing_dates = pd.date_range(d1, d2, freq="M")
# Get all of the last day of month files for the index
ftp_years = ftp.nlst()
ftp_years = [f for f in ftp_years if isInt(f)]
# First Date
ftp.cwd(os.path.join('/Projects/EDDI/CONUS_archive/data/',
ftp_years[0]))
ftp_files = ftp.nlst()
ftp_files = [f for f in ftp_files
if f[-17:-13] == "{:02d}mn".format(scale)]
ftp_first = ftp_files[0]
first_date = pd.to_datetime(ftp_first[-12:-4], format='%Y%m%d')
# Last Date
ftp.cwd(os.path.join('/Projects/EDDI/CONUS_archive/data/',
ftp_years[-1]))
ftp_files = ftp.nlst()
ftp_files = [f for f in ftp_files
if f[-17:-13] == "{:02d}mn".format(scale)]
ftp_last = ftp_files[-1]
last_date = pd.to_datetime(ftp_last[-12: -4], format='%Y%m%d')
# All dates available
available_dates = | pd.date_range(first_date, last_date, freq='M') | pandas.date_range |
import os
import pickle
import numpy as np
import pandas as pd
import nibabel as nib
from statsmodels.gam.api import BSplines
from .neuroCombat import make_design_matrix, adjust_data_final
def harmonizationApply(data, covars, model, return_stand_mean=False):
"""
Applies harmonization model with neuroCombat functions to new data.
Arguments
---------
data : a numpy array
data to harmonize with ComBat, dimensions are N_samples x N_features
covars : a pandas DataFrame
contains covariates to control for during harmonization
all covariates must be encoded numerically (no categorical variables)
must contain a single column "SITE" with site labels for ComBat
dimensions are N_samples x (N_covariates + 1)
model : a dictionary of model parameters
the output of a call to harmonizationLearn()
Returns
-------
bayes_data : a numpy array
harmonized data, dimensions are N_samples x N_features
"""
# transpose data as per ComBat convention
data = data.T
# prep covariate data
batch_col = covars.columns.get_loc('SITE')
isTrainSite = covars['SITE'].isin(model['SITE_labels'])
cat_cols = []
num_cols = [covars.columns.get_loc(c) for c in covars.columns if c!='SITE']
covars = np.array(covars, dtype='object')
# load the smoothing model
smooth_model = model['smooth_model']
smooth_cols = smooth_model['smooth_cols']
### additional setup code from neuroCombat implementation:
# convert training SITEs in batch col to integers
site_dict = dict(zip(model['SITE_labels'], np.arange(len(model['SITE_labels']))))
covars[:,batch_col] = np.vectorize(site_dict.get)(covars[:,batch_col],-1)
# compute samples_per_batch for training data
sample_per_batch = [np.sum(covars[:,batch_col]==i) for i in list(site_dict.values())]
sample_per_batch = np.asarray(sample_per_batch)
# create dictionary that stores batch info
batch_levels = np.unique(list(site_dict.values()),return_counts=False)
info_dict = {
'batch_levels': batch_levels.astype('int'),
'n_batch': len(batch_levels),
'n_sample': int(covars.shape[0]),
'sample_per_batch': sample_per_batch.astype('int'),
'batch_info': [list(np.where(covars[:,batch_col]==idx)[0]) for idx in batch_levels]
}
covars[~isTrainSite, batch_col] = 0
covars[:,batch_col] = covars[:,batch_col].astype(int)
###
# isolate array of data in training site
# apply ComBat without re-learning model parameters
design = make_design_matrix(covars, batch_col, cat_cols, num_cols,nb_class = len(model['SITE_labels']))
design[~isTrainSite,0:len(model['SITE_labels'])] = np.nan
### additional setup if smoothing is performed
if smooth_model['perform_smoothing']:
# create cubic spline basis for smooth terms
X_spline = covars[:, smooth_cols].astype(float)
bs_basis = smooth_model['bsplines_constructor'].transform(X_spline)
# construct formula and dataframe required for gam
formula = 'y ~ '
df_gam = {}
for b in batch_levels:
formula = formula + 'x' + str(b) + ' + '
df_gam['x' + str(b)] = design[:, b]
for c in num_cols:
if c not in smooth_cols:
formula = formula + 'c' + str(c) + ' + '
df_gam['c' + str(c)] = covars[:, c].astype(float)
formula = formula[:-2] + '- 1'
df_gam = | pd.DataFrame(df_gam) | pandas.DataFrame |
from __future__ import print_function
import unittest
from unittest import mock
from io import BytesIO, StringIO
import random
import six
import os
import re
import logging
import numpy as np
import pandas as pd
from . import utils as test_utils
import dataprofiler as dp
from dataprofiler.profilers.profile_builder import StructuredColProfiler, \
UnstructuredProfiler, UnstructuredCompiler, StructuredProfiler, Profiler
from dataprofiler.profilers.profiler_options import ProfilerOptions, \
StructuredOptions, UnstructuredOptions
from dataprofiler.profilers.column_profile_compilers import \
ColumnPrimitiveTypeProfileCompiler, ColumnStatsProfileCompiler, \
ColumnDataLabelerCompiler
from dataprofiler import StructuredDataLabeler, UnstructuredDataLabeler
from dataprofiler.profilers.helpers.report_helpers import _prepare_report
test_root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def setup_save_mock_open(mock_open):
mock_file = BytesIO()
mock_file.close = lambda: None
mock_open.side_effect = lambda *args: mock_file
return mock_file
class TestStructuredProfiler(unittest.TestCase):
@classmethod
def setUp(cls):
test_utils.set_seed(seed=0)
@classmethod
def setUpClass(cls):
test_utils.set_seed(seed=0)
cls.input_file_path = os.path.join(
test_root_path, 'data', 'csv/aws_honeypot_marx_geo.csv'
)
cls.aws_dataset = pd.read_csv(cls.input_file_path)
profiler_options = ProfilerOptions()
profiler_options.set({'data_labeler.is_enabled': False})
with test_utils.mock_timeit():
cls.trained_schema = dp.StructuredProfiler(
cls.aws_dataset, len(cls.aws_dataset), options=profiler_options)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
def test_bad_input_data(self, *mocks):
allowed_data_types = (r"\(<class 'list'>, "
r"<class 'pandas.core.series.Series'>, "
r"<class 'pandas.core.frame.DataFrame'>\)")
bad_data_types = [1, {}, np.inf, 'sdfs']
for data in bad_data_types:
with self.assertRaisesRegex(TypeError,
r"Data must either be imported using "
r"the data_readers or using one of the "
r"following: " + allowed_data_types):
StructuredProfiler(data)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
def test_list_data(self, *mocks):
data = [[1, 1],
[None, None],
[3, 3],
[4, 4],
[5, 5],
[None, None],
[1, 1]]
with test_utils.mock_timeit():
profiler = dp.StructuredProfiler(data)
# test properties
self.assertEqual("<class 'list'>", profiler.file_type)
self.assertIsNone(profiler.encoding)
self.assertEqual(2, profiler.row_has_null_count)
self.assertEqual(2, profiler.row_is_null_count)
self.assertEqual(7, profiler.total_samples)
self.assertEqual(5, len(profiler.hashed_row_dict))
self.assertListEqual([0, 1], list(profiler._col_name_to_idx.keys()))
self.assertIsNone(profiler.correlation_matrix)
self.assertDictEqual({'row_stats': 1}, profiler.times)
        # validates that the sampled output maintains the same visual data
        # format as the input.
self.assertListEqual(['5', '1', '1', '3', '4'],
profiler.profile[0].sample)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
def test_pandas_series_data(self, *mocks):
data = pd.Series([1, None, 3, 4, 5, None, 1])
with test_utils.mock_timeit():
profiler = dp.StructuredProfiler(data)
# test properties
self.assertEqual(
"<class 'pandas.core.series.Series'>", profiler.file_type)
self.assertIsNone(profiler.encoding)
self.assertEqual(2, profiler.row_has_null_count)
self.assertEqual(2, profiler.row_is_null_count)
self.assertEqual(7, profiler.total_samples)
self.assertEqual(5, len(profiler.hashed_row_dict))
self.assertListEqual([0], list(profiler._col_name_to_idx.keys()))
self.assertIsNone(profiler.correlation_matrix)
self.assertDictEqual({'row_stats': 1}, profiler.times)
# test properties when series has name
data.name = 'test'
profiler = dp.StructuredProfiler(data)
self.assertEqual(
"<class 'pandas.core.series.Series'>", profiler.file_type)
self.assertIsNone(profiler.encoding)
self.assertEqual(2, profiler.row_has_null_count)
self.assertEqual(2, profiler.row_is_null_count)
self.assertEqual(7, profiler.total_samples)
self.assertEqual(5, len(profiler.hashed_row_dict))
self.assertListEqual(['test'], list(profiler._col_name_to_idx.keys()))
self.assertIsNone(profiler.correlation_matrix)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._merge_correlation')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_chi2')
def test_add_profilers(self, *mocks):
data = pd.DataFrame([1, None, 3, 4, 5, None, 1])
with test_utils.mock_timeit():
profile1 = dp.StructuredProfiler(data[:2])
profile2 = dp.StructuredProfiler(data[2:])
# test incorrect type
with self.assertRaisesRegex(TypeError,
'`StructuredProfiler` and `int` are '
'not of the same profiler type.'):
profile1 + 3
# test mismatched profiles
profile2._profile.pop(0)
profile2._col_name_to_idx.pop(0)
with self.assertRaisesRegex(ValueError,
"Cannot merge empty profiles."):
profile1 + profile2
# test mismatched profiles due to options
profile2._profile.append(None)
profile2._col_name_to_idx[0] = [0]
with self.assertRaisesRegex(ValueError,
'The two profilers were not setup with the '
'same options, hence they do not calculate '
'the same profiles and cannot be added '
'together.'):
profile1 + profile2
# test success
profile1._profile = [1]
profile1._col_name_to_idx = {"test": [0]}
profile2._profile = [2]
profile2._col_name_to_idx = {"test": [0]}
merged_profile = profile1 + profile2
self.assertEqual(3, merged_profile._profile[
merged_profile._col_name_to_idx["test"][0]])
self.assertIsNone(merged_profile.encoding)
self.assertEqual(
"<class 'pandas.core.frame.DataFrame'>", merged_profile.file_type)
self.assertEqual(2, merged_profile.row_has_null_count)
self.assertEqual(2, merged_profile.row_is_null_count)
self.assertEqual(7, merged_profile.total_samples)
self.assertEqual(5, len(merged_profile.hashed_row_dict))
self.assertDictEqual({'row_stats': 2}, merged_profile.times)
# test success if drawn from multiple files
profile2.encoding = 'test'
profile2.file_type = 'test'
merged_profile = profile1 + profile2
self.assertEqual('multiple files', merged_profile.encoding)
self.assertEqual('multiple files', merged_profile.file_type)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._get_correlation')
def test_stream_profilers(self, *mocks):
mocks[0].return_value = None
data = pd.DataFrame([
['test1', 1.0],
['test2', None],
['test1', 1.0],
[None, None],
[None, 5.0],
[None, 5.0],
[None, None],
['test3', 7.0]])
# check prior to update
with test_utils.mock_timeit():
profiler = dp.StructuredProfiler(data[:3])
self.assertEqual(1, profiler.row_has_null_count)
self.assertEqual(0, profiler.row_is_null_count)
self.assertEqual(3, profiler.total_samples)
self.assertEqual(2, len(profiler.hashed_row_dict))
self.assertIsNone(profiler.correlation_matrix)
self.assertDictEqual({'row_stats': 1}, profiler.times)
# check after update
with test_utils.mock_timeit():
profiler.update_profile(data[3:])
self.assertIsNone(profiler.encoding)
self.assertEqual(
"<class 'pandas.core.frame.DataFrame'>", profiler.file_type)
self.assertEqual(5, profiler.row_has_null_count)
self.assertEqual(2, profiler.row_is_null_count)
self.assertEqual(8, profiler.total_samples)
self.assertEqual(5, len(profiler.hashed_row_dict))
self.assertIsNone(profiler.correlation_matrix)
self.assertDictEqual({'row_stats': 2}, profiler.times)
def test_correct_unique_row_ratio_test(self):
self.assertEqual(2999, len(self.trained_schema.hashed_row_dict))
self.assertEqual(2999, self.trained_schema.total_samples)
self.assertEqual(1.0, self.trained_schema._get_unique_row_ratio())
def test_correct_rows_ingested(self):
self.assertEqual(2999, self.trained_schema.total_samples)
def test_correct_null_row_ratio_test(self):
self.assertEqual(2999, self.trained_schema.row_has_null_count)
self.assertEqual(1.0, self.trained_schema._get_row_has_null_ratio())
self.assertEqual(0, self.trained_schema.row_is_null_count)
self.assertEqual(0, self.trained_schema._get_row_is_null_ratio())
self.assertEqual(2999, self.trained_schema.total_samples)
def test_correct_duplicate_row_count_test(self):
self.assertEqual(2999, len(self.trained_schema.hashed_row_dict))
self.assertEqual(2999, self.trained_schema.total_samples)
self.assertEqual(0.0, self.trained_schema._get_duplicate_row_count())
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
def test_correlation(self, *mock):
        # Use the following formula to obtain the pairwise correlation:
        # sum((x - np.mean(x)) * (y - np.mean(y))) /
        # (np.sqrt(sum((x - np.mean(x))**2)) * np.sqrt(sum((y - np.mean(y))**2)))
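        # Illustrative NumPy sketch of that formula (not part of the original
        # test; x and y are arbitrary example vectors):
        #   x, y = np.array([3., 2., 1., 7., 5.]), np.array([10., 11., 1., 4., 2.])
        #   r = np.sum((x - x.mean()) * (y - y.mean())) / (
        #       np.sqrt(np.sum((x - x.mean()) ** 2)) * np.sqrt(np.sum((y - y.mean()) ** 2)))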
profile_options = dp.ProfilerOptions()
profile_options.set({"correlation.is_enabled": True})
# data with a sole numeric column
data = pd.DataFrame([1.0, 8.0, 1.0, -2.0, 5.0])
with test_utils.mock_timeit():
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([[1.0]])
np.testing.assert_array_equal(expected_corr_mat,
profiler.correlation_matrix)
self.assertDictEqual({'row_stats': 1, 'correlation': 1}, profiler.times)
        # data with one column with non-numeric values
data = pd.DataFrame([1.0, None, 1.0, None, 5.0])
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([[1]])
np.testing.assert_array_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with two columns, but one is numerical
data = pd.DataFrame([
['test1', 1.0],
['test2', None],
['test1', 1.0],
[None, None]])
profiler = dp.StructuredProfiler(data, options=profile_options)
# Even the correlation with itself is NaN because the variance is zero
expected_corr_mat = np.array([
[np.nan, np.nan],
[np.nan, np.nan]
])
np.testing.assert_array_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with multiple numerical columns
data = pd.DataFrame({'a': [3, 2, 1, 7, 5, 9, 4, 10, 7, 2],
'b': [10, 11, 1, 4, 2, 5, 6, 3, 9, 8],
'c': [1, 5, 3, 5, 7, 2, 6, 8, 1, 2]})
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([
[1.0, -0.26559388521279237, 0.26594894270403086],
[-0.26559388521279237, 1.0, -0.49072329],
[0.26594894270403086, -0.49072329, 1.0]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with multiple numerical columns, with nan values
data = pd.DataFrame({'a': [np.nan, np.nan, 1, 7, 5, 9, 4, 10, 7, 2],
'b': [10, 11, np.nan, 4, 2, 5, 6, 3, 9, 8],
'c': [1, 5, 3, 5, 7, 2, 6, 8, np.nan, np.nan]})
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([
[1, -0.28527657, 0.18626508],
[-0.28527657, 1, -0.52996792],
[0.18626508, -0.52996792, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with multiple numerical columns, with nan values in only one
# column
data = pd.DataFrame({'a': [np.nan, np.nan, 1, 7, 5, 9, 4, 10, 7, 2],
'b': [10, 11, 1, 4, 2, 5, 6, 3, 9, 8],
'c': [1, 5, 3, 5, 7, 2, 6, 8, 1, 2]})
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([
[1, 0.03673504, 0.22844891],
[0.03673504, 1, -0.49072329],
[0.22844891, -0.49072329, 1]])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
        # data with only one numerical column without nan values
data = pd.DataFrame({'a': [3, 2, 1, 7, 5, 9, 4, 10, 7, 2]})
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([[1]])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with no numeric columns
data = pd.DataFrame({'a': ['hi', 'hi2', 'hi3'],
'b': ['test1', 'test2', 'test3']})
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([
[np.nan, np.nan],
[np.nan, np.nan]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
        # data with two non-numeric columns and one numeric column
data = pd.DataFrame({'a': ['hi', 'hi2', 'hi3'],
'b': ['test1', 'test2', 'test3'],
'c': [1, 2, 3]})
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# Data with null rows
data = pd.DataFrame({'a': [None, 2, 1, np.nan, 5, np.nan, 4, 10, 7, np.nan],
'b': [np.nan, 11, 1, 'nan', 2, np.nan, 6, 3, 9, np.nan],
'c': [np.nan, 5, 3, np.nan, 7, np.nan, 6, 8, 1, None]})
profiler = dp.StructuredProfiler(data, options=profile_options)
# correlation between [2, 1, 5, 4, 10, 7],
# [11, 1, 2, 6, 3, 9],
# [5, 3, 7, 6, 8, 1]
expected_corr_mat = np.array([
[1, -0.06987956, 0.32423975],
[-0.06987956, 1, -0.3613099],
[0.32423975, -0.3613099, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# Data with null rows and some imputed values
data = pd.DataFrame({'a': [None, np.nan, 1, 7, 5, 9, 4, 10, np.nan, 2],
'b': [10, 11, 1, 4, 2, 5, np.nan, 3, np.nan, 8],
'c': [1, 5, 3, 5, np.nan, 2, 6, 8, np.nan, 2]})
profiler = dp.StructuredProfiler(data, options=profile_options)
# correlation between [*38/7*, *38/7*, 1, 7, 5, 9, 4, 10, 2],
# [10, 11, 1, 4, 2, 5, *11/2*, 3, 8],
# [1, 5, 3, 5, *4*, 2, 6, 8, 2]
expected_corr_mat = np.array([
[1, -0.03283837, 0.40038038],
[-0.03283837, 1, -0.30346637],
[0.40038038, -0.30346637, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
def test_merge_correlation(self, *mocks):
        # Use the following formula to obtain the pairwise correlation:
        # sum((x - np.mean(x)) * (y - np.mean(y))) /
        # (np.sqrt(sum((x - np.mean(x))**2)) * np.sqrt(sum((y - np.mean(y))**2)))
profile_options = dp.ProfilerOptions()
profile_options.set({"correlation.is_enabled": True})
# merge between two existing correlations
data = pd.DataFrame({'a': [3, 2, 1, 7, 5, 9, 4, 10, 7, 2],
'b': [10, 11, 1, 4, 2, 5, 6, 3, 9, 8],
'c': [1, 5, 3, 5, 7, 2, 6, 8, 1, 2]})
data1 = data[:5]
data2 = data[5:]
with test_utils.mock_timeit():
profile1 = dp.StructuredProfiler(data1, options=profile_options)
profile2 = dp.StructuredProfiler(data2, options=profile_options)
merged_profile = profile1 + profile2
expected_corr_mat = np.array([
[1.0, -0.26559388521279237, 0.26594894270403086],
[-0.26559388521279237, 1.0, -0.49072329],
[0.26594894270403086, -0.49072329, 1.0]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
merged_profile.correlation_matrix)
self.assertDictEqual({'row_stats': 2, 'correlation': 2},
merged_profile.times)
# merge between an existing corr and None correlation (without data)
with test_utils.mock_timeit():
profile1 = dp.StructuredProfiler(None, options=profile_options)
profile2 = dp.StructuredProfiler(data, options=profile_options)
        # TODO: remove the mock below when merge profile is updated
with mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._add_error_checks'):
merged_profile = profile1 + profile2
expected_corr_mat = np.array([
[1.0, -0.26559388521279237, 0.26594894270403086],
[-0.26559388521279237, 1.0, -0.49072329],
            [0.26594894270403086, -0.49072329, 1.0]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
merged_profile.correlation_matrix)
self.assertDictEqual({'row_stats': 1, 'correlation': 1},
merged_profile.times)
# Merge between existing data and empty data that still has samples
data = pd.DataFrame({'a': [1, 2, 4, np.nan, None, np.nan],
'b': [5, 7, 1, np.nan, np.nan, 'nan']})
data1 = data[:3]
data2 = data[3:]
profile1 = dp.StructuredProfiler(data1, options=profile_options)
expected_corr_mat = np.array([
[1, -0.78571429],
[-0.78571429, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profile1.correlation_matrix)
profile2 = dp.StructuredProfiler(data2, options=profile_options)
merged_profile = profile1 + profile2
np.testing.assert_array_almost_equal(expected_corr_mat,
merged_profile.correlation_matrix)
def test_correlation_update(self):
profile_options = dp.ProfilerOptions()
profile_options.set({"correlation.is_enabled": True})
# Test with all numeric columns
data = pd.DataFrame({'a': [3, 2, 1, 7, 5, 9, 4, 10, 7, 2],
'b': [10, 11, 1, 4, 2, 5, 6, 3, 9, 8],
'c': [1, 5, 3, 5, 7, 2, 6, 8, 1, 2]})
data1 = data[:5]
data2 = data[5:]
with test_utils.mock_timeit():
profiler = dp.StructuredProfiler(data1, options=profile_options)
profiler.update_profile(data2)
expected_corr_mat = np.array([
[1.0, -0.26559388521279237, 0.26594894270403086],
            [-0.26559388521279237, 1.0, -0.49072329],
            [0.26594894270403086, -0.49072329, 1.0]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
self.assertDictEqual({'row_stats': 2, 'correlation': 2}, profiler.times)
# Test when there's a non-numeric column
data = pd.DataFrame({'a': [3, 2, 1, 7, 5, 9, 4, 10, 7, 2],
'b': [10, 11, 1, 4, 2, 5, 6, 3, 9, 8],
'c': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']})
data1 = data[:5]
data2 = data[5:]
profiler = dp.StructuredProfiler(data1, options=profile_options)
profiler.update_profile(data2)
expected_corr_mat = np.array([
[1.0, -0.26559388521279237, np.nan],
[-0.26559388521279237, 1.0, np.nan],
[np.nan, np.nan, np.nan]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# Data with multiple numerical and non-numeric columns, with nan values in only one column
# NaNs imputed to (9+4+10)/3
data = pd.DataFrame({'a': [7, 2, 1, 7, 5, 9, 4, 10, np.nan, np.nan],
'b': [10, 11, 1, 4, 2, 5, 6, 3, 9, 8],
'c': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'],
'd': [1, 5, 3, 5, 7, 2, 6, 8, 1, 2]})
data1 = data[:5]
data2 = data[5:]
profiler = dp.StructuredProfiler(data1, options=profile_options)
profiler.update_profile(data2)
expected_corr_mat = np.array([
[ 1, 0.04721482, np.nan, -0.09383408],
[ 0.04721482, 1, np.nan,-0.49072329],
[np.nan, np.nan, np.nan, np.nan],
[-0.09383408, -0.49072329, np.nan, 1]]
)
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# Data with null rows, all null rows are dropped
data = pd.DataFrame({'a': [np.nan, 2, 1, None, 5, np.nan, 4, 10, 7, 'NaN'],
'b': [np.nan, 11, 1, np.nan, 2, np.nan, 6, 3, 9, np.nan],
'c': [np.nan, 5, 3, np.nan, 7, None, 6, 8, 1, np.nan]})
data1 = data[:5]
data2 = data[5:]
profiler = dp.StructuredProfiler(data1, options=profile_options)
profiler.update_profile(data2)
# correlation between [2, 1, 5, 4, 10, 7],
# [11, 1, 2, 6, 3, 9],
# [5, 3, 7, 6, 8, 1]
expected_corr_mat = np.array([
[1, -0.06987956, 0.32423975],
[-0.06987956, 1, -0.3613099],
[0.32423975, -0.3613099, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# Data with null rows and some imputed values
data = pd.DataFrame({'a': [None, np.nan, 1, 7, 5, 9, 4, 10, 'nan', 2],
'b': [10, 11, 1, 4, 2, 5, 'NaN', 3, None, 8],
'c': [1, 5, 3, 5, np.nan, 2, 6, 8, None, 2]})
data1 = data[:5]
data2 = data[5:]
profiler = dp.StructuredProfiler(data1, options=profile_options)
profiler.update_profile(data2)
# correlation between [*13/3*, *13/3*, 1, 7, 5]
# [10, 11, 1, 4, 2]
# [1, 5, 3, 5, *7/2*]
# then updated with correlation (9th row dropped) between
# [9, 4, 10, 2],
# [5, *16/3*, 3, 8],
# [2, 6, 8, 2]
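        # (values marked *x* are NaNs imputed with the mean of the non-null
        # entries in their chunk, e.g. 13/3 = (1+7+5)/3 for column a and
        # 7/2 = (1+5+3+5)/4 for column c in the first chunk, and
        # 16/3 = (5+3+8)/3 for column b in the second chunk)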
expected_corr_mat = np.array([
[1, -0.16079606, 0.43658332],
[-0.16079606, 1, -0.2801748],
[0.43658332, -0.2801748, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
def test_chi2(self, *mocks):
# Empty
data = pd.DataFrame([])
profiler = dp.StructuredProfiler(data)
self.assertIsNone(profiler.chi2_matrix)
# Single column
data = pd.DataFrame({'a': ["y", "y", "n", "n", "y"]})
profiler = dp.StructuredProfiler(data)
expected_mat = np.array([1])
self.assertEqual(expected_mat, profiler.chi2_matrix)
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "maybe", "y", "y", "n", "n", "maybe"],
'c': ["n", "maybe", "n", "n", "n", "y", "y"]})
profiler = dp.StructuredProfiler(data)
expected_mat = np.array([
[1, 0.309924, 0.404638],
[0.309924, 1, 0.548812],
[0.404638, 0.548812, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
# All different categories
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["a", "maybe", "a", "a", "b", "b", "maybe"],
'c': ["d", "d", "g", "g", "g", "t", "t"]})
profiler = dp.StructuredProfiler(data)
expected_mat = np.array([
[1, 0.007295, 0.007295],
[0.007295, 1, 0.015609],
[0.007295, 0.015609, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
# Identical columns
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "y", "y", "y", "n", "n", "n"],
'c': ["y", "y", "y", "y", "n", "n", "n"]})
profiler = dp.StructuredProfiler(data)
expected_mat = np.array([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
def test_merge_chi2(self, *mocks):
# Merge empty data
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "maybe", "y", "y", "n", "n", "maybe"],
'c': ["n", "maybe", "n", "n", "n", "y", "y"]})
profiler1 = dp.StructuredProfiler(None)
profiler2 = dp.StructuredProfiler(data)
with mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._add_error_checks'):
profiler3 = profiler1 + profiler2
expected_mat = np.array([
[1, 0.309924, 0.404638],
[0.309924, 1, 0.548812],
[0.404638, 0.548812, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler3.chi2_matrix)
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "maybe", "y", "y", "n", "n", "maybe"],
'c': ["n", "maybe", "n", "n", "n", "y", "y"]})
data1 = data[:4]
data2 = data[4:]
profiler1 = dp.StructuredProfiler(data1)
profiler2 = dp.StructuredProfiler(data2)
profiler3 = profiler1 + profiler2
expected_mat = np.array([
[1, 0.309924, 0.404638],
[0.309924, 1, 0.548812],
[0.404638, 0.548812, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler3.chi2_matrix)
# All different categories
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["a", "maybe", "a", "a", "b", "b", "maybe"],
'c': ["d", "d", "g", "g", "g", "t", "t"]})
data1 = data[:4]
data2 = data[4:]
profiler1 = dp.StructuredProfiler(data1)
profiler2 = dp.StructuredProfiler(data2)
profiler3 = profiler1 + profiler2
expected_mat = np.array([
[1, 0.007295, 0.007295],
[0.007295, 1, 0.015609],
[0.007295, 0.015609, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler3.chi2_matrix)
# Identical columns
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "y", "y", "y", "n", "n", "n"],
'c': ["y", "y", "y", "y", "n", "n", "n"]})
data1 = data[:4]
data2 = data[4:]
profiler1 = dp.StructuredProfiler(data1)
profiler2 = dp.StructuredProfiler(data2)
profiler3 = profiler1 + profiler2
expected_mat = np.array([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler3.chi2_matrix)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
def test_update_chi2(self, *mocks):
# Update with empty data
data1 = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "maybe", "y", "y", "n", "n", "maybe"],
'c': ["n", "maybe", "n", "n", "n", "y", "y"]})
data2 = pd.DataFrame({'a': [],
'b': [],
'c': []})
profiler = dp.StructuredProfiler(data1)
profiler.update_profile(data2)
expected_mat = np.array([
[1, 0.309924, 0.404638],
[0.309924, 1, 0.548812],
[0.404638, 0.548812, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "maybe", "y", "y", "n", "n", "maybe"],
'c': ["n", "maybe", "n", "n", "n", "y", "y"]})
data1 = data[:4]
data2 = data[4:]
profiler = dp.StructuredProfiler(data1)
profiler.update_profile(data2)
expected_mat = np.array([
[1, 0.309924, 0.404638],
[0.309924, 1, 0.548812],
[0.404638, 0.548812, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
# All different categories
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["a", "maybe", "a", "a", "b", "b", "maybe"],
'c': ["d", "d", "g", "g", "g", "t", "t"]})
data1 = data[:4]
data2 = data[4:]
profiler = dp.StructuredProfiler(data1)
profiler.update_profile(data2)
expected_mat = np.array([
[1, 0.007295, 0.007295],
[0.007295, 1, 0.015609],
[0.007295, 0.015609, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
# Identical columns
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "y", "y", "y", "n", "n", "n"],
'c': ["y", "y", "y", "y", "n", "n", "n"]})
data1 = data[:4]
data2 = data[4:]
profiler = dp.StructuredProfiler(data1)
profiler.update_profile(data2)
expected_mat = np.array([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
def test_correct_datatime_schema_test(self):
profile_idx = self.trained_schema._col_name_to_idx["datetime"][0]
profile = self.trained_schema.profile[profile_idx]
col_schema_info = \
profile.profiles['data_type_profile']._profiles["datetime"]
self.assertEqual(2999, profile.sample_size)
self.assertEqual(col_schema_info.sample_size,
col_schema_info.match_count)
self.assertEqual(2, profile.null_count)
six.assertCountEqual(self, ['nan'], profile.null_types)
self.assertEqual(['%m/%d/%y %H:%M'], col_schema_info['date_formats'])
def test_correct_integer_column_detection_src(self):
profile_idx = self.trained_schema._col_name_to_idx["src"][0]
profile = self.trained_schema.profile[profile_idx]
col_schema_info = profile.profiles['data_type_profile']._profiles["int"]
self.assertEqual(2999, profile.sample_size)
self.assertEqual(col_schema_info.sample_size,
col_schema_info.match_count)
self.assertEqual(3, profile.null_count)
def test_correct_integer_column_detection_int_col(self):
profile_idx = self.trained_schema._col_name_to_idx["int_col"][0]
profile = self.trained_schema.profile[profile_idx]
col_schema_info = profile.profiles['data_type_profile']._profiles["int"]
self.assertEqual(2999, profile.sample_size)
self.assertEqual(col_schema_info.sample_size,
col_schema_info.match_count)
self.assertEqual(0, profile.null_count)
def test_correct_integer_column_detection_port(self):
profile_idx = self.trained_schema._col_name_to_idx["srcport"][0]
profile = self.trained_schema.profile[profile_idx]
col_schema_info = profile.profiles['data_type_profile']._profiles["int"]
self.assertEqual(2999, profile.sample_size)
self.assertEqual(col_schema_info.sample_size,
col_schema_info.match_count)
self.assertEqual(197, profile.null_count)
def test_correct_integer_column_detection_destport(self):
profile_idx = self.trained_schema._col_name_to_idx["destport"][0]
profile = self.trained_schema.profile[profile_idx]
col_schema_info = profile.profiles['data_type_profile']._profiles["int"]
self.assertEqual(2999, profile.sample_size)
self.assertEqual(col_schema_info.sample_size,
col_schema_info.match_count)
self.assertEqual(197, profile.null_count)
def test_report(self):
report = self.trained_schema.report()
self.assertListEqual(list(report.keys()), [
'global_stats', 'data_stats'])
self.assertListEqual(
list(report['global_stats']),
[
"samples_used", "column_count", "row_count",
"row_has_null_ratio", 'row_is_null_ratio',
"unique_row_ratio", "duplicate_row_count", "file_type",
"encoding", "correlation_matrix", "chi2_matrix", "profile_schema", "times"
]
)
flat_report = self.trained_schema.report(
report_options={"output_format": "flat"})
self.assertEqual(test_utils.get_depth(flat_report), 1)
with mock.patch('dataprofiler.profilers.helpers.report_helpers'
'._prepare_report') as pr_mock:
self.trained_schema.report(
report_options={"output_format": 'pretty'})
# Once for global_stats, once for each of 16 columns
self.assertEqual(pr_mock.call_count, 17)
def test_report_schema_and_data_stats_match_order(self):
data = pd.DataFrame([[1, 2, 3, 4, 5, 6],
[10, 20, 30, 40, 50, 60]],
columns=["a", "b", "a", "b", "c", "d"])
profiler_options = ProfilerOptions()
profiler_options.set({'data_labeler.is_enabled': False})
profiler = dp.StructuredProfiler(data=data, options=profiler_options)
report = profiler.report()
schema = report["global_stats"]["profile_schema"]
data_stats = report["data_stats"]
expected_schema = {"a": [0, 2], "b": [1, 3], "c": [4], "d": [5]}
self.assertDictEqual(expected_schema, schema)
# Check that the column order in the report matches the column order
# In the schema (and in the data)
for name in schema:
for idx in schema[name]:
# Use min of column to validate column order amongst duplicates
col_min = data.iloc[0, idx]
self.assertEqual(name, data_stats[idx]["column_name"])
self.assertEqual(col_min, data_stats[idx]["statistics"]["min"])
def test_pretty_report_doesnt_cast_schema(self):
report = self.trained_schema.report(
report_options={"output_format": "pretty"})
# Want to ensure the values of this dict are of type list[int]
# Since pretty "prettifies" lists into strings with ... to shorten
expected_schema = {"datetime": [0], "host": [1], "src": [2],
"proto": [3], "type": [4], "srcport": [5],
"destport": [6], "srcip": [7], "locale": [8],
"localeabbr": [9], "postalcode": [10],
"latitude": [11], "longitude": [12], "owner": [13],
"comment": [14], "int_col": [15]}
self.assertDictEqual(expected_schema,
report["global_stats"]["profile_schema"])
def test_omit_keys_with_duplicate_cols(self):
data = pd.DataFrame([[1, 2, 3, 4, 5, 6],
[10, 20, 30, 40, 50, 60]],
columns=["a", "b", "a", "b", "c", "d"])
profiler_options = ProfilerOptions()
profiler_options.set({'data_labeler.is_enabled': False})
profiler = dp.StructuredProfiler(data=data, options=profiler_options)
report = profiler.report(report_options={
"omit_keys": ["data_stats.a.statistics.min",
"data_stats.d.statistics.max",
"data_stats.*.statistics.null_types_index"]})
# Correctness of schema asserted in prior test
schema = report["global_stats"]["profile_schema"]
data_stats = report["data_stats"]
for idx in range(len(report["data_stats"])):
# Assert that min is absent from a's data_stats and not the others
if idx in schema["a"]:
self.assertNotIn("min", data_stats[idx]["statistics"])
else:
self.assertIn("min", report["data_stats"][idx]["statistics"])
# Assert that max is absent from d's data_stats and not the others
if idx in schema["d"]:
self.assertNotIn("max", report["data_stats"][idx]["statistics"])
else:
self.assertIn("max", report["data_stats"][idx]["statistics"])
# Assert that null_types_index not present in any
self.assertNotIn("null_types_index",
report["data_stats"][idx]["statistics"])
def test_omit_cols_preserves_schema(self):
data = pd.DataFrame([[1, 2, 3, 4, 5, 6],
[10, 20, 30, 40, 50, 60]],
columns=["a", "b", "a", "b", "c", "d"])
omit_cols = ["a", "d"]
omit_idxs = [0, 2, 5]
omit_keys = [f"data_stats.{col}" for col in omit_cols]
profiler_options = ProfilerOptions()
profiler_options.set({'data_labeler.is_enabled': False})
profiler = dp.StructuredProfiler(data=data, options=profiler_options)
report = profiler.report(report_options={"omit_keys": omit_keys})
for idx in range(len(report["data_stats"])):
if idx in omit_idxs:
self.assertIsNone(report["data_stats"][idx])
else:
self.assertIsNotNone(report["data_stats"][idx])
# This will keep the data_stats key but remove all columns
report = profiler.report(report_options={"omit_keys": ["data_stats.*"]})
for col_report in report["data_stats"]:
self.assertIsNone(col_report)
def test_report_quantiles(self):
report_none = self.trained_schema.report(
report_options={"num_quantile_groups": None})
report = self.trained_schema.report()
self.assertEqual(report_none, report)
for col in report["data_stats"]:
if col["column_name"] == "int_col":
report_quantiles = col["statistics"]["quantiles"]
break
self.assertEqual(len(report_quantiles), 3)
report2 = self.trained_schema.report(
report_options={"num_quantile_groups": 1000})
for col in report2["data_stats"]:
if col["column_name"] == "int_col":
report2_1000_quant = col["statistics"]["quantiles"]
break
self.assertEqual(len(report2_1000_quant), 999)
self.assertEqual(report_quantiles, {
0: report2_1000_quant[249],
1: report2_1000_quant[499],
2: report2_1000_quant[749],
})
def test_report_omit_keys(self):
# Omit both report keys manually
no_report_keys = self.trained_schema.report(
report_options={"omit_keys": ['global_stats', 'data_stats']})
self.assertCountEqual({}, no_report_keys)
# Omit just data_stats
no_data_stats = self.trained_schema.report(
report_options={"omit_keys": ['data_stats']})
self.assertCountEqual({"global_stats"}, no_data_stats)
# Omit a global stat
no_samples_used = self.trained_schema.report(
report_options={"omit_keys": ['global_stats.samples_used']})
self.assertNotIn("samples_used", no_samples_used["global_stats"])
# Omit all keys
nothing = self.trained_schema.report(
report_options={"omit_keys": ['*']})
self.assertCountEqual({}, nothing)
# Omit every data_stats column
empty_data_stats_cols = self.trained_schema.report(
report_options={"omit_keys": ['global_stats', 'data_stats.*']})
# data_stats key still present, but all columns are None
self.assertCountEqual({"data_stats"}, empty_data_stats_cols)
self.assertTrue(all([rep is None
for rep in empty_data_stats_cols["data_stats"]]))
# Omit specific data_stats column
no_datetime = self.trained_schema.report(
report_options={"omit_keys": ['data_stats.datetime']})
self.assertNotIn("datetime", no_datetime["data_stats"])
# Omit a statistic from each column
no_sum = self.trained_schema.report(
report_options={"omit_keys": ['data_stats.*.statistics.sum']})
self.assertTrue(all(["sum" not in rep["statistics"]
for rep in no_sum["data_stats"]]))
def test_report_compact(self):
report = self.trained_schema.report(
report_options={ "output_format": "pretty" })
omit_keys = [
"data_stats.*.statistics.times",
"data_stats.*.statistics.avg_predictions",
"data_stats.*.statistics.data_label_representation",
"data_stats.*.statistics.null_types_index",
"data_stats.*.statistics.histogram"
]
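        # 'compact' output is expected to equal a 'pretty' report with the
        # verbose statistics above stripped out.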
report = _prepare_report(report, 'pretty', omit_keys)
report_compact = self.trained_schema.report(
report_options={"output_format": "compact"})
self.assertEqual(report, report_compact)
def test_profile_key_name_without_space(self):
def recursive_test_helper(report, prev_key=None):
for key in report:
# do not test keys in 'data_stats' as they contain column names
                # nor for 'avg_predictions' and 'data_label_representation'
# as they contain label names
# same for 'null_types_index'
if prev_key not in ['data_stats', 'avg_predictions',
'data_label_representation',
'null_types_index', 'categorical_count']:
# key names should contain only alphanumeric letters or '_'
self.assertIsNotNone(re.match('^[a-zA-Z0-9_]+$', str(key)))
if isinstance(report[key], dict):
recursive_test_helper(report[key], key)
_report = self.trained_schema.report()
recursive_test_helper(_report)
def test_data_label_assigned(self):
# only use 5 samples
trained_schema = dp.StructuredProfiler(self.aws_dataset, samples_per_update=5)
report = trained_schema.report()
has_non_null_column = False
for i in range(len(report['data_stats'])):
# only test non-null columns
if report['data_stats'][i]['data_type'] is not None:
self.assertIsNotNone(report['data_stats'][i]['data_label'])
has_non_null_column = True
if not has_non_null_column:
self.fail(
"Dataset tested did not have a non-null column and therefore "
"could not validate the test.")
def test_text_data_raises_error(self):
text_file_path = os.path.join(
test_root_path, 'data', 'txt/sentence-10x.txt'
)
with self.assertRaisesRegex(TypeError, 'Cannot provide TextData object'
' to StructuredProfiler'):
profiler = dp.StructuredProfiler(dp.Data(text_file_path))
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_chi2')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch('dataprofiler.profilers.profile_builder.StructuredProfiler.'
'_update_row_statistics')
@mock.patch('dataprofiler.profilers.profile_builder.StructuredColProfiler')
def test_sample_size_warning_in_the_profiler(self, *mocks):
        # structured data profile mock
sdp_mock = mock.Mock()
sdp_mock.clean_data_and_get_base_stats.return_value = (None, None)
mocks[0].return_value = sdp_mock
data = pd.DataFrame([1, None, 3, 4, 5, None])
with self.assertWarnsRegex(UserWarning,
"The data will be profiled with a sample "
"size of 3. All statistics will be based on "
"this subsample and not the whole dataset."):
profile1 = dp.StructuredProfiler(data, samples_per_update=3)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
def test_min_col_samples_used(self, *mocks):
# No cols sampled since no cols to sample
empty_df = pd.DataFrame([])
empty_profile = dp.StructuredProfiler(empty_df)
self.assertEqual(0, empty_profile._min_col_samples_used)
# Every column fully sampled
full_df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
full_profile = dp.StructuredProfiler(full_df)
self.assertEqual(3, full_profile._min_col_samples_used)
# First col sampled only twice, so that is min
sparse_df = pd.DataFrame([[1, None, None],
[1, 1, None],
[1, None, 1]])
sparse_profile = dp.StructuredProfiler(sparse_df, min_true_samples=2,
samples_per_update=1)
self.assertEqual(2, sparse_profile._min_col_samples_used)
@mock.patch('dataprofiler.profilers.profile_builder.StructuredProfiler.'
'_update_profile_from_chunk')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
def test_min_true_samples(self, *mocks):
empty_df = pd.DataFrame([])
# Test invalid input
msg = "`min_true_samples` must be an integer or `None`."
with self.assertRaisesRegex(ValueError, msg):
profile = dp.StructuredProfiler(empty_df, min_true_samples="Bloop")
# Test invalid input given to update_profile
profile = dp.StructuredProfiler(empty_df)
with self.assertRaisesRegex(ValueError, msg):
profile.update_profile(empty_df, min_true_samples="Bloop")
# Test None input (equivalent to zero)
profile = dp.StructuredProfiler(empty_df, min_true_samples=None)
self.assertEqual(None, profile._min_true_samples)
# Test valid input
profile = dp.StructuredProfiler(empty_df, min_true_samples=10)
self.assertEqual(10, profile._min_true_samples)
def test_save_and_load(self):
datapth = "dataprofiler/tests/data/"
test_files = ["csv/guns.csv", "csv/iris.csv"]
for test_file in test_files:
# Create Data and StructuredProfiler objects
data = dp.Data(os.path.join(datapth, test_file))
options = ProfilerOptions()
options.set({"correlation.is_enabled": True})
save_profile = dp.StructuredProfiler(data)
# store the expected data_labeler
data_labeler = save_profile.options.data_labeler.data_labeler_object
# Save and Load profile with Mock IO
with mock.patch('builtins.open') as m:
mock_file = setup_save_mock_open(m)
save_profile.save()
mock_file.seek(0)
with mock.patch('dataprofiler.profilers.profile_builder.'
'DataLabeler', return_value=data_labeler):
load_profile = dp.StructuredProfiler.load("mock.pkl")
# validate loaded profile has same data labeler class
self.assertIsInstance(
load_profile.options.data_labeler.data_labeler_object,
data_labeler.__class__)
            # only check the first column's profile
first_column_profile = load_profile.profile[0]
self.assertIsInstance(
first_column_profile.profiles['data_label_profile']
._profiles['data_labeler'].data_labeler,
data_labeler.__class__)
# Check that reports are equivalent
save_report = test_utils.clean_report(save_profile.report())
load_report = test_utils.clean_report(load_profile.report())
np.testing.assert_equal(save_report, load_report)
def test_save_and_load_no_labeler(self):
# Create Data and UnstructuredProfiler objects
data = pd.DataFrame([1, 2, 3], columns=["a"])
profile_options = dp.ProfilerOptions()
profile_options.set({"data_labeler.is_enabled": False})
save_profile = dp.StructuredProfiler(data, options=profile_options)
# Save and Load profile with Mock IO
with mock.patch('builtins.open') as m:
mock_file = setup_save_mock_open(m)
save_profile.save()
mock_file.seek(0)
with mock.patch('dataprofiler.profilers.profile_builder.'
'DataLabeler'):
load_profile = dp.StructuredProfiler.load("mock.pkl")
# Check that reports are equivalent
save_report = test_utils.clean_report(save_profile.report())
load_report = test_utils.clean_report(load_profile.report())
self.assertDictEqual(save_report, load_report)
# validate both are still usable after
save_profile.update_profile(pd.DataFrame({"a": [4, 5]}))
load_profile.update_profile(pd.DataFrame({"a": [4, 5]}))
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
def test_string_index_doesnt_cause_error(self, *mocks):
dp.StructuredProfiler(pd.DataFrame([[1, 2, 3]], index=["hello"]))
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
def test_dict_in_data_no_error(self, *mocks):
# validates that _update_row_statistics does not error when trying to
# hash a dict.
profiler = dp.StructuredProfiler(pd.DataFrame([[{'test': 1}], [None]]))
self.assertEqual(1, profiler.row_is_null_count)
self.assertEqual(2, profiler.total_samples)
def test_duplicate_columns(self):
data = pd.DataFrame([[1, 2, 3, 4, 5, 6],
[10, 20, 30, 40, 50, 60]],
columns=["a", "b", "a", "b", "c", "d"])
profiler = dp.StructuredProfiler(data)
# Ensure columns are correctly allocated to profiles in list
expected_mapping = {"a": [0, 2], "b": [1, 3], "c": [4], "d": [5]}
self.assertDictEqual(expected_mapping, profiler._col_name_to_idx)
for col in profiler._col_name_to_idx:
for idx in profiler._col_name_to_idx[col]:
                # Make sure every index that a column name maps to represents
                # a profile for that named column
self.assertEqual(col, profiler._profile[idx].name)
# Check a few stats to ensure calculation with data occurred
# Initialization ensures column ids and profile ids are identical
for col_idx in range(len(profiler._profile)):
col_min = data.iloc[0, col_idx]
col_max = data.iloc[1, col_idx]
col_sum = col_min + col_max
self.assertEqual(col_min, profiler._profile[col_idx].
profile["statistics"]["min"])
self.assertEqual(col_max, profiler._profile[col_idx].
profile["statistics"]["max"])
self.assertEqual(col_sum, profiler._profile[col_idx].
profile["statistics"]["sum"])
# Check that update works as expected
new_data = pd.DataFrame([[100, 200, 300, 400, 500, 600]],
columns=["a", "b", "a", "b", "c", "d"])
profiler.update_profile(new_data)
self.assertDictEqual(expected_mapping, profiler._col_name_to_idx)
for col in profiler._col_name_to_idx:
for idx in profiler._col_name_to_idx[col]:
                # Make sure every index that a column name maps to represents
                # a profile for that named column
self.assertEqual(col, profiler._profile[idx].name)
for col_idx in range(len(profiler._profile)):
col_min = data.iloc[0, col_idx]
col_max = new_data.iloc[0, col_idx]
col_sum = col_min + col_max + data.iloc[1, col_idx]
self.assertEqual(col_min, profiler._profile[col_idx].
profile["statistics"]["min"])
self.assertEqual(col_max, profiler._profile[col_idx].
profile["statistics"]["max"])
self.assertEqual(col_sum, profiler._profile[col_idx].
profile["statistics"]["sum"])
def test_unique_col_permutation(self, *mocks):
data = pd.DataFrame([[1, 2, 3, 4],
[5, 6, 7, 8]],
columns=["a", "b", "c", "d"])
perm_data = pd.DataFrame([[4, 3, 2, 1],
[8, 7, 6, 5]],
columns=["d", "c", "b", "a"])
# Test via add
first_profiler = dp.StructuredProfiler(data)
perm_profiler = dp.StructuredProfiler(perm_data)
profiler = first_profiler + perm_profiler
for col_idx in range(len(profiler._profile)):
col_min = data.iloc[0, col_idx]
col_max = data.iloc[1, col_idx]
# Sum is doubled since it was updated with the same vals
col_sum = 2 * (col_min + col_max)
self.assertEqual(col_min, profiler._profile[col_idx].
profile["statistics"]["min"])
self.assertEqual(col_max, profiler._profile[col_idx].
profile["statistics"]["max"])
self.assertEqual(col_sum, profiler._profile[col_idx].
profile["statistics"]["sum"])
# Test via update
profiler = dp.StructuredProfiler(data)
profiler.update_profile(perm_data)
for col_idx in range(len(profiler._profile)):
col_min = data.iloc[0, col_idx]
col_max = data.iloc[1, col_idx]
# Sum is doubled since it was updated with the same vals
col_sum = 2 * (col_min + col_max)
self.assertEqual(col_min, profiler._profile[col_idx].
profile["statistics"]["min"])
self.assertEqual(col_max, profiler._profile[col_idx].
profile["statistics"]["max"])
self.assertEqual(col_sum, profiler._profile[col_idx].
profile["statistics"]["sum"])
def test_get_and_validate_schema_mapping(self):
unique_schema_1 = {"a": [0], "b": [1], "c": [2]}
unique_schema_2 = {"a": [2], "b": [0], "c": [1]}
unique_schema_3 = {"a": [0], "b": [1], "d": [2]}
msg = "Columns do not match, cannot update or merge profiles."
with self.assertRaisesRegex(ValueError, msg):
dp.StructuredProfiler._get_and_validate_schema_mapping(
unique_schema_1,unique_schema_3)
expected_schema = {0: 0, 1: 1, 2: 2}
actual_schema = dp.StructuredProfiler. \
_get_and_validate_schema_mapping(unique_schema_1, {})
self.assertDictEqual(actual_schema, expected_schema)
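        # The mapping below reads: column index 0 of the first schema ('a' at
        # [0]) corresponds to index 2 of the second ('a' at [2]), and so on.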
expected_schema = {0: 2, 1: 0, 2: 1}
actual_schema = dp.StructuredProfiler. \
_get_and_validate_schema_mapping(unique_schema_1, unique_schema_2)
self.assertDictEqual(actual_schema, expected_schema)
dupe_schema_1 = {"a": [0], "b": [1, 2], "c": [3, 4, 5]}
dupe_schema_2 = {"a": [0], "b": [1, 3], "c": [2, 4, 5]}
dupe_schema_3 = {"a": [0, 1], "b": [2, 3, 4], "c": [5]}
four_col_schema = {"a": [0], "b": [1, 2], "c": [3, 4, 5], "d": [6]}
msg = ("Different number of columns detected for "
"'a', cannot update or merge profiles.")
with self.assertRaisesRegex(ValueError, msg):
dp.StructuredProfiler._get_and_validate_schema_mapping(
dupe_schema_1, dupe_schema_3)
msg = ("Different column indices under "
"duplicate name 'b', cannot update "
"or merge unless schema is identical.")
with self.assertRaisesRegex(ValueError, msg):
dp.StructuredProfiler._get_and_validate_schema_mapping(
dupe_schema_1, dupe_schema_2)
msg = "Attempted to merge profiles with different numbers of columns"
with self.assertRaisesRegex(ValueError, msg):
dp.StructuredProfiler._get_and_validate_schema_mapping(
dupe_schema_1, four_col_schema)
expected_schema = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5}
actual_schema = dp.StructuredProfiler. \
_get_and_validate_schema_mapping(dupe_schema_1, dupe_schema_1)
self.assertDictEqual(actual_schema, expected_schema)
@mock.patch("dataprofiler.profilers.data_labeler_column_profile."
"DataLabelerColumn.update")
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch("dataprofiler.profilers.column_profile_compilers."
"ColumnPrimitiveTypeProfileCompiler.diff")
@mock.patch("dataprofiler.profilers.column_profile_compilers."
"ColumnStatsProfileCompiler.diff")
@mock.patch("dataprofiler.profilers.column_profile_compilers."
"ColumnDataLabelerCompiler.diff")
def test_diff(self, *mocks):
# Data labeler compiler diff
mocks[0].return_value = {
'statistics': {
'avg_predictions': {
'a': 'unchanged'
},
'label_representation': {
'a': 'unchanged'
}
},
'data_label': [[], ['a'], []]
}
# stats compiler diff
mocks[1].return_value = {
'order': ['ascending', 'descending'],
'categorical': 'unchanged',
'statistics': {
'all_compiler_stats': 'unchanged'
}
}
# primitive stats compiler diff
mocks[2].return_value = {
'data_type_representation': {
'all_data_types': 'unchanged'
},
'data_type': 'unchanged',
'statistics': {
'numerical_statistics_here': "unchanged"
}
}
data1 = pd.DataFrame([[1, 2], [5, 6]], columns=["a", "b"])
data2 = pd.DataFrame([[4, 3], [8, 7], [None, None], [9, 10]],
columns=["a", "b"])
options = dp.ProfilerOptions()
options.structured_options.correlation.is_enabled = True
profile1 = dp.StructuredProfiler(data1, options=options)
options2 = dp.ProfilerOptions()
options2.structured_options.correlation.is_enabled = True
profile2 = dp.StructuredProfiler(data2, options=options2)
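        # In the expected diff below, numeric entries appear to be
        # profile1 minus profile2, and three-element lists read as
        # [unique to profile1, shared, unique to profile2].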
expected_diff = {
'global_stats': {
'samples_used': -2,
'column_count': 'unchanged',
'row_count': -2,
'row_has_null_ratio': -0.25,
'row_is_null_ratio': -0.25,
'unique_row_ratio': 'unchanged',
'duplicate_row_count': -0.25,
'file_type': 'unchanged',
'encoding': 'unchanged',
'correlation_matrix':
np.array([[1.11022302e-16, 3.13803955e-02],
[3.13803955e-02, 0.00000000e+00]],
                         dtype=float),
'chi2_matrix':
np.array([[ 0. , -0.04475479],
[-0.04475479, 0. ]],
                             dtype=float),
'profile_schema':
[{}, {'a': 'unchanged', 'b': 'unchanged'}, {}]},
'data_stats': [
{
'column_name': 'a',
'data_type': 'unchanged',
'data_label': [[], ['a'], []],
'categorical': 'unchanged',
'order': ['ascending', 'descending'],
'statistics': {
'numerical_statistics_here':
'unchanged',
'all_compiler_stats':
'unchanged',
'avg_predictions': {'a': 'unchanged'},
'label_representation': {'a': 'unchanged'},
'sample_size': -2,
'null_count': -1,
'null_types': [[], [], ['nan']],
'null_types_index': [{}, {}, {'nan': {2}}],
'data_type_representation': {
'all_data_types': 'unchanged'
}
}
},
{
'column_name': 'b',
'data_type': 'unchanged',
'data_label': [[], ['a'], []],
'categorical': 'unchanged',
'order': ['ascending', 'descending'],
'statistics': {
'numerical_statistics_here': 'unchanged',
'all_compiler_stats': 'unchanged',
'avg_predictions': {'a': 'unchanged'},
'label_representation': {'a': 'unchanged'},
'sample_size': -2,
'null_count': -1,
'null_types': [[], [], ['nan']],
'null_types_index': [{}, {}, {'nan': {2}}],
'data_type_representation': {
'all_data_types': 'unchanged'
}
}
}
]
}
diff = profile1.diff(profile2)
expected_corr_mat = expected_diff["global_stats"].pop("correlation_matrix")
diff_corr_mat = diff["global_stats"].pop("correlation_matrix")
expected_chi2_mat = expected_diff["global_stats"].pop("chi2_matrix")
diff_chi2_mat = diff["global_stats"].pop("chi2_matrix")
np.testing.assert_array_almost_equal(expected_corr_mat, diff_corr_mat)
np.testing.assert_array_almost_equal(expected_chi2_mat, diff_chi2_mat)
self.assertDictEqual(expected_diff, diff)
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch("dataprofiler.profilers.data_labeler_column_profile."
"DataLabelerColumn.update")
def test_diff_type_checking(self, *mocks):
data = pd.DataFrame([[1, 2], [5, 6]],
columns=["a", "b"])
profile = dp.StructuredProfiler(data)
with self.assertRaisesRegex(TypeError,
'`StructuredProfiler` and `str` are not of '
'the same profiler type.'):
profile.diff("ERROR")
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch("dataprofiler.profilers.data_labeler_column_profile."
"DataLabelerColumn.update")
def test_diff_with_different_schema(self, *mocks):
data1 = pd.DataFrame([[1, 2], [5, 6]],
columns=["G", "b"])
data2 = pd.DataFrame([[4, 3, 1], [8, 7, 3], [None, None, 1], [9, 1, 10]],
columns=["a", "b", "c"])
        # Create two profiles with different schemas to diff
profile1 = dp.StructuredProfiler(data1)
profile2 = dp.StructuredProfiler(data2)
expected_diff = {
'global_stats': {
'file_type': 'unchanged',
'encoding': 'unchanged',
'samples_used': -2,
'column_count': -1,
'row_count': -2,
'row_has_null_ratio': -0.25,
'row_is_null_ratio': 'unchanged',
'unique_row_ratio': 'unchanged',
'duplicate_row_count': 'unchanged',
'correlation_matrix': None,
'chi2_matrix': None,
'profile_schema': [{'G': [0]},
{'b': 'unchanged'},
{'a': [0], 'c': [2]}]},
'data_stats': []
}
self.assertDictEqual(expected_diff, profile1.diff(profile2))
@mock.patch("dataprofiler.profilers.data_labeler_column_profile."
"DataLabelerColumn.update")
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch("dataprofiler.profilers.column_profile_compilers."
"ColumnPrimitiveTypeProfileCompiler.diff")
@mock.patch("dataprofiler.profilers.column_profile_compilers."
"ColumnStatsProfileCompiler.diff")
@mock.patch("dataprofiler.profilers.column_profile_compilers."
"ColumnDataLabelerCompiler.diff")
@mock.patch("sys.stderr", new_callable=StringIO)
def test_logs(self, mock_stderr, *mocks):
options = StructuredOptions()
options.multiprocess.is_enabled = False
# Capture logs of level INFO and above
with self.assertLogs('DataProfiler.profilers.profile_builder',
level='INFO') as logs:
StructuredProfiler(pd.DataFrame([[0, 1], [2, 3]]), options=options)
# Logs to update user on nulls and statistics
self.assertEqual(['INFO:DataProfiler.profilers.profile_builder:'
'Finding the Null values in the columns... ',
'INFO:DataProfiler.profilers.profile_builder:'
'Calculating the statistics... '],
logs.output)
# Ensure tqdm printed progress bar
self.assertIn('#' * 10, mock_stderr.getvalue())
# Clear stderr
mock_stderr.seek(0)
mock_stderr.truncate(0)
# Now tqdm shouldn't be printed
dp.set_verbosity(logging.WARNING)
StructuredProfiler(pd.DataFrame([[0, 1], [2, 3]]))
# Ensure no progress bar printed
self.assertNotIn('#' * 10, mock_stderr.getvalue())
def test_unique_row_ratio_empty_profiler(self):
profiler = StructuredProfiler(pd.DataFrame([]))
self.assertEqual(0, profiler._get_unique_row_ratio())
class TestStructuredColProfilerClass(unittest.TestCase):
def setUp(self):
test_utils.set_seed(seed=0)
@classmethod
def setUpClass(cls):
test_utils.set_seed(seed=0)
cls.input_file_path = os.path.join(
test_root_path, 'data', 'csv/aws_honeypot_marx_geo.csv'
)
cls.aws_dataset = pd.read_csv(cls.input_file_path)
def test_base_props(self):
src_column = self.aws_dataset.src
src_profile = StructuredColProfiler(
src_column, sample_size=len(src_column))
self.assertIsInstance(src_profile.profiles['data_type_profile'],
ColumnPrimitiveTypeProfileCompiler)
self.assertIsInstance(src_profile.profiles['data_stats_profile'],
ColumnStatsProfileCompiler)
self.assertIsInstance(src_profile.profiles['data_label_profile'],
ColumnDataLabelerCompiler)
data_types = ['int', 'float', 'datetime', 'text']
six.assertCountEqual(
self, data_types,
list(src_profile.profiles['data_type_profile']._profiles.keys())
)
stats_types = ['category', 'order']
six.assertCountEqual(
self, stats_types,
list(src_profile.profiles['data_stats_profile']._profiles.keys())
)
self.assertEqual(3, src_profile.null_count)
self.assertEqual(2999, src_profile.sample_size)
total_nulls = 0
for _, null_rows in src_profile.null_types_index.items():
total_nulls += len(null_rows)
self.assertEqual(3, total_nulls)
# test updated base props with batch addition
src_profile.update_profile(src_column)
src_profile.update_profile(src_column)
self.assertEqual(3*3, src_profile.null_count)
self.assertEqual(2999*3, src_profile.sample_size)
@mock.patch('dataprofiler.profilers.column_profile_compilers.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.column_profile_compilers.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.column_profile_compilers.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
def test_add_profilers(self, *mocks):
data = pd.Series([1, None, 3, 4, 5, None])
profile1 = StructuredColProfiler(data[:2])
profile2 = StructuredColProfiler(data[2:])
# test incorrect type
with self.assertRaisesRegex(TypeError,
'`StructuredColProfiler` and `int` are '
'not of the same profiler type.'):
profile1 + 3
# test mismatched names
profile1.name = 'profile1'
profile2.name = 'profile2'
with self.assertRaisesRegex(ValueError,
'Structured profile names are unmatched: '
'profile1 != profile2'):
profile1 + profile2
# test mismatched profiles due to options
profile2.name = 'profile1'
profile1._profiles = dict(test1=mock.Mock())
profile2.profiles.pop('data_label_profile')
with self.assertRaisesRegex(ValueError,
'Structured profilers were not setup with '
'the same options, hence they do not '
'calculate the same profiles and cannot be '
'added together.'):
profile1 + profile2
# test success
profile1.profiles = dict(test=1)
profile2.profiles = dict(test=2)
merged_profile = profile1 + profile2
self.assertEqual(3, merged_profile.profiles['test'])
self.assertCountEqual(['5.0', '4.0', '3.0', '1.0'], merged_profile.sample)
self.assertEqual(6, merged_profile.sample_size)
self.assertEqual(2, merged_profile.null_count)
self.assertListEqual(['nan'], merged_profile.null_types)
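        # data[:2] contributes the null at index 1 and data[2:] the one at
        # index 5; since the two index ranges do not overlap, no shifting
        # appears to be needed here (contrast with test_index_overlap_for_merge).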
self.assertDictEqual({'nan': {1, 5}}, merged_profile.null_types_index)
# test add with different sampling properties
profile1._min_sample_size = 10
profile2._min_sample_size = 100
profile1._sampling_ratio = 0.5
profile2._sampling_ratio = 0.3
profile1._min_true_samples = 11
profile2._min_true_samples = 1
merged_profile = profile1 + profile2
self.assertEqual(100, merged_profile._min_sample_size)
self.assertEqual(0.5, merged_profile._sampling_ratio)
self.assertEqual(11, merged_profile._min_true_samples)
def test_integrated_merge_diff_options(self):
options = dp.ProfilerOptions()
options.set({'data_labeler.is_enabled': False})
data = pd.DataFrame([1, 2, 3, 4])
profile1 = dp.StructuredProfiler(data, options=options)
profile2 = dp.StructuredProfiler(data)
with self.assertRaisesRegex(ValueError,
'Structured profilers were not setup with '
'the same options, hence they do not '
'calculate the same profiles and cannot be '
'added together.'):
profile1 + profile2
def test_clean_data_and_get_base_stats(self, *mocks):
data = pd.Series([1, None, 3, 4, None, 6],
index=['a', 'b', 'c', 'd', 'e', 'f'])
        # validate that the function still works when given sliced data;
        # previously `iloc` was used (instead of `loc`) at
        # `df_series = df_series.loc[sorted(true_sample_list)]`,
        # which caused errors
        # Tests with default null values set
profiler = mock.Mock(spec=StructuredColProfiler)
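        # Each null-value pattern below maps to the regex flags used when
        # matching it (0 meaning no flags); these appear to mirror the
        # profiler's default null values.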
null_values = {
"": 0,
"nan": re.IGNORECASE,
"none": re.IGNORECASE,
"null": re.IGNORECASE,
" *": 0,
"--*": 0,
"__*": 0,
}
test_utils.set_seed(seed=0)
df_series, base_stats = \
StructuredColProfiler.clean_data_and_get_base_stats(
df_series=data[1:], sample_size=6, null_values=null_values,
min_true_samples=0)
        # note: the data above is a subset (`df_series=data[1:]`), so 1.0 will not appear
self.assertTrue(np.issubdtype(np.object_, df_series.dtype))
self.assertDictEqual({'sample': ['4.0', '6.0', '3.0'],
'sample_size': 5, 'null_count': 2,
'null_types': dict(nan=['e', 'b']),
'min_id': None, 'max_id': None}, base_stats)
# Tests with some other null values set
null_values = {
"1.0": 0,
"3.0": 0
}
df_series, base_stats = \
StructuredColProfiler.clean_data_and_get_base_stats(
df_series=data, sample_size=6, null_values=null_values,
min_true_samples=0)
self.assertDictEqual({'sample': ["nan", '6.0', '4.0', "nan"],
'sample_size': 6, 'null_count': 2,
'null_types': {'1.0': ['a'], '3.0': ['c']},
'min_id': None, 'max_id': None}, base_stats)
# Tests with no null values set
null_values = {}
df_series, base_stats = \
StructuredColProfiler.clean_data_and_get_base_stats(
df_series=data, sample_size=6, null_values=null_values,
min_true_samples=0)
self.assertDictEqual({'sample': ["3.0", "4.0", '6.0', "nan", "1.0"],
'sample_size': 6, 'null_count': 0,
'null_types': {},
'min_id': None, 'max_id': None}, base_stats)
def test_column_names(self):
data = [['a', 1], ['b', 2], ['c', 3]]
df = pd.DataFrame(data, columns=['letter', 'number'])
profile1 = StructuredColProfiler(df['letter'])
profile2 = StructuredColProfiler(df['number'])
self.assertEqual(profile1.name, 'letter')
self.assertEqual(profile2.name, 'number')
df_series = pd.Series([1, 2, 3, 4, 5])
profile = StructuredColProfiler(df_series)
self.assertEqual(profile.name, df_series.name)
# Ensure issue raised
profile = StructuredColProfiler(df['letter'])
with self.assertRaises(ValueError) as context:
profile.update_profile(df['number'])
        self.assertIn(
            'Column names have changed, col number does not match prior name letter',
            str(context.exception)
        )
def test_update_match_are_abstract(self):
six.assertCountEqual(
self,
{'profile', '_update_helper', 'update'},
dp.profilers.BaseColumnProfiler.__abstractmethods__
)
def test_data_labeler_toggle(self):
src_column = self.aws_dataset.src
structured_options = StructuredOptions()
structured_options.data_labeler.is_enabled = False
std_profile = StructuredColProfiler(src_column,
sample_size=len(src_column))
togg_profile = StructuredColProfiler(src_column,
sample_size=len(src_column),
options=structured_options)
self.assertIn('data_label_profile', std_profile.profiles)
self.assertNotIn('data_label_profile', togg_profile.profiles)
def test_null_count(self):
column = pd.Series([1, float('nan')] * 10)
# test null_count when full sample size
random.seed(0)
profile = StructuredColProfiler(column, sample_size=len(column))
self.assertEqual(10, profile.null_count)
def test_generating_report_ensure_no_error(self):
file_path = os.path.join(test_root_path, 'data', 'csv/diamonds.csv')
data = pd.read_csv(file_path)
profile = dp.StructuredProfiler(data[:1000])
readable_report = profile.report(
report_options={"output_format": "compact"})
def test_get_sample_size(self):
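        # The expected values below are consistent with a rule of roughly
        # min(len(data), max(_min_sample_size, _sampling_ratio * len(data)));
        # this is inferred from the assertions, not taken from the
        # implementation.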
data = pd.DataFrame([0] * int(50e3))
# test data size < min_sample_size = 5000 by default
profiler = dp.StructuredProfiler(pd.DataFrame([]))
profiler._min_sample_size = 5000
profiler._sampling_ratio = 0.2
sample_size = profiler._get_sample_size(data[:1000])
self.assertEqual(1000, sample_size)
# test data size * 0.20 < min_sample_size < data size
sample_size = profiler._get_sample_size(data[:10000])
self.assertEqual(5000, sample_size)
        # test data size * 0.20 > min_sample_size
sample_size = profiler._get_sample_size(data)
self.assertEqual(10000, sample_size)
        # test data size * 0.50 > min_sample_size (sampling_ratio = 0.5)
profiler._sampling_ratio = 0.5
sample_size = profiler._get_sample_size(data)
self.assertEqual(25000, sample_size)
@mock.patch('dataprofiler.profilers.profile_builder.StructuredProfiler.'
'_update_profile_from_chunk')
def test_sample_size_passed_to_profile(self, *mocks):
update_mock = mocks[0]
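        # call_args[0] holds the positional args of the last call, so
        # call_args[0][1] below is the sample size forwarded to
        # _update_profile_from_chunk.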
# data setup
data = pd.DataFrame([0] * int(50e3))
# option setup
profiler_options = ProfilerOptions()
profiler_options.structured_options.multiprocess.is_enabled = False
profiler_options.set({'data_labeler.is_enabled': False})
# test data size < min_sample_size = 5000 by default
profiler = dp.StructuredProfiler(data[:1000], options=profiler_options)
profiler._min_sample_size = 5000
profiler._sampling_ratio = 0.2
self.assertEqual(1000, update_mock.call_args[0][1])
# test data size * 0.20 < min_sample_size < data size
profiler = dp.StructuredProfiler(data[:10000], options=profiler_options)
profiler._min_sample_size = 5000
profiler._sampling_ratio = 0.2
self.assertEqual(5000, update_mock.call_args[0][1])
# test min_sample_size > data size * 0.20
profiler = dp.StructuredProfiler(data, options=profiler_options)
profiler._min_sample_size = 5000
profiler._sampling_ratio = 0.2
self.assertEqual(10000, update_mock.call_args[0][1])
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
def test_index_overlap_for_update_profile(self, *mocks):
data = pd.Series([0, None, 1, 2, None])
profile = StructuredColProfiler(data)
self.assertEqual(0, profile._min_id)
self.assertEqual(4, profile._max_id)
self.assertDictEqual(profile.null_types_index, {'nan': {1, 4}})
profile.update_profile(data)
# Now all indices will be shifted by max_id + 1 (5)
# So the 2 None will move from indices 1, 4 to 6, 9
self.assertEqual(0, profile._min_id)
self.assertEqual(9, profile._max_id)
self.assertDictEqual(profile.null_types_index, {'nan': {1, 4, 6, 9}})
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
def test_index_overlap_for_merge(self, *mocks):
data = pd.Series([0, None, 1, 2, None])
profile1 = StructuredColProfiler(data)
profile2 = StructuredColProfiler(data)
# Ensure merged profile included shifted indices
profile3 = profile1 + profile2
self.assertEqual(0, profile3._min_id)
self.assertEqual(9, profile3._max_id)
self.assertDictEqual(profile3.null_types_index, {'nan': {1, 4, 6, 9}})
# Ensure original profiles not overwritten
self.assertEqual(0, profile1._min_id)
self.assertEqual(4, profile1._max_id)
self.assertDictEqual(profile1.null_types_index, {'nan': {1, 4}})
self.assertEqual(0, profile2._min_id)
self.assertEqual(4, profile2._max_id)
self.assertDictEqual(profile2.null_types_index, {'nan': {1, 4}})
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
def test_min_max_id_properly_update(self, *mocks):
data = pd.Series([1, None, 3, 4, 5, None, 1])
profile1 = StructuredColProfiler(data[:2])
profile2 = StructuredColProfiler(data[2:])
# Base initialization
self.assertEqual(0, profile1._min_id)
self.assertEqual(1, profile1._max_id)
self.assertEqual(2, profile2._min_id)
self.assertEqual(6, profile2._max_id)
# Needs to work with merge
profile3 = profile1 + profile2
self.assertEqual(0, profile3._min_id)
self.assertEqual(6, profile3._max_id)
# Needs to work with update_profile
profile = StructuredColProfiler(data[:2])
profile.update_profile(data[2:])
self.assertEqual(0, profile._min_id)
self.assertEqual(6, profile._max_id)
@mock.patch('dataprofiler.profilers.data_labeler_column_profile.DataLabeler')
@mock.patch("dataprofiler.profilers.data_labeler_column_profile."
"DataLabelerColumn.update")
@mock.patch("dataprofiler.profilers.column_profile_compilers."
"ColumnPrimitiveTypeProfileCompiler.diff")
@mock.patch("dataprofiler.profilers.column_profile_compilers."
"ColumnStatsProfileCompiler.diff")
@mock.patch("dataprofiler.profilers.column_profile_compilers."
"ColumnDataLabelerCompiler.diff")
def test_diff(self, *mocks):
# Data labeler compiler diff
mocks[0].return_value = {
'statistics': {
'avg_predictions': {
'a': 'unchanged'
},
'label_representation': {
'a': 'unchanged'
}
},
'data_label': [[], ['a'], []]
}
# stats compiler diff
mocks[1].return_value = {
'order': ['ascending', 'descending'],
'categorical': 'unchanged',
'statistics': {
'all_compiler_stats': 'unchanged'
}
}
# primitive stats compiler diff
mocks[2].return_value = {
'data_type_representation': {
'all_data_types': 'unchanged'
},
'data_type': 'unchanged',
'statistics': {
'numerical_statistics_here': "unchanged"
}
}
data = pd.Series([1, None, 3, 4, 5, None, 1])
data2 = pd.Series(["hello", "goodby", 125, 0])
data.name = "TEST"
data2.name = "TEST"
profile1 = StructuredColProfiler(data)
profile2 = StructuredColProfiler(data2)
expected_diff = {
'column_name': 'TEST',
'data_type': 'unchanged',
'data_label': [[], ['a'], []],
'categorical': 'unchanged',
'order': ['ascending', 'descending'],
'statistics': {
'numerical_statistics_here': 'unchanged',
'all_compiler_stats': 'unchanged',
'avg_predictions': {'a': 'unchanged'},
'label_representation': {'a': 'unchanged'},
'sample_size': 3,
'null_count': 2,
'null_types': [['nan'], [], []],
'null_types_index': [{'nan': {1, 5}}, {}, {}],
'data_type_representation': {
'all_data_types': 'unchanged'
}
}
}
self.assertDictEqual(expected_diff, dict(profile1.diff(profile2)))
@mock.patch('dataprofiler.profilers.profile_builder.UnstructuredCompiler',
spec=UnstructuredCompiler)
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=UnstructuredDataLabeler)
class TestUnstructuredProfiler(unittest.TestCase):
@classmethod
def setUp(cls):
test_utils.set_seed(seed=0)
def test_base(self, *mocks):
# ensure can make an empty profiler
profiler = UnstructuredProfiler(None)
self.assertIsNone(profiler.encoding)
self.assertIsNone(profiler.file_type)
self.assertIsNone(profiler._profile)
self.assertIsNone(profiler._samples_per_update)
self.assertEqual(0, profiler._min_true_samples)
self.assertEqual(0, profiler.total_samples)
self.assertEqual(0, profiler._empty_line_count)
self.assertEqual(0, profiler.memory_size)
self.assertEqual(0.2, profiler._sampling_ratio)
self.assertEqual(5000, profiler._min_sample_size)
self.assertEqual([], profiler.sample)
self.assertIsInstance(profiler.options, UnstructuredOptions)
self.assertDictEqual({}, profiler.times)
# can set samples_per_update and min_true_samples
profiler = UnstructuredProfiler(None, samples_per_update=10,
min_true_samples=5)
self.assertEqual(profiler._samples_per_update, 10)
self.assertEqual(profiler._min_true_samples, 5)
        # check that properties update correctly for data
data = pd.Series(['this', 'is my', '\n\r', 'test'])
profiler = UnstructuredProfiler(data)
self.assertEqual(4, profiler.total_samples)
self.assertCountEqual(['this', 'is my', 'test'], profiler.sample)
self.assertEqual(1, profiler._empty_line_count)
self.assertEqual(15 / 1024 ** 2, profiler.memory_size)
self.assertEqual("<class 'pandas.core.series.Series'>",
profiler.file_type)
self.assertIsNone(profiler.encoding)
self.assertIsInstance(profiler._profile, UnstructuredCompiler)
self.assertIn('clean_and_base_stats', profiler.times)
        # check that properties update correctly for data loaded from file
data = pd.Series(['this', 'is my', '\n\r', 'test'])
mock_data_reader = mock.Mock(spec=dp.data_readers.csv_data.CSVData)
mock_data_reader.data = data
mock_data_reader.data_type = 'csv'
mock_data_reader.file_encoding = 'utf-8'
mock_data_reader.input_file_path = 'fake/path/file.csv'
profiler = UnstructuredProfiler(mock_data_reader)
self.assertEqual(4, profiler.total_samples)
self.assertCountEqual(['this', 'is my', 'test'], profiler.sample)
self.assertEqual(1, profiler._empty_line_count)
self.assertEqual(15 / 1024 ** 2, profiler.memory_size)
self.assertEqual("csv", profiler.file_type)
self.assertEqual("utf-8", profiler.encoding)
self.assertIsInstance(profiler._profile, UnstructuredCompiler)
def test_bad_input_data(self, *mocks):
allowed_data_types = (r"\(<class 'str'>, "
r"<class 'list'>, "
r"<class 'pandas.core.series.Series'>, "
r"<class 'pandas.core.frame.DataFrame'>\)")
bad_data_types = [1, {}, np.inf]
for data in bad_data_types:
with self.assertRaisesRegex(TypeError,
r"Data must either be imported using "
r"the data_readers or using one of the "
r"following: " + allowed_data_types):
UnstructuredProfiler(data)
def test_str_input_data(self, *mocks):
data = 'this is my\n\rtest'
profiler = UnstructuredProfiler(data)
self.assertEqual(1, profiler.total_samples)
self.assertEqual(0, profiler._empty_line_count)
self.assertEqual(16 / 1024 ** 2, profiler.memory_size)
self.assertEqual("<class 'str'>", profiler.file_type)
self.assertIsNone(profiler.encoding)
self.assertIsInstance(profiler._profile, UnstructuredCompiler)
def test_list_input_data(self, *mocks):
data = ['this', 'is my', '\n\r', 'test']
profiler = UnstructuredProfiler(data)
self.assertEqual(4, profiler.total_samples)
self.assertEqual(1, profiler._empty_line_count)
self.assertEqual(15 / 1024 ** 2, profiler.memory_size)
self.assertEqual("<class 'list'>", profiler.file_type)
self.assertIsNone(profiler.encoding)
self.assertIsInstance(profiler._profile, UnstructuredCompiler)
def test_dataframe_input_data(self, *mocks):
data = pd.DataFrame(['this', 'is my', '\n\r', 'test'])
profiler = UnstructuredProfiler(data)
self.assertEqual(4, profiler.total_samples)
self.assertEqual(1, profiler._empty_line_count)
self.assertEqual(15 / 1024 ** 2, profiler.memory_size)
self.assertEqual("<class 'pandas.core.frame.DataFrame'>", profiler.file_type)
self.assertIsNone(profiler.encoding)
self.assertIsInstance(profiler._profile, UnstructuredCompiler)
def test_merge_profiles(self, *mocks):
        # check that properties update correctly for data
data1 = pd.Series(['this', 'is my', '\n\r', 'test'])
data2 = pd.Series(['here\n', '\t ', ' ', ' is', '\n\r', 'more data'])
# create profilers
with test_utils.mock_timeit():
profiler1 = UnstructuredProfiler(data1)
profiler2 = UnstructuredProfiler(data2)
self.assertDictEqual({'clean_and_base_stats': 1}, profiler1.times)
self.assertDictEqual({'clean_and_base_stats': 1}, profiler2.times)
# mock out _profile
profiler1._profile = 1
profiler2._profile = 2
# merge profilers
with test_utils.mock_timeit():
merged_profile = profiler1 + profiler2
self.assertEqual(10, merged_profile.total_samples)
self.assertEqual(4, merged_profile._empty_line_count)
self.assertEqual(40 / 1024 ** 2, merged_profile.memory_size)
# note how sample doesn't include whitespace lines
self.assertCountEqual(['this', ' is', 'here\n', 'more data', 'is my'],
merged_profile.sample)
self.assertEqual(3, merged_profile._profile)
self.assertDictEqual({'clean_and_base_stats': 2}, merged_profile.times)
@mock.patch('dataprofiler.profilers.profile_builder.UnstructuredCompiler.diff')
def test_diff(self, *mocks):
# Set up compiler diff
mocks[2].side_effect = [UnstructuredCompiler(), UnstructuredCompiler()]
mocks[0].return_value = {
'statistics': {
'all_vocab_and_word_stats': [['A', 'B'], ['C'], ['D']]
},
'data_label': {
'entity_counts': {
'word_and_char_level_stats': {
'LABEL': 'unchanged'
}
},
'entity_percentages': {
'word_and_char_level_stats': {
'LABEL': 'unchanged'
}
}
}
}
data1 = pd.Series(['this', 'is my', '\n\r', 'test'])
data2 = pd.Series(['here\n', '\t ', ' ', ' is', '\n\r', 'more data'])
profiler1 = UnstructuredProfiler(data1)
profiler2 = UnstructuredProfiler(data2)
expected_diff = {
'global_stats': {
'samples_used': -2,
'empty_line_count': -2,
'file_type': 'unchanged',
'encoding': 'unchanged',
'memory_size': -10/1024**2
},
'data_stats': {
'statistics': {
'all_vocab_and_word_stats': [['A', 'B'], ['C'], ['D']]},
'data_label': {
'entity_counts': {
'word_and_char_level_stats':
{'LABEL': 'unchanged'}
},
'entity_percentages': {
'word_and_char_level_stats': {
'LABEL': 'unchanged'
}
}
}
}
}
self.assertDictEqual(expected_diff, profiler1.diff(profiler2))
def test_get_sample_size(self, *mocks):
data = pd.DataFrame([0] * int(50e3))
# test data size < min_sample_size = 5000 by default
profiler = UnstructuredProfiler(None)
profiler._min_sample_size = 5000
profiler._sampling_ratio = 0.2
sample_size = profiler._get_sample_size(data[:1000])
self.assertEqual(1000, sample_size)
# test data size * 0.20 < min_sample_size < data size
sample_size = profiler._get_sample_size(data[:10000])
self.assertEqual(5000, sample_size)
        # test data size * 0.20 > min_sample_size
sample_size = profiler._get_sample_size(data)
self.assertEqual(10000, sample_size)
        # test data size * 0.50 > min_sample_size (sampling_ratio = 0.5)
profiler._sampling_ratio = 0.5
sample_size = profiler._get_sample_size(data)
self.assertEqual(25000, sample_size)
def test_clean_data_and_get_base_stats(self, *mocks):
data = pd.Series(['here\n', '\t ', 'a', ' is', '\n\r', 'more data'])
        # a mock instance is needed because _clean_data_and_get_base_stats is
        # not static: the timeit decorator wrapping this function uses the
        # class instance
profiler = mock.Mock(spec=UnstructuredProfiler)
profiler.times = {'clean_and_base_stats': 0}
# case when min_true_samples not set and subset of data
df_series, base_stats = \
UnstructuredProfiler._clean_data_and_get_base_stats(
profiler, data=data, sample_size=3)
        # note: because the sample size is 3, only a subset of the data was sampled
self.assertTrue(np.issubdtype(np.object_, df_series.dtype))
self.assertDictEqual(
{
                'sample': ['more data'],  # because only a subset was sampled
'sample_size': 3,
'empty_line_count': 2,
'memory_size': 25 / 1024 ** 2
},
base_stats)
# case when min_true_samples set and subset of data
df_series, base_stats = \
UnstructuredProfiler._clean_data_and_get_base_stats(
profiler, data=data, sample_size=3, min_true_samples=2)
        # note: because the sample size is 3, only a subset of the data was sampled
self.assertTrue(np.issubdtype(np.object_, df_series.dtype))
self.assertDictEqual(
{
'sample': ['more data', 'here\n', 'a', ' is'],
'sample_size': 6,
'empty_line_count': 2,
'memory_size': 25 / 1024 ** 2
},
base_stats)
def test_update_profile(self, *mocks):
        # check that properties update correctly for data
data1 = pd.Series(['this', 'is my', '\n\r', 'test'])
data2 = pd.Series(['here\n', '\t ', ' ', ' is', '\n\r', 'more data'])
# profiler with first dataset
with test_utils.mock_timeit():
profiler = UnstructuredProfiler(data1)
self.assertEqual(4, profiler.total_samples)
self.assertEqual(1, profiler._empty_line_count)
self.assertEqual(15 / 1024 ** 2, profiler.memory_size)
# note how sample doesn't include whitespace lines
self.assertCountEqual(['this', 'is my', 'test'], profiler.sample)
self.assertDictEqual({'clean_and_base_stats': 1}, profiler.times)
# update with second dataset
with test_utils.mock_timeit():
profiler.update_profile(data2)
self.assertEqual(10, profiler.total_samples)
self.assertEqual(4, profiler._empty_line_count)
self.assertEqual(40 / 1024 ** 2, profiler.memory_size)
# note how sample doesn't include whitespace lines
self.assertCountEqual(['here\n', ' is', 'more data'], profiler.sample)
self.assertDictEqual({'clean_and_base_stats': 2}, profiler.times)
@mock.patch('dataprofiler.profilers.profile_builder.UnstructuredProfiler.'
'_update_profile_from_chunk')
def test_min_true_samples(self, *mocks):
empty_df = pd.DataFrame([])
# Test invalid input
msg = "`min_true_samples` must be an integer or `None`."
with self.assertRaisesRegex(ValueError, msg):
profile = dp.UnstructuredProfiler(empty_df,
min_true_samples="Bloop")
# Test invalid input given to update_profile
profile = dp.UnstructuredProfiler(empty_df)
with self.assertRaisesRegex(ValueError, msg):
profile.update_profile(empty_df, min_true_samples="Bloop")
# Test None input (equivalent to zero)
profile = dp.UnstructuredProfiler(empty_df, min_true_samples=None)
self.assertEqual(None, profile._min_true_samples)
# Test valid input
profile = dp.UnstructuredProfiler(empty_df, min_true_samples=10)
self.assertEqual(10, profile._min_true_samples)
class TestUnstructuredProfilerWData(unittest.TestCase):
@classmethod
def setUp(cls):
test_utils.set_seed(seed=0)
@classmethod
def setUpClass(cls):
test_utils.set_seed(0)
cls.maxDiff = None
cls.input_data = [
'edited 9 hours ago',
'6. Do not duplicate code.',
'\t',
'Just want to caution against following this too rigidly.',
'\t',
' ',
'When you try to DRY them up into a single generic abstraction, '
'you have inadvertently coupled those two business rules together.',
' ',
' ',
'Removing duplication that repeats the handling of the exact same '
'business rule is also usually a win.',
'',
'Duplicate words: business, win, code',
'\n\r',
'Reply',
'Share',
'Report',
]
cls.dataset = pd.DataFrame(cls.input_data)
        # turn off the data labeler because results change whenever the model changes
profiler_options = ProfilerOptions()
profiler_options.set({'data_labeler.is_enabled': False})
with test_utils.mock_timeit():
cls.profiler = UnstructuredProfiler(
cls.dataset, len(cls.dataset), options=profiler_options)
cls.profiler2 = UnstructuredProfiler(
pd.DataFrame(['extra', '\n', 'test\n', 'data .',
'For merging.']),
options=profiler_options
)
cls.report = cls.profiler.report()
def test_sample(self):
self.maxDiff = None
self.assertCountEqual(
['Report',
'Reply',
'Removing duplication that repeats the handling of the exact same '
'business rule is also usually a win.',
'edited 9 hours ago',
'Just want to caution against following this too rigidly.'],
self.profiler.sample
)
def test_total_samples(self):
self.assertEqual(16, self.profiler.total_samples)
def test_empty_line_count(self):
self.assertEqual(7, self.profiler._empty_line_count)
def test_get_memory_size(self):
self.assertEqual(393 / 1024 ** 2, self.profiler.memory_size)
def test_text_profiler_results(self):
# vocab order doesn't matter
expected_vocab = ['x', 'i', 'y', 's', '9', ',', 'u', 'b', 'f', 'Y', 'J',
'v', 'r', 'o', 'a', '6', 'n', 'h', ' ', 'g', 'R', 't',
'W', '.', 'm', 'c', 'l', 'e', 'p', 'w', 'S', 'd', 'D',
':']
self.assertCountEqual(
expected_vocab,
self.report['data_stats']['statistics'].pop('vocab'))
        # assume that if the words are correct, the rest of TextProfiler merged properly
# vocab order doesn't matter, case insensitive, remove stop words
expected_word_count = {
'edited': 1, '9': 1, 'hours': 1, 'ago': 1, '6': 1, 'Do': 1,
'not': 1, 'duplicate': 1, 'code': 2, 'Just': 1, 'want': 1,
'to': 2, 'caution': 1, 'against': 1, 'following': 1, 'this': 1,
'too': 1, 'rigidly': 1, 'When': 1, 'you': 2, 'try': 1, 'DRY': 1,
'them': 1, 'up': 1, 'into': 1, 'a': 2, 'single': 1, 'generic': 1,
'abstraction': 1, 'have': 1, 'inadvertently': 1, 'coupled': 1,
'those': 1, 'two': 1, 'business': 3, 'rules': 1, 'together': 1,
'Removing': 1, 'duplication': 1, 'that': 1, 'repeats': 1, 'the': 2,
'handling': 1, 'of': 1, 'exact': 1, 'same': 1, 'rule': 1, 'is': 1,
'also': 1, 'usually': 1, 'win': 2, 'Duplicate': 1, 'words': 1,
'Reply': 1, 'Share': 1, 'Report': 1}
# adapt to the stop words (brittle test)
stop_words = \
self.profiler._profile._profiles['text']._stop_words
for key in list(expected_word_count.keys()):
if key.lower() in stop_words:
expected_word_count.pop(key)
expected_words = expected_word_count.keys()
self.assertCountEqual(
expected_words,
self.report['data_stats']['statistics'].pop('words'))
# test for vocab_count
expected_vocab_count = {' ': 55, ',': 3, '.': 5, '6': 1, '9': 1,
':': 1, 'D': 3, 'J': 1, 'R': 4, 'S': 1,
'W': 1, 'Y': 1, 'a': 22, 'b': 4, 'c': 10,
'd': 11, 'e': 33, 'f': 2, 'g': 9, 'h': 12,
'i': 24, 'l': 16, 'm': 3, 'n': 21, 'o': 27,
'p': 8, 'r': 13, 's': 23, 't': 31, 'u': 17,
'v': 3, 'w': 6, 'x': 1, 'y': 7}
# expected after the popping: times, vocab, words
expected_report = {
'global_stats': {
'samples_used': 16,
'empty_line_count': 7,
'memory_size': 393 / 1024 ** 2,
'file_type': "<class 'pandas.core.frame.DataFrame'>",
'encoding': None,
'times': {'clean_and_base_stats': 1}
},
'data_stats': {
'data_label': {},
'statistics': {
'word_count': expected_word_count,
'vocab_count': expected_vocab_count,
'times': {'words': 1, 'vocab': 1},
}
}
}
self.assertDictEqual(expected_report, self.report)
def test_add_profilers(self):
merged_profiler = self.profiler + self.profiler2
report = merged_profiler.report()
self.assertEqual(21, merged_profiler.total_samples)
self.assertEqual(8, merged_profiler._empty_line_count)
self.assertEqual(422 / 1024 ** 2, merged_profiler.memory_size)
self.assertCountEqual(
['test\n',
'extra',
'Reply',
'edited 9 hours ago',
'Removing duplication that repeats the handling of the exact same '
'business rule is also usually a win.'],
merged_profiler.sample
)
        # assume that if the words are correct, the rest of TextProfiler merged properly
# vocab order doesn't matter, case insensitive, remove stop words
expected_word_count = {
'edited': 1, '9': 1, 'hours': 1, 'ago': 1, '6': 1, 'Do': 1,
'not': 1, 'duplicate': 1, 'code': 2, 'Just': 1, 'want': 1,
'to': 2, 'caution': 1, 'against': 1, 'following': 1, 'this': 1,
'too': 1, 'rigidly': 1, 'When': 1, 'you': 2, 'try': 1, 'DRY': 1,
'them': 1, 'up': 1, 'into': 1, 'a': 2, 'single': 1, 'generic': 1,
'abstraction': 1, 'have': 1, 'inadvertently': 1, 'coupled': 1,
'those': 1, 'two': 1, 'business': 3, 'rules': 1, 'together': 1,
'Removing': 1, 'duplication': 1, 'that': 1, 'repeats': 1, 'the': 2,
'handling': 1, 'of': 1, 'exact': 1, 'same': 1, 'rule': 1, 'is': 1,
'also': 1, 'usually': 1, 'win': 2, 'Duplicate': 1, 'words': 1,
'Reply': 1, 'Share': 1, 'Report': 1, 'extra': 1, 'test': 1,
'data': 1, 'merging': 1}
# adapt to the stop words (brittle test)
stop_words = \
merged_profiler._profile._profiles['text']._stop_words
for key in list(expected_word_count.keys()):
if key.lower() in stop_words:
expected_word_count.pop(key)
expected_words = expected_word_count.keys()
self.assertCountEqual(
expected_words,
report['data_stats']['statistics']['words'])
self.assertDictEqual(
expected_word_count,
report['data_stats']['statistics']['word_count'])
def test_update_profile(self):
        # turn off the data labeler because results change whenever the model changes
profiler_options = ProfilerOptions()
profiler_options.set({'data_labeler.is_enabled': False})
# update profiler and get report
update_profiler = UnstructuredProfiler(self.dataset,
options=profiler_options)
update_profiler.update_profile(pd.DataFrame(['extra', '\n', 'test\n',
'data .', 'For merging.']))
report = update_profiler.report()
# tests
self.assertEqual(21, update_profiler.total_samples)
self.assertEqual(8, update_profiler._empty_line_count)
self.assertEqual(422 / 1024 ** 2, update_profiler.memory_size)
# Note: different from merge because sample is from last update only
self.assertCountEqual(
['test\n', 'extra', 'For merging.', 'data .'],
update_profiler.sample
)
        # assume that if the words are correct, the rest of TextProfiler merged properly
# vocab order doesn't matter, case insensitive, remove stop words
expected_word_count = {
'edited': 1, '9': 1, 'hours': 1, 'ago': 1, '6': 1, 'Do': 1,
'not': 1, 'duplicate': 1, 'code': 2, 'Just': 1, 'want': 1,
'to': 2, 'caution': 1, 'against': 1, 'following': 1, 'this': 1,
'too': 1, 'rigidly': 1, 'When': 1, 'you': 2, 'try': 1, 'DRY': 1,
'them': 1, 'up': 1, 'into': 1, 'a': 2, 'single': 1, 'generic': 1,
'abstraction': 1, 'have': 1, 'inadvertently': 1, 'coupled': 1,
'those': 1, 'two': 1, 'business': 3, 'rules': 1, 'together': 1,
'Removing': 1, 'duplication': 1, 'that': 1, 'repeats': 1, 'the': 2,
'handling': 1, 'of': 1, 'exact': 1, 'same': 1, 'rule': 1, 'is': 1,
'also': 1, 'usually': 1, 'win': 2, 'Duplicate': 1, 'words': 1,
'Reply': 1, 'Share': 1, 'Report': 1, 'extra': 1, 'test': 1,
'data': 1, 'merging': 1}
# adapt to the stop words (brittle test)
stop_words = \
update_profiler._profile._profiles['text']._stop_words
for key in list(expected_word_count.keys()):
if key.lower() in stop_words:
expected_word_count.pop(key)
expected_words = expected_word_count.keys()
self.assertCountEqual(
expected_words,
report['data_stats']['statistics']['words'])
self.assertDictEqual(
expected_word_count,
report['data_stats']['statistics']['word_count'])
def test_save_and_load(self):
data_folder = "dataprofiler/tests/data/"
test_files = ["txt/code.txt", "txt/sentence-10x.txt"]
for test_file in test_files:
# Create Data and StructuredProfiler objects
data = dp.Data(os.path.join(data_folder, test_file))
save_profile = UnstructuredProfiler(data)
            # If _empty_line_count were 0, this wouldn't verify that the
            # variable is saved correctly, since 0 is also the default value.
            # Ensure it is not the default.
save_profile._empty_line_count = 1
# store the expected data_labeler
data_labeler = save_profile.options.data_labeler.data_labeler_object
# Save and Load profile with Mock IO
with mock.patch('builtins.open') as m:
mock_file = setup_save_mock_open(m)
save_profile.save()
# make sure data_labeler unchanged
self.assertIs(
data_labeler,
save_profile.options.data_labeler.data_labeler_object)
self.assertIs(
data_labeler,
save_profile._profile._profiles['data_labeler'].data_labeler)
mock_file.seek(0)
with mock.patch('dataprofiler.profilers.profile_builder.'
'DataLabeler', return_value=data_labeler):
load_profile = UnstructuredProfiler.load("mock.pkl")
# validate loaded profile has same data labeler class
self.assertIsInstance(
load_profile.options.data_labeler.data_labeler_object,
data_labeler.__class__)
self.assertIsInstance(
load_profile.profile._profiles['data_labeler'].data_labeler,
data_labeler.__class__)
# Check that reports are equivalent
save_report = save_profile.report()
load_report = load_profile.report()
self.assertDictEqual(save_report, load_report)
# Check that sample was properly saved and loaded
save_sample = save_profile.sample
load_sample = load_profile.sample
self.assertEqual(save_sample, load_sample)
# validate both are still usable after
save_profile.update_profile(pd.DataFrame(['test', 'test2']))
load_profile.update_profile(pd.DataFrame(['test', 'test2']))
def test_save_and_load_no_labeler(self):
# Create Data and UnstructuredProfiler objects
data = 'this is my test data: 123-456-7890'
profile_options = dp.ProfilerOptions()
profile_options.set({"data_labeler.is_enabled": False})
save_profile = dp.UnstructuredProfiler(data, options=profile_options)
# Save and Load profile with Mock IO
with mock.patch('builtins.open') as m:
mock_file = setup_save_mock_open(m)
save_profile.save()
mock_file.seek(0)
with mock.patch('dataprofiler.profilers.profile_builder.'
'DataLabeler'):
load_profile = dp.UnstructuredProfiler.load("mock.pkl")
# Check that reports are equivalent
save_report = save_profile.report()
load_report = load_profile.report()
self.assertDictEqual(save_report, load_report)
# Check that sample was properly saved and loaded
save_sample = save_profile.sample
load_sample = load_profile.sample
self.assertEqual(save_sample, load_sample)
# validate both are still usable after
        save_profile.update_profile(pd.DataFrame(['test', 'test2']))
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2020, empress development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import copy
import unittest
import pandas as pd
import numpy as np
import skbio
from skbio.util import assert_ordination_results_equal
from pandas.util.testing import assert_frame_equal
from os.path import exists
from shutil import rmtree
import biom
from .util import load_mp_data
from emperor import Emperor
from empress import tools
from empress.core import Empress
from bp import parse_newick, from_skbio_treenode
from six import StringIO
from skbio.tree import TreeNode
class TestCore(unittest.TestCase):
def setUp(self):
self.tree = parse_newick('(((a:1,e:2):1,b:2)g:1,(:1,d:3)h:2):1;')
self.pruned_tree = TreeNode.read(
StringIO('(((a:1)EmpressNode0:1,b:2)g:1,(d:3)h:2)EmpressNode1:1;')
)
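        # pruned_tree appears to be self.tree sheared to the features a, b,
        # and d (dropping 'e' and the unnamed tip), with unnamed nodes given
        # placeholder "EmpressNode" labels.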
# Test table/metadata (mostly) adapted from Qurro:
self.table = biom.Table(np.array([[1, 2, 0, 4],
[8, 7, 0, 5],
[1, 0, 0, 0],
[0, 0, 1, 0]]).T,
list('abed'),
['Sample1', 'Sample2', 'Sample3', 'Sample4'])
self.unrelated_table = biom.Table(np.array([[5, 2, 0, 2],
[2, 3, 0, 1],
[5, 2, 0, 0],
[4, 5, 0, 4]]).T,
list("hijk"),
['Sample1', 'Sample2', 'Sample3',
'Sample4'])
self.sample_metadata = pd.DataFrame(
{
"Metadata1": [0, 0, 0, 1],
"Metadata2": [0, 0, 0, 0],
"Metadata3": [1, 2, 3, 4],
"Metadata4": ["abc", "def", "ghi", "jkl"]
},
index=list(self.table.ids())
)
self.feature_metadata = pd.DataFrame(
{
"fmdcol1": ["asdf", "ghjk"],
"fmdcol2": ["qwer", "tyui"]
},
index=["a", "h"]
)
self.filtered_table = biom.Table(np.array([[1, 2, 4],
[8, 7, 5],
[1, 0, 0]]).T,
['a', 'b', 'd'],
['Sample1', 'Sample2', 'Sample3'])
self.filtered_sample_metadata = pd.DataFrame(
{
"Metadata1": [0, 0, 0],
"Metadata2": [0, 0, 0],
"Metadata3": [1, 2, 3],
"Metadata4": ["abc", "def", "ghi"]
},
index=["Sample1", "Sample2", "Sample3"]
)
eigvals = pd.Series(np.array([0.50, 0.25, 0.25]),
index=['PC1', 'PC2', 'PC3'])
samples = np.array([[0.1, 0.2, 0.3],
[0.2, 0.3, 0.4],
[0.3, 0.4, 0.5],
[0.4, 0.5, 0.6]])
proportion_explained = pd.Series([15.5, 12.2, 8.8],
index=['PC1', 'PC2', 'PC3'])
samples_df = pd.DataFrame(samples,
index=['Sample1', 'Sample2', 'Sample3',
'Sample4'],
columns=['PC1', 'PC2', 'PC3'])
self.pcoa = skbio.OrdinationResults(
'PCoA',
'Principal Coordinate Analysis',
eigvals,
samples_df,
proportion_explained=proportion_explained)
features = np.abs(samples_df.copy() / 2.0).iloc[:2, :]
features.index = 'f.' + features.index
self.biplot_no_matches = skbio.OrdinationResults(
'PCoA',
'Principal Coordinate Analysis',
eigvals,
samples_df,
features=features,
proportion_explained=proportion_explained)
features = np.abs(samples_df / 2.0).iloc[:2, :]
features.index = pd.Index(['a', 'h'])
self.biplot = skbio.OrdinationResults(
'PCoA',
'Principal Coordinate Analysis',
eigvals,
samples_df,
features=features,
proportion_explained=proportion_explained)
self.biplot_tree = parse_newick(
'(((y:1,z:2):1,b:2)g:1,(:1,d:3)h:2):1;')
self.biplot_table = biom.Table(np.array([[1, 2], [8, 7],
[1, 0], [0, 3]]).T,
['y', 'z'],
['Sample1', 'Sample2', 'Sample3',
'Sample4'])
self.files_to_remove = []
self.maxDiff = None
def tearDown(self):
for path in self.files_to_remove:
if exists(path):
rmtree(path)
def test_init(self):
viz = Empress(self.tree, self.table, self.sample_metadata,
shear_to_table=False)
self.assertEqual(viz.base_url, 'support_files')
self.assertEqual(list(viz.tree.B), [1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1,
1, 0, 1, 0, 0, 0])
names = ['a', 'e', None, 'b', 'g', None, 'd', 'h', None]
for i in range(1, len(viz.tree) + 1):
node = viz.tree.postorderselect(i)
self.assertEqual(viz.tree.name(node), names[i - 1])
# table should be unchanged and be a different id instance
self.assertEqual(self.table, viz.table)
self.assertNotEqual(id(self.table), id(viz.table))
# sample metadata should be unchanged and be a different id instance
assert_frame_equal(self.sample_metadata, viz.samples)
self.assertNotEqual(id(self.sample_metadata), id(viz.samples))
self.assertIsNone(viz.features)
self.assertIsNone(viz.ordination)
self.assertTrue(viz.is_community_plot)
def test_init_tree_plot(self):
# Simplest case (no feature metadata)
viz = Empress(self.tree)
self.assertFalse(viz.is_community_plot)
self.assertIsNone(viz.tip_md)
self.assertIsNone(viz.int_md)
# Slightly less simple case (with feature metadata)
viz = Empress(self.tree, feature_metadata=self.feature_metadata)
self.assertFalse(viz.is_community_plot)
assert_frame_equal(viz.tip_md, self.feature_metadata.loc[["a"]])
assert_frame_equal(viz.int_md, self.feature_metadata.loc[["h"]])
def test_init_tree_plot_extra_fm(self):
# Checks that extra stuff in the feature metadata (which doesn't match
# any node in the tree) is filtered out of the visualization, even if
# tree-plot is used.
extra_fm = pd.DataFrame(
{
"fmdcol1": ["zxcv", "bnm,"],
"fmdcol2": ["zaq1", "xsw2"]
},
index=["weshould", "befiltered"]
)
smooshed_fm = self.feature_metadata.append(extra_fm)
viz = Empress(self.tree, feature_metadata=smooshed_fm)
self.assertFalse(viz.is_community_plot)
assert_frame_equal(viz.tip_md, self.feature_metadata.loc[["a"]])
assert_frame_equal(viz.int_md, self.feature_metadata.loc[["h"]])
def test_init_tree_plot_fm_not_matching(self):
# Mainly, this test validates that the matching done between the tree
# nodes and feature metadata is still performed even if tree-plot is
# used.
bad_fm = self.feature_metadata.copy()
bad_fm.index = ["idont", "match :O"]
with self.assertRaisesRegex(
tools.DataMatchingError,
(
"No features in the feature metadata are present in the tree, "
"either as tips or as internal nodes."
)
):
Empress(self.tree, feature_metadata=bad_fm)
def test_init_tree_plot_shear_without_metadata(self):
with self.assertRaisesRegex(ValueError,
"Feature metadata must be provided"):
Empress(self.tree, shear_to_feature_metadata=True)
def test_init_only_one_of_table_and_sm_passed(self):
exp_errmsg = (
"Both the table and sample metadata should be specified or None. "
"However, only one of them is None."
)
with self.assertRaisesRegex(ValueError, exp_errmsg):
Empress(self.tree, self.table)
with self.assertRaisesRegex(ValueError, exp_errmsg):
Empress(self.tree, sample_metadata=self.sample_metadata)
def test_init_with_ordination(self):
viz = Empress(self.tree, self.table, self.sample_metadata,
ordination=self.pcoa,
shear_to_table=False)
self.assertEqual(viz.base_url, 'support_files')
self.assertEqual(list(viz.tree.B), [1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1,
1, 0, 1, 0, 0, 0])
names = ['a', 'e', None, 'b', 'g', None, 'd', 'h', None]
for i in range(1, len(viz.tree) + 1):
node = viz.tree.postorderselect(i)
self.assertEqual(viz.tree.name(node), names[i - 1])
# table should be unchanged and be a different id instance
self.assertEqual(self.table, viz.table)
self.assertNotEqual(id(self.table), id(viz.table))
# sample metadata should be unchanged and be a different id instance
assert_frame_equal(self.sample_metadata, viz.samples)
self.assertNotEqual(id(self.sample_metadata), id(viz.samples))
self.assertIsNone(viz.features)
assert_ordination_results_equal(viz.ordination, self.pcoa)
# emperor is instantiated as needed but not yet setup
self.assertTrue(isinstance(viz._emperor, Emperor))
def test_init_with_ordination_features(self):
'''Check that empress does not break when ordination has features
but empress itself does not.'''
viz = Empress(self.tree, self.table, self.sample_metadata,
ordination=self.biplot, shear_to_table=False)
self.assertIsNone(viz.features)
def test_init_with_ordination_empty_samples_in_pcoa(self):
def make_bad(v, i, m):
if i in ['Sample2', 'Sample4']:
return np.zeros(len(v))
else:
return v
bad_table = self.table.copy()
bad_table.transform(make_bad, inplace=True)
with self.assertRaisesRegex(
ValueError,
(
r"The ordination contains samples that are empty \(i.e. "
r"all 0s\) in the table. Problematic sample IDs: Sample2, "
"Sample4"
)
):
Empress(self.tree, bad_table, self.sample_metadata,
ordination=self.pcoa,
shear_to_table=False)
def test_copy_support_files_use_base(self):
local_path = './some-local-path/'
viz = Empress(self.tree, self.table, self.sample_metadata,
resource_path=local_path,
shear_to_table=False)
self.assertEqual(viz.base_url, local_path)
viz.copy_support_files()
self.assertTrue(exists(local_path))
self.files_to_remove.append(local_path)
def test_copy_support_files_use_target(self):
local_path = './other-local-path/'
viz = Empress(self.tree, self.table, self.sample_metadata,
resource_path=local_path,
shear_to_table=False)
self.assertEqual(viz.base_url, local_path)
viz.copy_support_files(target='./something-else')
self.assertTrue(exists('./something-else'))
self.files_to_remove.append(local_path)
self.files_to_remove.append('./something-else')
def test_to_dict(self):
viz = Empress(self.tree, self.table, self.sample_metadata,
shear_to_table=False)
obs = viz.to_dict()
dict_a_cp = copy.deepcopy(DICT_A)
# NOTE: Uncomment the following two lines of code to write the current
# DICT_A to a file. Once it's written to a file, you can run
# "black -l 79 dictcode.py" (while in the same directory as the file)
# to format it so that it's consistent with how DICT_A is set up at the
# bottom of this file.
# with open("dictcode.py", "w") as f:
# f.write("DICT_A = {}".format(str(obs)))
self.assertEqual(obs, dict_a_cp)
def test_to_dict_with_feature_metadata(self):
viz = Empress(
self.tree, self.table, self.sample_metadata, self.feature_metadata,
shear_to_table=False
)
obs = viz.to_dict()
dict_a_with_fm = copy.deepcopy(DICT_A)
dict_a_with_fm["compressed_tip_metadata"] = {1: ["asdf", "qwer"]}
dict_a_with_fm["compressed_int_metadata"] = {8: ["ghjk", "tyui"]}
dict_a_with_fm["feature_metadata_columns"] = ["fmdcol1", "fmdcol2"]
self.assertEqual(obs, dict_a_with_fm)
def test_to_dict_with_metadata_nans(self):
nan_sample_metadata = self.sample_metadata.copy()
nan_feature_metadata = self.feature_metadata.copy()
nan_sample_metadata.at["Sample2", "Metadata4"] = np.nan
nan_feature_metadata.at["h", "fmdcol1"] = np.nan
nan_feature_metadata.at["a", "fmdcol2"] = np.nan
viz = Empress(self.tree, self.table, nan_sample_metadata,
nan_feature_metadata,
shear_to_table=False)
obs = viz.to_dict()
dict_a_nan = copy.deepcopy(DICT_A)
# [1][3] corresponds to Sample2, Metadata4
dict_a_nan["compressed_sample_metadata"][1][3] = str(np.nan)
dict_a_nan["compressed_tip_metadata"] = {1: ["asdf", str(np.nan)]}
dict_a_nan["compressed_int_metadata"] = {8: [str(np.nan), "tyui"]}
dict_a_nan["feature_metadata_columns"] = ["fmdcol1", "fmdcol2"]
self.assertEqual(obs, dict_a_nan)
res = viz.make_empress()
self.assertTrue('empressRequire' in res)
self.assertTrue('empress = new Empress' in res)
self.assertTrue('emperor_require_logic' not in res)
def test_to_dict_with_emperor(self):
viz = Empress(self.tree, self.table, self.sample_metadata,
ordination=self.pcoa,
shear_to_table=False,
filter_extra_samples=True)
obs = viz.to_dict()
self.assertEqual(viz._emperor.width, '50vw')
self.assertEqual(viz._emperor.height, '100vh; float: right')
self.assertEqual(viz._emperor.settings['axes']['axesColor'], 'black')
self.assertEqual(viz._emperor.settings['axes']['backgroundColor'],
'white')
        # we test key by key so we can do "general" checks on the emperor
        # values; this helps with tests not breaking if any character changes
        # in Emperor
dict_a_cp = copy.deepcopy(DICT_A)
        # set is_empire_plot flag to True since DICT_A sets it as False (all
        # other tests use a False value)
dict_a_cp["is_empire_plot"] = True
for key, value in obs.items():
if not key.startswith('emperor_'):
self.assertEqual(obs[key], dict_a_cp[key])
exp = "<div id='emperor-in-empire'"
self.assertTrue(obs['emperor_div'].startswith(exp))
exp = "// When running in the Jupyter"
self.assertTrue(obs['emperor_require_logic'].startswith(exp))
exp = "}); // END REQUIRE.JS block"
self.assertTrue(obs['emperor_require_logic'].endswith(exp))
self.assertTrue('"#emperor-css"' in obs['emperor_style'])
exp = "vendor/js/jquery-"
self.assertEqual(obs['emperor_base_dependencies'].count(exp), 1)
self.assertTrue(obs['emperor_classes'], 'combined-plot-container')
def _clear_copied_dict_a(self, dict_a_cp):
"""Clears a copy of DICT_A to look as we'd expect it to look if
qiime empress tree-plot was used (i.e. no table / sample metadata
were specified).
"""
dict_a_cp["is_community_plot"] = False
# When no table / s. metadata is passed, many values in the dict
# representation should just be None
for nfield in [
"s_ids", "f_ids", "compressed_table", "sample_metadata_columns",
"compressed_sample_metadata"
]:
dict_a_cp[nfield] = None
# These things are represented as empty dicts, though. (The main reason
# for this is that making f_ids_to_indices be None in this case would
# have required some gross special-casing to refactor things, so for
# the sake of consistency and clean code both default to {}.)
for efield in ["s_ids_to_indices", "f_ids_to_indices"]:
dict_a_cp[efield] = {}
# We don't need to reset the feature metadata stuff because that should
# already be empty, since the main to_dict test doesn't use f.m.
def test_to_dict_tree_plot(self):
viz = Empress(self.tree)
dict_a_cp = copy.deepcopy(DICT_A)
self._clear_copied_dict_a(dict_a_cp)
obs = viz.to_dict()
self.assertEqual(obs, dict_a_cp)
def test_to_dict_tree_plot_with_feature_metadata(self):
viz = Empress(self.tree, feature_metadata=self.feature_metadata)
# Set up expected dict
dict_a_cp = copy.deepcopy(DICT_A)
self._clear_copied_dict_a(dict_a_cp)
# Copied from test_to_dict_with_feature_metadata() above
dict_a_cp["compressed_tip_metadata"] = {1: ["asdf", "qwer"]}
dict_a_cp["compressed_int_metadata"] = {8: ["ghjk", "tyui"]}
dict_a_cp["feature_metadata_columns"] = ["fmdcol1", "fmdcol2"]
obs = viz.to_dict()
self.assertEqual(obs, dict_a_cp)
def test_shear_tree_to_table(self):
viz = Empress(self.tree, self.filtered_table,
self.filtered_sample_metadata,
shear_to_table=True)
self.assertEqual(list(viz.tree.B), [1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1,
0, 0, 0])
names = ['a', None, 'b', 'g', 'd', 'h', None]
for i in range(1, len(viz.tree) + 1):
node = viz.tree.postorderselect(i)
self.assertEqual(viz.tree.name(node), names[i - 1])
# table should be unchanged and be a different id instance
self.assertEqual(self.filtered_table, viz.table)
self.assertNotEqual(id(self.filtered_table), id(viz.table))
# sample metadata should be unchanged and be a different id instance
assert_frame_equal(self.filtered_sample_metadata, viz.samples)
self.assertNotEqual(id(self.filtered_sample_metadata), id(viz.samples))
self.assertIsNone(viz.features)
self.assertIsNone(viz.ordination)
def test_fm_filtering_post_shearing(self):
extra_fm = self.feature_metadata.copy()
extra_fm.loc["e"] = "i'm going to be filtered :O"
viz = Empress(self.tree, self.filtered_table,
self.filtered_sample_metadata, feature_metadata=extra_fm,
shear_to_table=True)
# Same as with the shearing test above, check that the tree was handled
# as expected
self.assertEqual(list(viz.tree.B), [1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1,
0, 0, 0])
names = ['a', None, 'b', 'g', 'd', 'h', None]
for i in range(1, len(viz.tree) + 1):
node = viz.tree.postorderselect(i)
self.assertEqual(viz.tree.name(node), names[i - 1])
# Now, the point of this test: verify that the feature metadata was
# filtered to just stuff in the sheared tree ("e" was removed from the
# tip metadata)
assert_frame_equal(extra_fm.loc[["a"]], viz.tip_md)
assert_frame_equal(extra_fm.loc[["h"]], viz.int_md)
# table should be unchanged and be a different id instance
self.assertEqual(self.filtered_table, viz.table)
self.assertNotEqual(id(self.filtered_table), id(viz.table))
# sample metadata should be unchanged and be a different id instance
assert_frame_equal(self.filtered_sample_metadata, viz.samples)
self.assertNotEqual(id(self.filtered_sample_metadata), id(viz.samples))
self.assertIsNone(viz.ordination)
def test_shear_tree_to_fm_simple(self):
# remove e same as in test_shear_tree
mini_fm = self.feature_metadata.copy()
mini_fm.loc["b"] = ["pikachu", "raichu"]
mini_fm.loc["d"] = ["mew", "mewtwo"]
viz = Empress(self.tree, feature_metadata=mini_fm,
shear_to_table=False, shear_to_feature_metadata=True)
self.assertEqual(list(viz.tree.B), [1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1,
0, 0, 0])
names = ['a', None, 'b', 'g', 'd', 'h', None]
for i in range(1, len(viz.tree) + 1):
node = viz.tree.postorderselect(i)
self.assertEqual(viz.tree.name(node), names[i - 1])
assert_frame_equal(viz.tip_md, mini_fm.loc[["a", "b", "d"]])
assert_frame_equal(viz.int_md, mini_fm.loc[["h"]])
# feature metadata should be unchanged and be a different id instance
assert_frame_equal(mini_fm, viz.features)
self.assertNotEqual(id(mini_fm), id(viz.features))
self.assertIsNone(viz.ordination)
def test_shear_tree_to_fm_only_int(self):
int_fm = pd.DataFrame(
{
"fmdcol1": ["vulpix", "ninetales"],
"fmdcol2": ["growlithe", "arcanine"]
},
index=["g", "h"]
)
exp_errmsg = (
"Cannot shear tree to feature metadata: no tips in "
"the tree are present in the feature metadata."
)
with self.assertRaisesRegex(ValueError, exp_errmsg):
Empress(self.tree, feature_metadata=int_fm, shear_to_table=False,
shear_to_feature_metadata=True)
def test_shear_tree_to_fm_one_tip(self):
lonely_fm = pd.DataFrame(
{
"fmdcol1": ["mimikyu"],
},
index=["a"]
)
viz = Empress(self.tree, feature_metadata=lonely_fm,
shear_to_table=False, shear_to_feature_metadata=True)
names = ['a', None, 'g', None]
for i in range(1, len(viz.tree) + 1):
node = viz.tree.postorderselect(i)
self.assertEqual(viz.tree.name(node), names[i - 1])
assert_frame_equal(viz.tip_md, lonely_fm.loc[["a"]])
self.assertTrue(viz.int_md.empty)
# feature metadata should be unchanged and be a different id instance
assert_frame_equal(lonely_fm, viz.features)
self.assertNotEqual(id(lonely_fm), id(viz.features))
self.assertIsNone(viz.ordination)
def test_shear_tree_to_fm_rmv_int_md(self):
"""
Shear tree to feature metadata but metadata has entry for an internal
node that gets filtered out from the shearing.
"""
# default feature metadata works - internal node h filtered out
viz = Empress(self.tree, feature_metadata=self.feature_metadata,
shear_to_table=False, shear_to_feature_metadata=True)
names = ['a', None, 'g', None]
for i in range(1, len(viz.tree) + 1):
node = viz.tree.postorderselect(i)
self.assertEqual(viz.tree.name(node), names[i - 1])
| assert_frame_equal(viz.tip_md, self.feature_metadata.loc[["a"]]) | pandas.util.testing.assert_frame_equal |
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression  # the algorithm used this time
from sklearn.tree import DecisionTreeClassifier  # decision tree (classification)
import pandas as pd  # for working with DataFrames
import datetime
from dateutil.relativedelta import relativedelta
'''
Objective variable and explanatory variables
In prediction,
  what you want to predict       => the objective (target) variable
  what you use as clues for it   => the explanatory variables (called "features" in machine learning)
For example, if you want to know your wife's mood, her mood is the objective variable, and the explanatory variables might be the season, your salary, the time you get home, or how much time you spend together per day. If, say, her mood depends on the time you get home, you could use that as an explanatory variable and work out what time to come home so as not to put her in a bad mood.
You are not limited to a single explanatory variable; several can be used.
A simple regression model predicts one objective variable from one explanatory variable.
A multiple regression model predicts one objective variable from several explanatory variables.
As noted in the earlier section on overfitting, fitting past data too closely can turn out to be useless in practice, so choosing appropriate explanatory variables is important for prediction.
Understanding the simple regression model
A simple regression model makes predictions with the regression line obtained by the least-squares method.
That regression line approximates the scatter plot of two values. When the two values are correlated, the plotted points roughly line up, so simple regression is usable when the explanatory and objective variables are reasonably correlated; without correlation it will not give a sensible result.
Why the explanatory variable is reshaped for simple regression
Given some data "train" with a column "day" chosen as the explanatory variable and a column "y" as the objective variable, calling
LinearRegression.fit(day.values, y)
raises
ValueError: Expected 2D array, got 1D array instead:
For a while I mistakenly read "2D" (two-dimensional) as "two columns".
The original data day.values is a one-dimensional array like [1, 2, 3, ...].
What is required is a two-dimensional array, which reshape() provides.
day.values.reshape(-1, 1) produces [[1], [2], [3], ...].
That is still the same data as day.values, just arranged as n rows by 1 column, but when fit() is given a single explanatory variable it has to be passed as an n-by-1 two-dimensional array via reshape(-1, 1).
Meaning of reshape(-1, 1)
reshape(-1, n) reshapes the array so that it has n columns.
reshape(n, -1) reshapes the array so that it has n rows.
'''
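# A minimal illustrative sketch of the fit/reshape flow described above; the toy
# numbers and the names _days, _prices and _toy_model are assumptions made for this
# example only, not data used elsewhere in this script.
_days = np.arange(1, 6)                        # 1-D array of day numbers: [1 2 3 4 5]
_prices = np.array([100.0, 102.0, 101.0, 105.0, 107.0])
_toy_model = LinearRegression()
_toy_model.fit(_days.reshape(-1, 1), _prices)  # fit() needs a 2-D (n_samples, n_features) array
print(_toy_model.predict(np.array([[6]])))     # predicted price for a hypothetical day 6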
X_train = [] # training data
y_train = [] # array of up/down results
y_test = []
code = '6758'
'''Fit the model on the training feature arrays (train_X) and the result array (train_y); feeding it the test feature arrays (test_X) returns the predictions (test_y).'''
'''Build an AI that predicts the next stock price.'''
# Predict the next price from the last 10 days of price movements. Change the settings of the prediction AI as below and train it.
'''Settings.'''
interval = 10  # how many recent days of prices the prediction is based on (test_x)
future = 1  # change "future" to set how many days ahead to predict (test_y)
import datetime
# Decide the date range and fetch data from the chosen start date up to today.
start_train = datetime.date(2017, 1, 1)  # training data (history so far)
#end_train = datetime.date(2021,12,31)
end_train = datetime.date.today() + relativedelta(days=-1)  # available up to yesterday (today's price is still moving)
from pandas_datareader import data as pdr
#datetime.date.today() + relativedelta(days=-1)
#start_test = datetime.date(2022, 1, 1)  # test data
start_test = datetime.date.today() + relativedelta(days= -1)  # test data
#end_test = datetime.date.today()  # available up to yesterday (today's price is still moving)
end_test = datetime.date.today()  # + relativedelta(days= -1)
'''Load the data to use.'''
#closed = pdr.get_data_yahoo(f'{code}.T', start, end)["Close"]  # fetch the price data
Stock_train_df = pdr.get_data_yahoo(f'{code}.T', start_train, end_train)["Adj Close"]  # load the training data
Stock_test_df = pdr.get_data_yahoo(f'{code}.T', start_test, end_test)["Adj Close"]  # load the test data
'''A function that builds the explanatory variables (the last `interval` prices) and the corresponding objective variable (the price `future` steps later).'''
# interval = how many recent prices the prediction is based on.
# future = how many steps ahead to predict.
def make_data(data):
    x = []  # explanatory variables
    y = []  # objective variable
    temps = list(data["Adj Close"])  # list of (adjusted close) prices
    for i in range(len(temps) - future):  # for the i-th price
        if i < interval:continue  # the first `interval` prices have no full history, so skip them
        y.append([temps[i + future - 1]])  # append the price `future` steps after i to y
        xa = []  # xa holds the prices from index i - interval up to i - 1
        for p in range(interval):
            d = i - interval + p
            xa.append(temps[d])  # collect the `interval` prices into one group
        x.append(xa)  # append the completed group to x
    return(x, y)  # return the finished x and y
'''
Building the training data
First prepare the most tedious part: the code that turns the adjusted closing prices into training data.
It should take a series of closing prices and return train_X and train_y.
'''
def train_data(arr): # arr = series of adjusted closing prices
    train_X = [] # training data
    train_y = [] # array of up/down results
    # learn from 30 days of data, sliding one day at a time toward today
    for i in np.arange(-30, -15):
        s = i + 14 # use the changes over 14 days as the feature
        feature = arr.iloc[i:s] # take rows i through s - 1
        if feature[-1] < arr[s]: # did the price go up the next day?
            train_y.append(1) # 1 if YES
        else:
            train_y.append(0) # 0 if NO
        train_X.append(feature.values)
    # return the pair of training data (train_X) and up/down results (train_y)
    return np.array(train_X), np.array(train_y)
# This returns train_X (the array of feature windows) and train_y (the matching 0/1 labels).
'''
Meaning of reshape(-1,1)
reshape(-1,n) reshapes the array so that it has n columns.
reshape(n,-1) reshapes the array so that it has n rows.
X = test_X.reshape(1,-1)
result = lr.predict(X)
print(result)
'''
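# A small sketch of the shapes produced by the two reshape calls described above;
# `_demo` is a made-up toy array used only for this illustration.
_demo = np.arange(6)               # shape (6,): [0 1 2 3 4 5]
print(_demo.reshape(-1, 1).shape)  # (6, 1): one column, what fit() expects for a single feature
print(_demo.reshape(1, -1).shape)  # (1, 6): one row, what predict() expects for a single sample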
'''
# Train a decision tree
# create a decision tree instance
tree = DecisionTreeClassifier(criterion='gini', max_depth=None)
# train it
tree.fit(train_X, train_y)
# create a decision tree instance
clf = tree.DecisionTreeClassifier()
# train it
clf.fit(train_X, train_y)
pred = tree.predict(test_x)
'''
'''Training and prediction'''
# Choose the algorithm.
lr = LinearRegression()  # choose the algorithm (newer scikit-learn versions no longer accept the old normalize=True option)
'''
1.X_train: training data
2.X_test: test data
3.Y_train: labels for the training data
4.Y_test: labels for the test data
'''
'''Build the training and test data'''
'''Passing the data you want split evenly (usually the labels) to the stratify argument makes the split keep the same value ratios.'''
#X_train, y_train, = train_test_split(Stock_train_df, test_size=0.2, random_state=0,shuffle=False)
#print(",X_train= ",X_train, "y_train= ",y_train)
'''Build the training and test data'''
train_x, train_y = train_data(Stock_train_df) # build the training data
test_x, test_y = train_data(Stock_test_df) # build the test data
'''Passing the data you want split evenly (usually the labels) to the stratify argument makes the split keep the same value ratios.'''
X_train, X_test, y_train, y_test = train_test_split(train_x, train_y, test_size=0.2, random_state=0,shuffle=False)
print(",train_x= ",train_x, "train_y= ",train_y)
'''Training'''
'''Fit the model on the training feature arrays (train_X) and the result array (train_y).'''
lr.fit(X_train, y_train)
'''Prediction'''
'''Feeding the model the test feature arrays (test_X) returns the predictions.'''
print("(test_x=) ",test_x[0])
pred_y = lr.predict(test_x)  # predictions for the test feature arrays (test_X)
print("next-day prediction (pred_y=)", pred_y)
#pred = tree.predict(test_x)
from sklearn.metrics import accuracy_score
# LinearRegression outputs continuous values, so threshold them at 0.5 to get 0/1
# labels before comparing them with the true up/down labels in test_y.
pred_labels = (pred_y > 0.5).astype(int)
print(test_y)
print(pred_labels)
print(accuracy_score(test_y, pred_labels))
print(accuracy_score(test_y, pred_labels, normalize=False))
'''
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
import random
if __name__ == '__main__':
    # load the dataset
    iris = load_iris()
    x = iris.data
    y = iris.target
    # shuffle the loaded dataset
    p = list(zip(x, y))
    random.shuffle(p)
    x, y = zip(*p)
    # set the number of training samples
    train_size = 100
    test_size = len(x) - train_size
    # split the dataset into training data and test data
    train_x = x[:train_size]
    train_y = y[:train_size]
    test_x = x[train_size:]
    test_y = y[train_size:]
    # train the decision tree
    tree = DecisionTreeClassifier(criterion='gini', max_depth=None)
    tree.fit(train_x, train_y)
    # output predictions for the test data using the trained model
    count = 0
    pred = tree.predict(test_x)
    for i in range(test_size):
        print('[{0}] correct:{1}, predict:{2}'.format(i, test_y[i], pred[i]))
        if pred[i] == test_y[i]:
            count += 1
    # compute the accuracy from the predictions
    score = float(count) / test_size
    print('{0} / {1} = {2}'.format(count, test_size, score))
'''
'''
train_test_split
Overview
scikit-learn's train_test_split() function splits the given data into training data and test data in various ways.
It can split NumPy ndarrays, lists, and similar objects in two.
It is used in machine learning to split data into a training (learning) set and a test set for hold-out validation.
'''
'''
train_test_split returns the following:
1.X_train: training data
2.X_test: test data
3.Y_train: labels for the training data
4.Y_test: labels for the test data
'''
'''Basic usage of train_test_split()'''
# Passing a NumPy ndarray to train_test_split() returns a list whose elements are the two split ndarrays.
a = np.arange(10)#numpy.ndarray
print(a)
# [0 1 2 3 4 5 6 7 8 9]
print(train_test_split(a))
# [array([3, 9, 6, 1, 5, 0, 7]), array([2, 8, 4])]
print(type(train_test_split(a)))
# <class 'list'>
print(len(train_test_split(a)))
# 2
'''As below, the two parts are usually unpacked into two separate variables.'''
a_train, a_test = train_test_split(a)
print(a_train)
# [3 4 0 5 7 8 2]
print(a_test)
# [6 1 9]
# The example uses numpy.ndarray, but list (the built-in Python list), pandas.DataFrame, Series and scipy.sparse matrices are also supported.
# Examples with pandas.DataFrame and Series are shown at the end.
'''Specifying a fraction or a count: the test_size and train_size arguments'''
# The test_size argument sets the fraction or number of samples used for the test part (the second element of the returned list).
# The default is test_size=0.25: 25% for testing and the remaining 75% for training. Fractions are rounded up, so in the example above 10 * 0.25 = 2.5 -> 3.
# test_size takes either a fraction between 0.0 and 1.0 or a count.
# Example specified as a fraction.
a_train, a_test = train_test_split(a, test_size=0.6)  # test_size argument
print(a_train)
# [9 1 2 6]
print(a_test)
# [5 7 4 3 0 8]
# Example specified as a count.
a_train, a_test = train_test_split(a, test_size=6)
print(a_train)
# [4 2 1 0]
print(a_test)
# [7 6 3 9 8 5]
# The train_size argument can likewise set the fraction or number of samples used for training; like test_size, it takes a fraction between 0.0 and 1.0 or a count.
a_train, a_test = train_test_split(a, train_size=0.6)
print(a_train)
# [2 9 6 0 4 3]
print(a_test)
# [7 8 5 1]
a_train, a_test = train_test_split(a, train_size=6)
print(a_train)
# [9 3 0 8 7 1]
print(a_test)
# [5 6 4 2]
# As in the examples so far, when only one of test_size and train_size is given the other becomes the remainder, but both can also be specified separately.
a_train, a_test = train_test_split(a, test_size=0.3, train_size=0.4)
print(a_train)
# [3 0 4 9]
print(a_test)
# [7 2 8]
a_train, a_test = train_test_split(a, test_size=3, train_size=4)
print(a_train)
# [9 7 0 4]
print(a_test)
# [3 8 5]
'''Choosing whether to shuffle: the shuffle argument'''
# As in the examples so far, by default the elements are shuffled before splitting. With shuffle=False the data is split in order from the start without shuffling.
a_train, a_test = train_test_split(a, shuffle=False)
#print(a_train)
# [0 1 2 3 4 5 6]
#print(a_test)
# [7 8 9]
'''Fixing the random seed: the random_state argument'''
# When shuffling, the split is random on every run by default. Fixing the seed with the random_state argument makes the split the same every time.
a_train, a_test = train_test_split(a, random_state=0)
#print(a_train)
# [9 1 6 7 3 0 5]
#print(a_test)
# [2 8 4]
'''When comparing the performance of machine-learning models, the result would depend on how the data happens to be split, so the random seed must be fixed so that the split is always the same.'''
X = np.arange(20).reshape(2, 10).T
Z = np.arange(20).reshape(2, 10).T
'''Stratified sampling: the stratify argument'''
# Supervised learning, for example, uses two pieces of data: the feature matrix (explanatory variables) and the labels (objective variable).
# In binary (two-class) classification the labels take one of the values 0 or 1, as below.
y = np.array([0] * 5 + [1] * 5)
#print(y)
# [0 0 0 0 0 1 1 1 1 1]
'''Splitting several arrays at once
# train_test_split() can also split several arrays at the same time.
# In the example below two arrays are passed as arguments; the result is returned as a tuple with, for each given array, its training part followed by its test part.
# When splitting data for supervised learning, the label ratio in the training and test parts should ideally match the ratio in the original data,
# but, as below, it can happen that, for example, no 0 elements end up in the test part.
X_train, X_test, y_train, y_test,Z_train, Z_test = train_test_split(X, y, Z,test_size=0.2, random_state=0, shuffle=False)
print(y_train)
# [0 1 0 0 0 0 1 1]
print(y_test)
# [1 1]
print(X_train)
# [0 1 0 0 0 0 1 1]
print(X_test)
# [1 1]
print(Z_train)
# [0 1 0 0 0 0 1 1]
print(Z_test)
# [1 1]
'''
'''Passing the data you want split evenly (usually the labels) to the stratify argument makes the split keep the same value ratios.'''
#1.X_train: training data
#2.X_test: test data
#3.Y_train: labels for the training data
#4.Y_test: labels for the test data
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8,test_size=0.2, random_state=100,stratify=y)
print(" 訓練データ",X_train)
# [1 1 0 0 0 1 1 0]
print("テストデータ",X_test)
# [1 0]
print("訓練データの正解ラベル",y_train)
# [1 1 0 0 0 1 1 0]
print("テストデータの正解ラベル",y_test)
# [1 0]
#Since the sample size is small this is hard to picture, so see the following concrete example as well.
#Concrete example (the iris dataset)
#As a concrete example, split the iris dataset.
#It has 150 samples with four features: Sepal Length, Sepal Width, Petal Length and Petal Width,
# classified into the three species Setosa, Versicolor and Virginica.
#Fetch the data with load_iris(). The labels y contain the three values 0, 1 and 2 in equal numbers.
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
data = load_iris()
X = data['data']
y = data['target']
print(X.shape)
# (150, 4)
print(X[:5])
# [[5.1 3.5 1.4 0.2]
# [4.9 3. 1.4 0.2]
# [4.7 3.2 1.3 0.2]
# [4.6 3.1 1.5 0.2]
# [5. 3.6 1.4 0.2]]
print(y.shape)
# (150,)
print(y)
# [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
# 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2
# 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
# 2 2]
#train_test_split() splits it as follows. Since neither test_size nor train_size is set, the default applies: 75% for training and 25% for testing. Because the arrays are large, only their shapes are shown.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
print(X_train.shape)
# (112, 4)
print(X_test.shape)
# (38, 4)
print(y_train.shape)
# (112,)
print(y_test.shape)
# (38,)
#Checking the test labels y_test shows that the label counts are uneven.
print(y_test)
# [2 1 0 2 0 2 0 1 1 1 2 1 1 1 1 0 1 1 0 0 2 1 0 0 2 0 0 1 1 0 2 1 0 2 2 1 0
# 1]
print((y_test == 0).sum())
# 13
print((y_test == 1).sum())
# 16
print((y_test == 2).sum())
# 9
#With the stratify argument, the split keeps each label's proportion equal to its proportion in the original data (three equal classes in this example). As below, the counts may not match exactly when they do not divide evenly, but they stay as close to the original ratio as possible.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, stratify=y)
print(y_test)
# [0 0 0 0 1 1 1 0 1 2 2 2 1 2 1 0 0 2 0 1 2 1 1 0 2 0 0 1 2 1 0 1 2 2 0 1 2
# 2]
print((y_test == 0).sum())
# 13
print((y_test == 1).sum())
# 13
print((y_test == 2).sum())
# 12
#The pandas.DataFrame and Series case
#pandas.DataFrame and Series can be split just like two-dimensional and one-dimensional arrays, respectively.
#Here too the iris dataset is used as the example.
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
data = load_iris()
X_df = pd.DataFrame(data['data'], columns=data['feature_names'])
y_s = | pd.Series(data['target']) | pandas.Series |
import pickle
import pandas as pd
import ot
import argparse
from scipy.spatial.distance import cdist
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.cluster import AffinityPropagation
from sklearn.cluster import KMeans
from collections import Counter
from scipy.stats import entropy
from collections import defaultdict
import numpy as np
import os
import sys
def combine_clusters(labels, embeddings, threshold=10, remove=None):
    # avoid a mutable default argument; `remove` accumulates cluster ids to ignore
    if remove is None:
        remove = []
    cluster_embeds = defaultdict(list)
for label, embed in zip(labels, embeddings):
cluster_embeds[label].append(embed)
min_num_examples = threshold
legit_clusters = []
for id, num_examples in Counter(labels).items():
if num_examples >= threshold:
legit_clusters.append(id)
if id not in remove and num_examples < min_num_examples:
min_num_examples = num_examples
min_cluster_id = id
if len(set(labels)) == 2:
return labels
min_dist = 1
all_dist = []
cluster_labels = ()
embed_list = list(cluster_embeds.items())
for i in range(len(embed_list)):
for j in range(i+1,len(embed_list)):
id, embed = embed_list[i]
id2, embed2 = embed_list[j]
if id in legit_clusters and id2 in legit_clusters:
dist = compute_averaged_embedding_dist(embed, embed2)
all_dist.append(dist)
if dist < min_dist:
min_dist = dist
cluster_labels = (id, id2)
std = np.std(all_dist)
avg = np.mean(all_dist)
limit = avg - 2 * std
if min_dist < limit:
for n, i in enumerate(labels):
if i == cluster_labels[0]:
labels[n] = cluster_labels[1]
return combine_clusters(labels, embeddings, threshold, remove)
if min_num_examples >= threshold:
return labels
min_dist = 1
cluster_labels = ()
for id, embed in cluster_embeds.items():
if id != min_cluster_id:
dist = compute_averaged_embedding_dist(embed, cluster_embeds[min_cluster_id])
if dist < min_dist:
min_dist = dist
cluster_labels = (id, min_cluster_id)
if cluster_labels[0] not in legit_clusters:
for n, i in enumerate(labels):
if i == cluster_labels[0]:
labels[n] = cluster_labels[1]
else:
if min_dist < limit:
for n, i in enumerate(labels):
if i == cluster_labels[0]:
labels[n] = cluster_labels[1]
else:
remove.append(min_cluster_id)
return combine_clusters(labels, embeddings, threshold, remove)
def compute_jsd(p, q):
p = np.asarray(p)
q = np.asarray(q)
m = (p + q) / 2
return (entropy(p, m) + entropy(q, m)) / 2
def cluster_word_embeddings_aff_prop(word_embeddings):
clustering = AffinityPropagation().fit(word_embeddings)
labels = clustering.labels_
exemplars = clustering.cluster_centers_
return labels, exemplars
def cluster_word_embeddings_k_means(word_embeddings, k, random_state):
clustering = KMeans(n_clusters=k, random_state=random_state).fit(word_embeddings)
labels = clustering.labels_
exemplars = clustering.cluster_centers_
return labels, exemplars
def compute_averaged_embedding_dist(t1_embeddings, t2_embeddings):
t1_mean = np.mean(t1_embeddings, axis=0)
t2_mean = np.mean(t2_embeddings, axis=0)
dist = 1.0 - cosine_similarity([t1_mean], [t2_mean])[0][0]
#print("Averaged embedding cosine dist:", dist)
return dist
def compute_divergence_from_cluster_labels(embeds1, embeds2, labels1, labels2, threshold):
labels_all = list(np.concatenate((labels1, labels2)))
counts1 = Counter(labels1)
counts2 = Counter(labels2)
n_senses = list(set(labels_all))
#print("Clusters:", len(n_senses))
t1 = []
t2 = []
label_list = []
for i in n_senses:
if counts1[i] + counts2[i] > threshold:
t1.append(counts1[i])
t2.append(counts2[i])
label_list.append(i)
t1 = np.array(t1)
t2 = np.array(t2)
emb1_means = np.array([np.mean(embeds1[labels1 == clust], 0) for clust in label_list])
emb2_means = np.array([np.mean(embeds2[labels2 == clust], 0) for clust in label_list])
M = np.nan_to_num(np.array([cdist(emb1_means, emb2_means, metric='cosine')])[0], nan=1)
t1_dist = t1 / t1.sum()
t2_dist = t2 / t2.sum()
wass = ot.emd2(t1_dist, t2_dist, M)
jsd = compute_jsd(t1_dist, t2_dist)
return jsd, wass
def detect_meaning_gain_and_loss(labels1, labels2, threshold):
labels1 = list(labels1)
labels2 = list(labels2)
all_count = Counter(labels1 + labels2)
first_count = Counter(labels1)
second_count = Counter(labels2)
gained_meaning = False
lost_meaning = False
all = 0
meaning_gain_loss = 0
for label, c in all_count.items():
all += c
if c >= threshold:
if label not in first_count or first_count[label] <= 2:
gained_meaning=True
meaning_gain_loss += c
if label not in second_count or second_count[label] <= 2:
lost_meaning=True
meaning_gain_loss += c
return str(gained_meaning) + '/' + str(lost_meaning), meaning_gain_loss/all
def compute_divergence_across_many_periods(embeddings, labels, splits, corpus_slices, threshold, method):
all_clusters = []
all_embeddings = []
clusters_dict = {}
for split_num, split in enumerate(splits):
if split_num > 0:
clusters = labels[splits[split_num-1]:split]
clusters_dict[corpus_slices[split_num - 1]] = clusters
all_clusters.append(clusters)
ts_embeds = embeddings[splits[split_num - 1]:split]
all_embeddings.append(ts_embeds)
all_measures = []
all_meanings = []
for i in range(len(all_clusters)):
if i < len(all_clusters) -1:
try:
jsd, wass = compute_divergence_from_cluster_labels(all_embeddings[i],all_embeddings[i+1], all_clusters[i],all_clusters[i+1], threshold)
except:
jsd, wass = 0, 0
meaning, meaning_score = detect_meaning_gain_and_loss(all_clusters[i],all_clusters[i+1], threshold)
all_meanings.append(meaning)
if method == 'WD':
measure = wass
else:
measure = jsd
all_measures.append(measure)
try:
entire_jsd, entire_wass = compute_divergence_from_cluster_labels(all_embeddings[0],all_embeddings[-1], all_clusters[0],all_clusters[-1], threshold)
except:
entire_jsd, entire_wass = 0, 0
meaning, meaning_score = detect_meaning_gain_and_loss(all_clusters[0],all_clusters[-1], threshold)
all_meanings.append(meaning)
avg_measure = sum(all_measures)/len(all_measures)
try:
measure = entire_wass
except:
measure = 0
all_measures.extend([measure, avg_measure])
all_measures = [float("{:.6f}".format(score)) for score in all_measures]
return all_measures, all_meanings, clusters_dict
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Measure semantic shift')
parser.add_argument("--method", default='WD', const='all', nargs='?',
help="A method for calculating distance", choices=['WD', 'JSD'])
parser.add_argument("--corpus_slices",
default='1960;1990',
type=str,
help="Time slices names separated by ';'.")
parser.add_argument("--get_additional_info", action="store_true", help='Whether the cluster labels and sentences, required for interpretation, are saved or not.')
parser.add_argument('--results_dir_path', type=str, default='results_coha', help='Path to the folder to save the results.')
parser.add_argument('--embeddings_path', type=str, default='embeddings/coha_fine_tuned_scalable.pickle', help='Path to the embeddings pickle file.')
parser.add_argument('--define_words_to_interpret', type=str, default='', help='Define a set of words separated by ";" for interpretation if you do not wish to save data for all words.')
parser.add_argument('--random_state', type=int, default=123, help='Choose a random state for reproducibility of clustering.')
parser.add_argument('--cluster_size_threshold', type=int, default=10, help='Clusters smaller than a threshold will be merged or deleted.')
args = parser.parse_args()
random_state = args.random_state
threshold = args.cluster_size_threshold
get_additional_info = args.get_additional_info
print(args.embeddings_path)
embeddings_file = args.embeddings_path
corpus_slices = args.corpus_slices.split(';')
print("Corpus slices:", corpus_slices)
methods = ['WD', 'JSD']
if args.method not in methods:
print("Method not valid, valid choices are: ", ", ".join(methods))
sys.exit()
print("Loading ", embeddings_file)
try:
bert_embeddings, count2sents = pickle.load(open(embeddings_file, 'rb'))
except:
bert_embeddings = pickle.load(open(embeddings_file, 'rb'))
count2sents = None
if len(args.define_words_to_interpret) > 0:
target_words = args.define_words_to_interpret.split(';')
else:
target_words = list(bert_embeddings.keys())
if get_additional_info and len(target_words) > 100:
print('Define a list of words to interpret with less than 100 words or set "get_additional_info" flag to False')
sys.exit()
measure_vec = []
cosine_dist_vec = []
sentence_dict = {}
aff_prop_labels_dict = {}
aff_prop_centroids_dict = {}
kmeans_5_labels_dict = {}
kmeans_5_centroids_dict = {}
kmeans_7_labels_dict = {}
kmeans_7_centroids_dict = {}
aff_prop_pref = -430
print("Clustering BERT embeddings")
print("Len target words: ", len(target_words))
results = []
print("Words in embeds: ", bert_embeddings.keys())
for i, word in enumerate(target_words):
print("\n=======", i + 1, "- word:", word.upper(), "=======")
if word not in bert_embeddings:
continue
emb = bert_embeddings[word]
if i == 0:
print("Time periods in embeds: ", emb.keys())
all_embeddings = []
all_sentences = {}
splits = [0]
all_slices_present = True
all_freqs = []
cs_counts = []
for cs in corpus_slices:
cs_embeddings = []
cs_sentences = []
count_all = 0
text_seen = set()
if cs not in emb:
all_slices_present = False
print('Word missing in slice: ', cs)
continue
counts = [x[1] for x in emb[cs]]
cs_counts.append(sum(counts))
all_freqs.append(sum(counts))
cs_text = cs + '_text'
print("Slice: ", cs)
print("Num embeds: ", len(emb[cs]))
num_sent_codes = 0
for idx in range(len(emb[cs])):
#get summed embedding and its count, devide embedding by count
try:
e, count_emb = emb[cs][idx]
e = e/count_emb
except:
e = emb[cs][idx]
sents = set()
#print("Num sentences: ", len(sent_codes))
if count2sents is not None:
sent_codes = emb[cs_text][idx]
num_sent_codes += len(sent_codes)
for sent in sent_codes:
if sent in count2sents[cs]:
text = count2sents[cs][sent]
sents.add(text)
#print(text)
cs_embeddings.append(e)
cs_sentences.append(" ".join(list(sents)))
all_embeddings.append(np.array(cs_embeddings))
all_sentences[cs] = cs_sentences
splits.append(splits[-1] + len(cs_embeddings))
print("Num all sents: ", num_sent_codes)
print("Num words in corpus slice: ", cs_counts)
embeddings_concat = np.concatenate(all_embeddings, axis=0)
#we can not use kmeans7 if there are less than 7 examples
if embeddings_concat.shape[0] < 7 or not all_slices_present:
continue
else:
aff_prop_labels, aff_prop_centroids = cluster_word_embeddings_aff_prop(embeddings_concat)
aff_prop_labels = combine_clusters(aff_prop_labels, embeddings_concat, threshold=threshold, remove=[])
all_aff_prop_measures, all_meanings, clustered_aff_prop_labels = compute_divergence_across_many_periods(embeddings_concat, aff_prop_labels, splits, corpus_slices, threshold, args.method)
kmeans_5_labels, kmeans_5_centroids = cluster_word_embeddings_k_means(embeddings_concat, 5, random_state)
kmeans_5_labels = combine_clusters(kmeans_5_labels, embeddings_concat, threshold=threshold, remove=[])
all_kmeans5_measures, all_meanings, clustered_kmeans_5_labels = compute_divergence_across_many_periods(embeddings_concat, kmeans_5_labels, splits, corpus_slices, threshold, args.method)
kmeans_7_labels, kmeans_7_centroids = cluster_word_embeddings_k_means(embeddings_concat, 7, random_state)
kmeans_7_labels = combine_clusters(kmeans_7_labels, embeddings_concat, threshold=threshold, remove=[])
all_kmeans7_measures, all_meanings, clustered_kmeans_7_labels = compute_divergence_across_many_periods(embeddings_concat, kmeans_7_labels, splits, corpus_slices, threshold, args.method)
all_freqs = all_freqs + [sum(all_freqs)] + [sum(all_freqs)/len(all_freqs)]
word_results = [word] + all_aff_prop_measures + all_kmeans5_measures + all_kmeans7_measures + all_freqs + all_meanings
print("Results:", word_results)
results.append(word_results)
#add results to dataframe for saving
if get_additional_info:
sentence_dict[word] = all_sentences
aff_prop_labels_dict[word] = clustered_aff_prop_labels
aff_prop_centroids_dict[word] = aff_prop_centroids
kmeans_5_labels_dict[word] = clustered_kmeans_5_labels
kmeans_5_centroids_dict[word] = kmeans_5_centroids
kmeans_7_labels_dict[word] = clustered_kmeans_7_labels
            kmeans_7_centroids_dict[word] = kmeans_7_centroids
    # add results to dataframe for saving
    columns = ['word']
methods = ['AP', 'K5', 'K7', 'FREQ', 'MEANING GAIN/LOSS']
for method in methods:
for num_slice, cs in enumerate(corpus_slices):
if method == 'FREQ':
columns.append(method + ' ' + cs)
else:
if num_slice < len(corpus_slices) - 1:
columns.append(method + ' ' + cs + '-' + corpus_slices[num_slice + 1])
columns.append(method + ' All')
if method != 'MEANING GAIN/LOSS':
columns.append(method + ' Avg')
if not os.path.exists(args.results_dir_path):
os.makedirs(args.results_dir_path)
csv_file = os.path.join(args.results_dir_path, "word_ranking_results_" + args.method + ".csv")
# save results to CSV
results_df = | pd.DataFrame(results, columns=columns) | pandas.DataFrame |
import datetime as dt
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal, assert_frame_equal
import pytest
from solarforecastarbiter.datamodel import Observation
from solarforecastarbiter.validation import tasks, validator
from solarforecastarbiter.validation.quality_mapping import (
LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING,
DAILY_VALIDATION_FLAG)
@pytest.fixture()
def make_observation(single_site):
def f(variable):
return Observation(
name='test', variable=variable, interval_value_type='mean',
interval_length=pd.Timedelta('1hr'), interval_label='beginning',
site=single_site, uncertainty=0.1, observation_id='OBSID',
provider='Organization 1', extra_parameters='')
return f
@pytest.fixture()
def default_index(single_site):
return [pd.Timestamp('2019-01-01T08:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T09:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T10:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T11:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T13:00:00', tz=single_site.timezone)]
@pytest.fixture()
def daily_index(single_site):
out = pd.date_range(start='2019-01-01T08:00:00',
end='2019-01-01T19:00:00',
freq='1h',
tz=single_site.timezone)
return out.append(
pd.Index([pd.Timestamp('2019-01-02T09:00:00',
tz=single_site.timezone)]))
def test_validate_ghi(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi']]
obs = make_observation('ghi')
data = pd.Series([10, 1000, -100, 500, 300], index=default_index)
flags = tasks.validate_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 1, 0, 1, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_validate_mostly_clear(mocker, make_observation):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi']]
obs = make_observation('ghi').replace(interval_length=pd.Timedelta('5min'))
index = pd.date_range(start='2019-04-01T11:00', freq='5min',
tz=obs.site.timezone, periods=11)
data = pd.Series([742, 749, 756, 763, 769, 774, 779, 784, 789, 793, 700],
index=index)
flags = tasks.validate_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
pd.Series([1] * 10 + [0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_apply_immediate_validation(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
val = tasks.apply_immediate_validation(obs, data)
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert_frame_equal(val, out)
def test_apply_immediate_validation_already_validated(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 18), (100, 18), (200, 18), (-1, 19), (1500, 18)],
index=default_index,
columns=['value', 'quality_flag'])
val = tasks.apply_immediate_validation(obs, data)
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert_frame_equal(val, out)
@pytest.mark.parametrize('var', ['air_temperature', 'wind_speed', 'dni', 'dhi',
'poa_global', 'relative_humidity'])
def test_apply_immediate_validation_other(
mocker, make_observation, default_index, var):
mock = mocker.MagicMock()
mocker.patch.dict(
'solarforecastarbiter.validation.tasks.IMMEDIATE_VALIDATION_FUNCS',
{var: mock})
obs = make_observation(var)
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
tasks.apply_immediate_validation(obs, data)
assert mock.called
@pytest.mark.parametrize('var', ['availability', 'curtailment', 'event',
'net_load'])
def test_apply_immediate_validation_defaults(
mocker, make_observation, default_index, var):
mock = mocker.spy(tasks, 'validate_defaults')
obs = make_observation(var)
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
tasks.apply_immediate_validation(obs, data)
assert mock.called
def test_fetch_and_validate_observation_ghi(mocker, make_observation,
default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_fetch_and_validate_observation_ghi_nones(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(None, 1)] * 5, index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
base = (
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] |
LATEST_VERSION_FLAG
)
out['quality_flag'] = [
base | DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
base,
base,
base,
base | DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY']
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_fetch_and_validate_observation_not_listed(mocker, make_observation,
default_index):
obs = make_observation('curtailment')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
LATEST_VERSION_FLAG,
LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_dni(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_dni_limits_QCRad']]
obs = make_observation('dni')
data = pd.Series([10, 1000, -100, 500, 500], index=default_index)
flags = tasks.validate_dni(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 0, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_dni(mocker, make_observation,
default_index):
obs = make_observation('dni')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED']]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_dhi(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_dhi_limits_QCRad']]
obs = make_observation('dhi')
data = pd.Series([10, 1000, -100, 200, 200], index=default_index)
flags = tasks.validate_dhi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_dhi(mocker, make_observation,
default_index):
obs = make_observation('dhi')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED']]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_poa_global(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_poa_clearsky']]
obs = make_observation('poa_global')
data = pd.Series([10, 1000, -400, 300, 300], index=default_index)
flags = tasks.validate_poa_global(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_poa_global(mocker, make_observation,
default_index):
obs = make_observation('poa_global')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_air_temp(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_temperature_limits']]
obs = make_observation('air_temperature')
data = pd.Series([10, 1000, -400, 30, 20], index=default_index)
flags = tasks.validate_air_temperature(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_air_temperature(
mocker, make_observation, default_index):
obs = make_observation('air_temperature')
data = pd.DataFrame(
[(0, 0), (200, 0), (20, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_wind_speed(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_wind_limits']]
obs = make_observation('wind_speed')
data = pd.Series([10, 1000, -400, 3, 20], index=default_index)
flags = tasks.validate_wind_speed(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_wind_speed(
mocker, make_observation, default_index):
obs = make_observation('wind_speed')
data = pd.DataFrame(
[(0, 0), (200, 0), (15, 0), (1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_relative_humidity(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_rh_limits']]
obs = make_observation('relative_humidity')
data = pd.Series([10, 101, -400, 60, 20], index=default_index)
flags = tasks.validate_relative_humidity(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_relative_humidity(
mocker, make_observation, default_index):
obs = make_observation('relative_humidity')
data = pd.DataFrame(
[(0, 0), (200, 0), (15, 0), (40, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_ac_power(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ac_power_limits']]
obs = make_observation('ac_power')
data = pd.Series([0, 1, -1, 0.001, 0.001], index=default_index)
flags = tasks.validate_ac_power(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_ac_power(mocker, make_observation,
default_index):
obs = make_observation('ac_power')
data = pd.DataFrame(
[(0, 0), (1, 0), (-1, 0), (0.001, 1), (0.001, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_dc_power(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_dc_power_limits']]
obs = make_observation('dc_power')
data = pd.Series([0, 1, -1, 0.001, 0.001], index=default_index)
flags = tasks.validate_dc_power(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_dc_power(mocker, make_observation,
default_index):
obs = make_observation('dc_power')
data = pd.DataFrame(
[(0, 0), (1, 0), (-1, 0), (0.001, 1), (0.001, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_daily_ghi(mocker, make_observation, daily_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi',
'detect_stale_values',
'detect_interpolation']]
obs = make_observation('ghi')
data = pd.Series(
# 8 9 10 11 12 13 14 15 16 17 18 19 23
[10, 1000, -100, 500, 300, 300, 300, 300, 100, 0, 100, 0, 0],
index=daily_index)
flags = tasks.validate_daily_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['STALE VALUES'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'],
)
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_ghi_daily(mocker, make_observation,
daily_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(10, 0), (1000, 0), (-100, 0), (500, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
BASE_FLAG = LATEST_VERSION_FLAG | DAILY_VALIDATION_FLAG
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
BASE_FLAG
]
assert post_mock.called
posted_df = pd.concat([cal[0][1] for cal in post_mock.call_args_list])
assert_frame_equal(posted_df, out)
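# fetch_and_validate_observation may post the validated values in more than one chunk, so the
# per-call DataFrames from post_mock.call_args_list are concatenated before comparison.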
def test_fetch_and_validate_observation_ghi_zeros(mocker, make_observation,
daily_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0)] * 13,
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
base = (
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
LATEST_VERSION_FLAG | DAILY_VALIDATION_FLAG
)
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG |
DAILY_VALIDATION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG |
DAILY_VALIDATION_FLAG,
base,
base,
base,
base,
base,
base,
base,
base | DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
base | DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
base | DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
base | DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY']
]
assert post_mock.called
posted_df = pd.concat([cal[0][1] for cal in post_mock.call_args_list])
assert_frame_equal(posted_df, out)
def test_validate_daily_dc_power(mocker, make_observation, daily_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'detect_stale_values',
'detect_interpolation']]
obs = make_observation('dc_power')
data = pd.Series(
# 8 9 10 11 12 13 14 15 16 17 18 19 23
[0, 1000, -100, 500, 300, 300, 300, 300, 100, 0, 100, 0, 0],
index=daily_index)
flags = tasks.validate_daily_dc_power(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['STALE VALUES'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'],
)
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_dc_power_daily(
mocker, make_observation, daily_index):
obs = make_observation('dc_power')
data = pd.DataFrame(
[(10, 0), (1000, 0), (-100, 0), (500, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
BASE_FLAG = LATEST_VERSION_FLAG | DAILY_VALIDATION_FLAG
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
BASE_FLAG
]
assert post_mock.called
posted_df = pd.concat([cal[0][1] for cal in post_mock.call_args_list])
assert_frame_equal(posted_df, out)
def test_validate_daily_ac_power(mocker, make_observation, daily_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'detect_stale_values',
'detect_interpolation',
'detect_clipping']]
obs = make_observation('ac_power')
data = pd.Series(
# 8 9 10 11 12 13 14 15 16 17 18 19 23
[0, 100, -100, 100, 300, 300, 300, 300, 100, 0, 100, 0, 0],
index=daily_index)
flags = tasks.validate_daily_ac_power(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['STALE VALUES'],
pd.Series([0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['CLIPPED VALUES']
)
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_ac_power_daily(
mocker, make_observation, daily_index):
obs = make_observation('ac_power')
data = pd.DataFrame(
[(10, 0), (100, 0), (-100, 0), (100, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
BASE_FLAG = LATEST_VERSION_FLAG | DAILY_VALIDATION_FLAG
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLIPPED VALUES'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLIPPED VALUES'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
BASE_FLAG
]
assert post_mock.called
posted_df = pd.concat([cal[0][1] for cal in post_mock.call_args_list])
assert_frame_equal(posted_df, out)
@pytest.mark.parametrize('var', ['air_temperature', 'wind_speed', 'dni', 'dhi',
'poa_global', 'relative_humidity', 'net_load',
])
def test_fetch_and_validate_observation_other(var, mocker, make_observation,
daily_index):
obs = make_observation(var)
data = pd.DataFrame(
[(0, 0), (100, 0), (-100, 0), (100, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
    validated = pd.Series(2, index=daily_index)
from py4web import action
from py4web import response
# index is served without an HTML template since the route returns plain text
@action("index")
def index():
return "Hello World"
@action("mpl_barchart")
def mpl_barchart():
# from py4web import response
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
import io
from py4web import response
response.headers['Content-Type']='image/png'
np.random.seed(19680801)
plt.rcdefaults()
fig, ax = plt.subplots()
## Example data
people = ('Tom', 'Dick', 'Harry', 'Slim', 'Jim')
y_pos = np.arange(len(people))
performance = 3 + 10 * np.random.rand(len(people))
error = np.random.rand(len(people))
ax.barh(y_pos, performance, xerr=error, align='center')
ax.set_yticks(y_pos)
ax.set_yticklabels(people)
ax.invert_yaxis() # labels read top-to-bottom
ax.set_xlabel('Performance')
ax.set_title('How fast do you want to go today?')
#plt.show()
s = io.BytesIO()
canvas=FigureCanvas(fig)
canvas.print_png(s)
return s.getvalue()
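# The FigureCanvas/BytesIO boilerplate used above is repeated in the other plotting routes in
# this module. A small helper like this (hypothetical, not part of the original app) could factor
# it out; it simply renders any Matplotlib figure to PNG bytes suitable for a py4web response body.
def _fig_to_png(fig):
    from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
    import io
    buf = io.BytesIO()
    FigureCanvas(fig).print_png(buf)  # rasterize the figure with the Agg backend
    return buf.getvalue()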
@action("mpl_sinusSubPlots")
def mpl_sinusSubPlots():
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
import io
# from py4web import response #needs to be imported, can be done outside
response.headers['Content-Type']='image/png'
def f(t):
s1 = np.cos(2*np.pi*t)
e1 = np.exp(-t)
return s1 * e1
t1 = np.arange(0.0, 5.0, 0.1)
t2 = np.arange(0.0, 5.0, 0.02)
t3 = np.arange(0.0, 2.0, 0.01)
    fig, axs = plt.subplots(3, 1, constrained_layout=True)  # 3 rows of subplots; axs is returned as an array of Axes
fig.suptitle('This is a somewhat long figure title', fontsize=16)
axs[0].plot(t3, np.cos(2*np.pi*t3), '--')
axs[0].set_title('subplot 1')
axs[0].set_xlabel('time (s)')
axs[0].set_ylabel('Undamped')
axs[1].plot(t3, np.cos(1*np.pi*t3), '--')
axs[1].set_title('subplot 2')
axs[1].set_xlabel('time (s)')
axs[1].set_ylabel('Undamped')
axs[2].plot(t1, f(t1), 'o', t2, f(t2), '-')
axs[2].set_title('subplot 3')
axs[2].set_xlabel('distance (m)')
axs[2].set_ylabel('Damped oscillation')
#plt.show()
s1 = io.BytesIO()
canvas=FigureCanvas(fig)
canvas.print_png(s1)
return s1.getvalue()
@action("mpl_linesHaV")
def mpl_linesHaV():
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
import numpy.random as rnd
import io
# from py4web import response #needs to be imported, can be done outside
response.headers['Content-Type']='image/png'
def f(t):
s1 = np.sin(2 * np.pi * t)
e1 = np.exp(-t)
return np.absolute((s1 * e1)) + .05
t = np.arange(0.0, 5.0, 0.1)
s = f(t)
nse = rnd.normal(0.0, 0.3, t.shape) * s
fig = plt.figure(figsize=(12, 6))
vax = fig.add_subplot(121)
hax = fig.add_subplot(122)
vax.plot(t, s + nse, '^')
vax.vlines(t, [0], s)
vax.set_xlabel('time (s)')
vax.set_title('Vertical lines demo')
hax.plot(s + nse, t, '^')
hax.hlines(t, [0], s, lw=2)
hax.set_xlabel('time (s)')
hax.set_title('Horizontal lines demo')
#plt.show()
s = io.BytesIO()
canvas=FigureCanvas(fig)
canvas.print_png(s)
return s.getvalue()
@action("mpl_violinplot")
def mpl_violinplot():
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
import io
# from py4web import response #needs to be imported, can be done outside
response.headers['Content-Type']='image/png'
np.random.seed(19680801)
fs = 10 # fontsize
pos = [1, 2, 4, 5, 7, 8]
data = [np.random.normal(0, std, size=100) for std in pos]
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(6, 6))
axes[0, 0].violinplot(data, pos, points=20, widths=0.3,
showmeans=True, showextrema=True, showmedians=True)
axes[0, 0].set_title('Custom violinplot 1', fontsize=fs)
axes[0, 1].violinplot(data, pos, points=40, widths=0.5,
showmeans=True, showextrema=True, showmedians=True,
bw_method='silverman')
axes[0, 1].set_title('Custom violinplot 2', fontsize=fs)
axes[0, 2].violinplot(data, pos, points=60, widths=0.7, showmeans=True,
showextrema=True, showmedians=True, bw_method=0.5)
axes[0, 2].set_title('Custom violinplot 3', fontsize=fs)
axes[1, 0].violinplot(data, pos, points=80, vert=False, widths=0.7,
showmeans=True, showextrema=True, showmedians=True)
axes[1, 0].set_title('Custom violinplot 4', fontsize=fs)
axes[1, 1].violinplot(data, pos, points=100, vert=False, widths=0.9,
showmeans=True, showextrema=True, showmedians=True,
bw_method='silverman')
axes[1, 1].set_title('Custom violinplot 5', fontsize=fs)
axes[1, 2].violinplot(data, pos, points=200, vert=False, widths=1.1,
showmeans=True, showextrema=True, showmedians=True,
bw_method=0.5)
axes[1, 2].set_title('Custom violinplot 6', fontsize=fs)
for ax in axes.flat:
ax.set_yticklabels([])
fig.suptitle("Violin Plotting Examples")
fig.subplots_adjust(hspace=0.4)
#plt.show()
s = io.BytesIO()
canvas=FigureCanvas(fig)
canvas.print_png(s)
return s.getvalue()
@action("mpl_plot")
def mpl_plot(title='title',xlab='x',ylab='y',mode='plot',
data={'xxx':[(0,0),(1,1),(1,2),(3,3)],
'yyy':[(0,0,.2,.2),(2,1,0.2,0.2),(2,2,0.2,0.2),
(3,3,0.2,0.3)]}):
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import io
response.headers['Content-Type']='image/png'
fig=Figure()
fig.set_facecolor('white')
ax=fig.add_subplot(111)
if title: ax.set_title(title)
if xlab: ax.set_xlabel(xlab)
if ylab: ax.set_ylabel(ylab)
legend=[]
keys=sorted(data)
for key in keys:
stream = data[key]
(x,y)=([],[])
for point in stream:
x.append(point[0])
y.append(point[1])
if mode=='plot':
ell=ax.plot(x, y)
legend.append((ell,key))
if mode=='hist':
ell=ax.hist(y,20)
if legend:
        ax.legend([x for (x, y) in legend], [y for (x, y) in legend],
                  loc='upper right', shadow=True)
canvas=FigureCanvas(fig)
stream=io.BytesIO() #stream=cStringIO.StringIO()
canvas.print_png(stream)
return stream.getvalue()
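# Usage note (illustrative only): with the default mode='plot' each key in `data` is drawn as a
# line from its (x, y) pairs; mode='hist' instead histograms the y-values of each series (20 bins).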
@action("mpl_numpytst")
def mpl_numpytst():
response.headers['Content-Type']='image/png'
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
import io
x = [1, 2, 3, 4, 5]
y = [1, 4, 9, 16, 25]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(x, y)
s = io.BytesIO()
canvas=FigureCanvas(fig)
canvas.print_png(s)
return s.getvalue()
@action("mpl_line_json")
def mpl_line_json():
import pandas as pd
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
import io
response.headers['Content-Type']='image/png'
d = {"sell": [
{
"Rate": 0.425,
"Quantity": 0.25
},
{
"Rate": 0.6425,
"Quantity": 0.40
},
{
"Rate": 0.7025,
"Quantity": 0.8
},
{
"Rate": 0.93,
"Quantity": 0.59
}
]}
    df = pd.DataFrame(d['sell'])
import numpy as np
import pandas as pd
import argparse
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
import math
import copy
import sklearn
import sklearn.cluster
import random
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from sklearn.metrics import silhouette_score, davies_bouldin_score,v_measure_score
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import AgglomerativeClustering
from sklearn.decomposition import NMF
from sklearn.decomposition import PCA
import multiprocessing as mp
from functools import partial
from scipy.spatial import distance
import os
from scipy.stats import norm
from scipy.stats import multivariate_normal
from scipy.stats import ttest_ind
from scipy.stats import ks_2samp
from hmmlearn import hmm
from scipy.io import mmread
from scipy.sparse import csr_matrix
import multiprocessing
import warnings
os.environ['NUMEXPR_MAX_THREADS'] = '50'
def jointLikelihoodEnergyLabels_helper(label,data,states,norms):
e = 1e-50
r0 = [x for x in range(data.shape[0]) if states[x,label]==0]
l0 = np.sum(-np.log(np.asarray(norms[0].pdf(data[r0,:])+e)),axis=0)
r1 = [x for x in range(data.shape[0]) if states[x,label]==1]
l1 = np.sum(-np.log(np.asarray(norms[1].pdf(data[r1,:])+e)),axis=0)
r2 = [x for x in range(data.shape[0]) if states[x,label]==2]
l2 = np.sum(-np.log(np.asarray(norms[2].pdf(data[r2,:])+e)),axis=0)
return l0 + l1 + l2
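# (The helper above returns, for each spot, the summed negative log-likelihood of its bins under
#  the Gaussian of whichever CNA state the candidate label assigns to each bin; the small
#  constant e guards against log(0).)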
def init_helper(i,data, n_clusters,normal,diff,labels,c):
l = []
for k in range(n_clusters):
pval = ks_2samp(data[i,labels==k],normal[i,:])[1]
mn = np.mean(normal[i,:])
if c[i,k]< mn and pval <= diff:
l.append(0)
elif c[i,k]> mn and pval <= diff:
l.append(2)
else:
l.append(1)
return np.asarray(l).astype(int)
def HMM_helper(inds, data, means, sigmas ,t, num_states, model,normal):
ind_bin,ind_spot,k = inds
data = data[np.asarray(ind_bin)[:, None],np.asarray(ind_spot)]
data2 = np.mean(data,axis=1)
X = np.asarray([[x] for x in data2])
C = np.asarray(model.predict(X))
score = model.score(X)
#bootstrap
b=3
for i in range(b):
inds = random.sample(range(data.shape[1]),int(data.shape[1]*.8+1))
data2 = np.mean(data[:,inds],axis=1)
X = np.asarray([[x] for x in data2])
C2 = np.asarray(model.predict(X))
for j,c in enumerate(C2):
if C[j] != c:
C[j] = 1
return [C,score]
class STARCH:
"""
This is a class for Hidden Markov Random Field for calling Copy Number Aberrations
using spatial relationships and gene adjacencies along chromosomes
"""
def __init__(self,data,normal_spots=[],labels=[],beta_spots=2,n_clusters=3,num_states=3,gene_mapping_file_name='hgTables_hg19.txt',nthreads=0):
"""
The constructor for HMFR_CNA
Parameters:
data (pandas data frame): gene x spot (or cell).
colnames = 2d or 3d indices (eg. 5x18, 5x18x2 if multiple layers).
rownames = HUGO gene name
"""
if nthreads == 0:
nthreads = int(multiprocessing.cpu_count() / 2 + 1)
logger.info('Running with ' + str(nthreads) + ' threads')
logger.info("initializing HMRF...")
self.beta_spots = beta_spots
self.gene_mapping_file_name = gene_mapping_file_name
self.n_clusters = int(n_clusters)
dat,data = self.preload(data)
logger.info(str(self.rows[0:20]))
logger.info(str(len(self.rows)) + ' ' + str(len(self.columns)) + ' ' + str(data.shape))
if isinstance(normal_spots, str):
self.read_normal_spots(normal_spots)
if normal_spots == []:
self.get_normal_spots(data)
else:
self.normal_spots = np.asarray([int(x) for x in normal_spots])
logger.info('normal spots ' + str(len(self.normal_spots)))
dat = self.preprocess_data(data,dat)
logger.info('done preprocessing...')
self.data = self.data * 1000
self.bins = self.data.shape[0]
self.spots = self.data.shape[1]
self.tumor_spots = np.asarray([int(x) for x in range(self.spots) if int(x) not in self.normal_spots])
self.normal = self.data[:,self.normal_spots]
self.data = self.data[:,self.tumor_spots]
self.bins = self.data.shape[0]
self.spots = self.data.shape[1]
self.num_states = int(num_states)
self.normal_state = int((self.num_states-1)/2)
logger.info('getting spot network...')
self.get_spot_network(self.data,self.columns[self.tumor_spots])
if isinstance(labels, str):
self.get_labels(labels)
if len(labels)>0:
self.labels = labels
else:
logger.info('initializing labels...')
self.initialize_labels()
logger.debug('starting labels: '+str(self.labels))
np.fill_diagonal(self.spot_network, 0)
logger.info('getting params...')
        for d in range(10, 20):
try:
self.init_params(d/10,nthreads)
break
except:
continue
self.states = np.zeros((self.bins,self.n_clusters))
logger.info('starting means: '+str(self.means))
logger.info('starting cov: '+str(self.sigmas))
logger.info(str(len(self.rows)) + ' ' + str(len(self.columns)) + ' ' + str(self.data.shape))
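    # __init__ flow: load the expression matrix (preload/load), identify candidate normal spots,
    # preprocess and bin genes along each chromosome, build the spatial spot network, seed the
    # cluster labels with k-means, then initialize the per-state Gaussian parameters.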
def to_transpose(self,sep,data):
dat = pd.read_csv(data,sep=sep,header=0,index_col=0)
if 'x' in dat.index.values[0] and 'x' in dat.index.values[1] and 'x' in dat.index.values[2]:
return True
return False
def which_sep(self,data):
dat = np.asarray(pd.read_csv(data,sep='\t',header=0,index_col=0)).size
dat2 = np.asarray(pd.read_csv(data,sep=',',header=0,index_col=0)).size
dat3 = np.asarray(pd.read_csv(data,sep=' ',header=0,index_col=0)).size
if dat > dat2 and dat > dat3:
return '\t'
elif dat2 > dat and dat2 > dat3:
return ','
else:
return ' '
def get_bin_size(self,data,chroms):
for bin_size in range(20,100):
test = self.bin_data2(data[:,self.normal_spots],chroms,bin_size=bin_size,step_size=1)
test = test[test!=0]
logger.debug(str(bin_size)+' mean expression binned ' + str(np.mean(test)))
logger.debug(str(bin_size)+' median expression binned ' + str(np.median(test)))
if np.median(test) >= 10:
break
logger.info('selected bin size: ' + str(bin_size))
return bin_size
def preload(self,l):
if isinstance(l,list): # list of multiple datasets
offset = 0
dats = []
datas = []
for data in l:
dat,data = self.load(data)
datas.append(data)
dats.append(dat)
conserved_genes = []
inds = []
for dat in dats:
inds.append([])
for gene in dats[0].index.values:
inall = True
for dat in dats:
if gene not in dat.index.values:
inall = False
if inall:
conserved_genes.append(gene)
for i,dat in enumerate(dats):
ind = inds[i]
ind.append(np.where(dat.index.values == gene)[0][0])
inds[i] = ind
conserved_genes = np.asarray(conserved_genes)
logger.info(str(conserved_genes))
newdatas = []
newdats = []
for i in range(len(datas)):
data = datas[i]
dat = dats[i]
ind = np.asarray(inds[i])
newdatas.append(data[ind,:])
newdats.append(dat.iloc[ind,:])
for dat in newdats:
spots = np.asarray([[float(y) for y in x.split('x')] for x in dat.columns.values])
for spot in spots:
spot[0] += offset
spots = ['x'.join([str(y) for y in x]) for x in spots]
dat.columns = spots
offset += 100
data = np.concatenate(newdatas,axis=1)
dat = pd.concat(newdats,axis=1)
self.rows = dat.index.values
self.columns = dat.columns.values
else:
dat,data = self.load(l)
return dat,data
def load(self,data):
try:
if isinstance(data, str) and ('.csv' in data or '.tsv' in data or '.txt' in data):
logger.info('Reading data...')
sep = self.which_sep(data)
if self.to_transpose(sep,data):
dat = pd.read_csv(data,sep=sep,header=0,index_col=0).T
else:
dat = pd.read_csv(data,sep=sep,header=0,index_col=0)
elif isinstance(data,str):
logger.info('Importing 10X data from directory. Directory must contain barcodes.tsv, features.tsv, matrix.mtx, tissue_positions_list.csv')
barcodes = np.asarray(pd.read_csv(data + '/barcodes.tsv',header=None)).flatten()
genes = np.asarray(pd.read_csv(data + '/features.tsv',sep='\t',header=None))
genes = genes[:,1]
coords = np.asarray(pd.read_csv(data + '/tissue_positions_list.csv',sep=',',header=None))
d = dict()
for row in coords:
d[row[0]] = str(row[2]) + 'x' + str(row[3])
inds = []
coords2 = []
for i,barcode in enumerate(barcodes):
if barcode in d.keys():
inds.append(i)
coords2.append(d[barcode])
matrix = mmread(data + '/matrix.mtx').toarray()
logger.info(str(barcodes) + ' ' + str(barcodes.shape))
logger.info(str(genes) + ' ' + str(genes.shape))
logger.info(str(coords) + ' ' + str(coords.shape))
logger.info(str(matrix.shape))
matrix = matrix[:,inds]
genes,inds2 = np.unique(genes, return_index=True)
matrix = matrix[inds2,:]
dat = pd.DataFrame(matrix,index = genes,columns = coords2)
logger.info(str(dat))
else:
dat = pd.DataFrame(data)
except:
raise Exception("Incorrect input format")
logger.info('coords ' + str(len(dat.columns.values)))
logger.info('genes ' + str(len(dat.index.values)))
data = dat.values
logger.info(str(data.shape))
self.rows = dat.index.values
self.columns = dat.columns.values
return(dat,data)
def preprocess_data(self,data,dat):
logger.info('data shape ' + str(data.shape))
data,inds = self.filter_genes(data,min_cells=int(data.shape[1]/20))
logger.info('Filtered genes, now have ' + str(data.shape[0]) + ' genes')
data[data>np.mean(data)+np.std(data)*2]=np.mean(data)+np.std(data)*2
dat = dat.T[dat.index.values[inds]].T
self.rows = dat.index.values
self.columns = dat.columns.values
logger.info('filter ' + str(len(self.rows)) + ' ' + str(len(self.columns)) + ' ' + str(data.shape))
data,chroms,pos,inds = self.order_genes_by_position(data,dat.index.values)
dat = dat.T[dat.index.values[inds]].T
self.rows = dat.index.values
self.columns = dat.columns.values
logger.info('order ' + str(len(self.rows)) + ' ' + str(len(self.columns)) + ' ' + str(data.shape))
logger.info('zero percentage ' + str((data.size - np.count_nonzero(data)) / data.size))
bin_size = self.get_bin_size(data,chroms)
data = np.log(data+1)
data = self.library_size_normalize(data) #2
data = data-np.mean(data[:,self.normal_spots],axis=1).reshape(data.shape[0],1)
data = self.threshold_data(data,max_value=3.0)
data = self.bin_data(data,chroms,bin_size=bin_size,step_size=1)
data = self.center_at_zero(data) #7
data = data-np.mean(data[:,self.normal_spots],axis=1).reshape(data.shape[0],1)
data = np.exp(data)-1
self.data = data
self.pos = np.asarray([str(x) for x in pos])
logger.info('preprocess ' + str(len(self.rows)) + ' ' + str(len(self.columns)) + ' ' + str(data.shape))
return(dat)
def read_normal_spots(self,normal_spots):
        normal_spots = pd.read_csv(normal_spots, sep=',')
        self.normal_spots = np.asarray([int(x) for x in np.asarray(normal_spots)])
def get_normal_spots(self,data):
data,k = self.filter_genes(data,min_cells=int(data.shape[1]/20)) # 1
data = self.library_size_normalize(data) #2
data = np.log(data+1)
data = self.threshold_data(data,max_value=3.0)
pca = PCA(n_components=1).fit_transform(data.T)
km = KMeans(n_clusters=2).fit(pca)
clusters = np.asarray(km.predict(pca))
if np.mean(data[:,clusters==0]) < np.mean(data[:,clusters==1]):
self.normal_spots = np.asarray([x for x in range(data.shape[1])])[clusters==0]
else:
self.normal_spots = np.asarray([x for x in range(data.shape[1])])[clusters==1]
def filter_genes(self,data,min_cells=20):
keep = []
for gene in range(data.shape[0]):
if np.count_nonzero(data[gene,:]) >= min_cells:
keep.append(gene)
return data[np.asarray(keep),:],np.asarray(keep)
def library_size_normalize(self,data):
m = np.median(np.sum(data,axis=0))
data = data / np.sum(data,axis=0)
data = data * m
return data
def threshold_data(self,data,max_value=4.0):
data[data> max_value] = max_value
data[data< -max_value] = -max_value
return data
def center_at_zero(self,data):
return data - np.median(data,axis=0).reshape(1,data.shape[1])
def bin_data2(self,data,chroms,bin_size,step_size):
newdata = copy.deepcopy(data)
i=0
c = np.asarray(list(set(chroms)))
c.sort()
for chrom in c:
data2 = data[chroms==chrom,:]
for gene in range(data2.shape[0]):
start = max(0,gene-int(bin_size/2))
end = min(data2.shape[0],gene+int(bin_size/2))
r = np.asarray([x for x in range(start,end)])
mean = np.sum(data2[r,:],axis=0)
newdata[i,:] = mean
i += 1
return newdata
def bin_data(self,data,chroms,bin_size,step_size):
newdata = copy.deepcopy(data)
i=0
c = np.asarray(list(set(chroms)))
c.sort()
for chrom in c:
data2 = data[chroms==chrom,:]
for gene in range(data2.shape[0]):
start = max(0,gene-int(bin_size/2))
end = min(data2.shape[0],gene+int(bin_size/2))
r = np.asarray([x for x in range(start,end)])
weighting = np.asarray([x+1 for x in range(start,end)])
weighting = abs(weighting - len(weighting)/2)
weighting = 1/(weighting+1)
weighting = weighting / sum(weighting) #pyramidinal weighting
weighting = weighting.reshape(len(r),1)
mean = np.sum(data2[r,:]*weighting,axis=0)
newdata[i,:] = mean
i += 1
return newdata
def order_genes_by_position(self,data,genes):
mapping = pd.read_csv(self.gene_mapping_file_name,sep='\t')
names = mapping['name2']
chroms = mapping['chrom']
starts = mapping['cdsStart']
ends = mapping['cdsEnd']
d = dict()
d2 = dict()
for i,gene in enumerate(names):
try:
if int(chroms[i][3:]) > 0:
d[gene.upper()] = int(int(chroms[i][3:])*1e10 + int(starts[i]))
d2[gene.upper()] = str(chroms[i][3:]) + ':' + str(starts[i])
except:
None
positions = []
posnames = []
for gene in genes:
gene = gene.upper()
if gene in d.keys():
positions.append(d[gene])
posnames.append(d2[gene])
else:
positions.append(-1)
posnames.append(-1)
positions = np.asarray(positions)
posnames = np.asarray(posnames)
l = len(positions[positions==-1])
order = np.argsort(positions)
order = order[l:]
positions = positions[order]/1e10
posnames = posnames[order]
return data[order,:],positions.astype('int'),posnames,order
def get_labels(self,labels):
        labels = np.asarray(pd.read_csv(labels, sep=','))
self.labels = labels
def init_params(self,d=1.3,nthreads=1):
c = np.zeros((self.data.shape[0],self.n_clusters))
for i in range(self.data.shape[0]):
for k in range(self.n_clusters):
c[i,k] = np.mean(self.data[i,self.labels==k])
labels = np.zeros((self.data.shape[0],self.n_clusters))
diffs = []
for i in range(0,self.data.shape[0],10):
diffs.append(ks_2samp(self.normal[i,:]+np.std(self.normal[i,:])/d,self.normal[i,:])[1])
diff = np.mean(diffs)
logger.info(str(diff))
pool = mp.Pool(nthreads)
results = pool.map(partial(init_helper, data=self.data, n_clusters=self.n_clusters,normal=self.normal,diff=diff,labels=self.labels,c=c), [x for x in range(self.data.shape[0])])
for i in range(len(results)):
labels[i,:] = results[i]
labels = labels.astype(int)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
means = [np.mean(c[labels==cluster]) for cluster in range(self.num_states)]
sigmas = [np.std(c[labels==cluster]) for cluster in range(self.num_states)]
indices = np.argsort([x for x in means])
states = copy.deepcopy(labels)
m = np.zeros((3,1))
s = np.zeros((3,1))
i=0
for index in indices:
states[labels==index]=i # set states
mean = means[index]
sigma = sigmas[index]
if np.isnan(mean) or np.isnan(sigma) or sigma < .01:
raise ValueError()
m[i] = [mean]
s[i] = [sigma**2]
i+=1
self.means = m
self.sigmas = s
def init_params2(self):
means = [[],[],[]]
sigmas = [[],[],[]]
for s in range(self.num_states):
d=[]
for cluster in range(self.n_clusters):
dat = np.asarray(list(self.data[:,self.labels==cluster]))
d += list(dat[np.asarray(list(self.states[:,cluster].astype(int)==int(s)))].flatten())
means[s] = [np.mean(d)]
sigmas[s] = [np.std(d)**2]
logger.info(str(means))
self.means = np.asarray(means)
self.sigmas = np.asarray(sigmas)
def initialize_labels(self):
dat=self.data
km = KMeans(n_clusters=self.n_clusters).fit(dat.T)
clusters = np.asarray(km.predict(dat.T))
self.labels = clusters
def get_spot_network(self,data,spots,l=1):
spots = np.asarray([[float(y) for y in x.split('x')] for x in spots])
spot_network = np.zeros((len(spots),len(spots)))
for i in range(len(spots)):
for j in range(i,len(spots)):
dist = distance.euclidean(spots[i],spots[j])
spot_network[i,j] = np.exp(-dist/(l)) # exponential covariance
spot_network[j,i] = spot_network[i,j]
self.spot_network = spot_network
def get_gene_network(self,data,genes,l=1):
genes = np.asarray(genes)
gene_network = np.zeros((len(genes),len(genes)))
for i in range(len(genes)):
for j in range(i,len(genes)):
dist = j-i
gene_network[i,j] = np.exp(-dist/(l)) # exponential covariance
gene_network[j,i] = gene_network[i,j]
return gene_network
def _optimalK(self,data, maxClusters=15):
X_scaled = data
km_scores= []
km_silhouette = []
db_score = []
for i in range(2,maxClusters):
km = KMeans(n_clusters=i).fit(X_scaled)
preds = km.predict(X_scaled)
silhouette = silhouette_score(X_scaled,preds)
km_silhouette.append(silhouette)
logger.info("Silhouette score for number of cluster(s) {}: {}".format(i,silhouette))
best_silouette = np.argmax(km_silhouette)+2
        best_db = np.argmin(db_score) + 2 if db_score else None  # db_score is never populated above
logger.info('silhouette: ' + str(best_silouette))
return(int(best_silouette))
def HMM_estimate_states_parallel(self,t,maxiters=100,deltoamp=0,nthreads=1):
n_clusters = self.n_clusters
self.EnergyPriors = np.zeros((self.data.shape[0],n_clusters,self.num_states))
self.t = t
chromosomes = [int(x.split(':')[0]) for x in self.pos]
inds = []
n_clusters = self.n_clusters
if len(set(self.labels)) != self.n_clusters:
labels = copy.deepcopy(self.labels)
i=0
for label in set(self.labels):
labels[self.labels==label]=i
i=i+1
self.labels = labels
self.n_clusters = len(set(self.labels))
for chrom in set(chromosomes):
for k in range(self.n_clusters):
inds.append([np.asarray([i for i in range(len(chromosomes)) if chromosomes[i] == chrom]),np.asarray([i for i in range(len(self.labels)) if self.labels[i]==k]),k])
pool = mp.Pool(nthreads)
results = pool.map(partial(HMM_helper, data=self.data, means = self.means, sigmas = self.sigmas,t = self.t,num_states = self.num_states,model=self.model,normal=self.normal), inds)
score = 0
for i in range(len(results)):
self.states[inds[i][0][:, None],inds[i][2]] = results[i][0].reshape((len(results[i][0]),1))
score += results[i][1]
return score
def jointLikelihoodEnergyLabels(self,norms,pool):
Z = (2*math.pi)**(self.num_states/2)
n_clusters = self.n_clusters
likelihoods = np.zeros((self.data.shape[1],n_clusters))
results = pool.map(partial(jointLikelihoodEnergyLabels_helper, data=self.data, states=self.states,norms=norms), range(n_clusters))
for label in range(n_clusters):
likelihoods[:,label] += results[label]
likelihoods = likelihoods / self.data.shape[0]
likelihood_energies = likelihoods
return(likelihood_energies)
def jointLikelihoodEnergyLabelsapprox(self,means):
e = 1e-20
n_clusters = self.n_clusters
likelihoods = np.zeros((self.data.shape[1],n_clusters))
for spot in range(self.spots):
ml=np.inf
for label in range(n_clusters):
likelihood = np.sum(abs(self.data[:,spot]-means[:,label]))/self.data.shape[0]
if likelihood < ml:
ml = likelihood
likelihoods[spot,label] = likelihood
likelihoods[spot,:]-=ml
likelihood_energies = likelihoods
return(likelihood_energies)
def MAP_estimate_labels(self,beta_spots,nthreads,maxiters=20):
inds_spot = []
tmp_spot = []
n_clusters = self.n_clusters
prev_labels = copy.deepcopy(self.labels)
for j in range(self.spots):
inds_spot.append(np.where(self.spot_network[j,:] >= .25)[0])
tmp_spot.append(self.spot_network[j,inds_spot[j]])
logger.debug(str(tmp_spot))
pool = mp.Pool(nthreads)
norms = [norm(self.means[0][0],np.sqrt(self.sigmas[0][0])),norm(self.means[1][0],np.sqrt(self.sigmas[1][0])),norm(self.means[2][0],np.sqrt(self.sigmas[2][0]))]
for m in range(maxiters):
posteriors = 0
means = np.zeros((self.bins,n_clusters))
for label in range(n_clusters):
means[:,label] = np.asarray([self.means[int(i)][0] for i in self.states[:,label]])
likelihood_energies = self.jointLikelihoodEnergyLabels(norms,pool)
#likelihood_energies = self.jointLikelihoodEnergyLabelsapprox(means)
for j in range(self.spots):
p = [((np.sum(tmp_spot[j][self.labels[inds_spot[j]] != label]))) for label in range(n_clusters)]
val = [likelihood_energies[j,label]+beta_spots*1*p[label] for label in range(n_clusters)]
arg = np.argmin(val)
posteriors += val[arg]
self.labels[j] = arg
if np.array_equal(np.asarray(prev_labels),np.asarray(self.labels)): # check for convergence
break
prev_labels = copy.deepcopy(self.labels)
return(-posteriors)
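    # (MAP_estimate_labels combines the per-spot likelihood energy with a Potts-style spatial
    #  prior: beta_spots times the summed neighbor weights whose current label disagrees with
    #  the candidate label, iterating until the labeling stops changing.)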
def update_params(self):
c = np.zeros((self.data.shape[0],self.n_clusters))
for i in range(self.data.shape[0]):
for k in range(self.n_clusters):
c[i,k] = np.mean(self.data[i,self.labels==k])
means = [np.mean(c[self.states==cluster]) for cluster in range(self.num_states)]
sigmas = [np.std(c[self.states==cluster]) for cluster in range(self.num_states)]
indices = np.argsort([x for x in means])
m = np.zeros((3,1))
s = np.zeros((3,1))
i=0
for index in indices:
self.states[self.states==index]=i # set states
mean = means[index]
sigma = sigmas[index]
m[i] = [mean]
s[i] = [sigma**2]
i+=1
self.means = m
self.sigmas = s
logger.debug(str(self.means))
logger.debug(str(self.sigmas))
def callCNA(self,t=.00001,beta_spots=2,maxiters=20,deltoamp=0.0,nthreads=0,returnnormal=True):
"""
Run HMRF-EM framework to call CNA states by alternating between
MAP estimate of states given current params and EM estimate of
params given current states until convergence
Returns:
states (np array): integer CNA states (0 = del, 1 norm, 2 = amp)
"""
logger.info("running HMRF to call CNAs...")
states = [copy.deepcopy(self.states),copy.deepcopy(self.states)]
logger.debug('sum start:'+str(np.sum(states[-1])))
logger.info('beta spots: '+str(beta_spots))
if nthreads == 0:
nthreads = int(multiprocessing.cpu_count() / 2 + 1)
logger.info('Running with ' + str(nthreads) + ' threads')
X = []
lengths = []
for i in range(self.data.shape[1]):
X.append([[x] for x in self.data[:,i]])
lengths.append(len(self.data[:,i]))
X = np.concatenate(X)
model = hmm.GaussianHMM(n_components=self.num_states, covariance_type="diag",init_params="mc", params="",algorithm='viterbi')
model.transmat_ = np.array([[1-2*t, t, t],
[t, 1-2*t, t],
[t, t, 1-2*t]])
model.startprob_ = np.asarray([.1,.8,.1])
model.means_ = self.means
model.covars_ = self.sigmas
model.fit(X,lengths)
logger.info(str(model.means_))
logger.info(str(model.covars_))
logger.info(str(model.transmat_))
logger.info(str(model.startprob_))
self.model = model
for i in range(maxiters):
score_state = self.HMM_estimate_states_parallel(t=t,deltoamp=deltoamp,nthreads=nthreads)
self.init_params2()
score_label = self.MAP_estimate_labels(beta_spots=beta_spots,nthreads=nthreads,maxiters=20)
states.append(copy.deepcopy(self.states))
logger.debug('sum iter:'+str(i) + ' ' + str(np.sum(states[-1])))
if np.array_equal(states[-2],states[-1]) or np.array_equal(states[-3],states[-1]): # check for convergence
logger.info('states converged')
break
if len(states) > 3:
states = states[-3:]
logger.info('Posterior Energy: ' + str(score_state + score_label))
if returnnormal:
labels = np.asarray([self.n_clusters for i in range(len(self.columns))])
labels[self.tumor_spots] = self.labels
states = np.ones((self.states.shape[0],self.n_clusters+1))
for cluster in range(self.n_clusters):
states[:,cluster] = self.states[:,cluster]
self.labels = pd.DataFrame(data=labels,index=self.columns)
self.states = states
self.n_clusters += 1
else:
self.labels = pd.DataFrame(data=self.labels,index=self.columns[self.tumor_spots])
            states = pd.DataFrame(self.states)
import numpy as np
import pandas as pd
import os
import glob
import click
from pathlib import Path
from eye_tracking.preprocessing.functions.et_preprocess import preprocess_et
from eye_tracking.preprocessing.functions.detect_events import make_fixations, make_blinks, make_saccades
import warnings
warnings.filterwarnings("ignore")
def preprocess_eye(subj_fpath):
""" preprocess eye tracking data using code from https://github.com/teresa-canasbajo/bdd-driveratt/tree/master/eye_tracking/preprocessing
saves out preprocessed data for events, saccades, fixations
Args:
subj_fpath (str): full path to top-level directory of eye-tracking
"""
# get all sessions
ses_dirs = glob.glob(os.path.join(subj_fpath, '*ses*'))
# get subj name
subj = Path(subj_fpath).name
# loop over sessions
for ses_dir in ses_dirs:
# get sess name
sess = Path(ses_dir).name
# get all runs
run_dirs = glob.glob(os.path.join(ses_dir, '*'))
# loop over runs
for run_dir in run_dirs:
# get run name
run = Path(run_dir).name
# get preprocess dir
preprocess_dir = os.path.join(run_dir, 'preprocessed')
# check if data have already been preprocessed
if not os.path.isdir(preprocess_dir):
try:
data = preprocess_et(subject='', datapath=run_dir, surfaceMap=False, eventfunctions=(make_fixations, make_blinks, make_saccades))
# modify the msgs and save to disk
msgs_df = pd.read_csv(os.path.join(preprocess_dir, 'pl_msgs.csv'))
msgs = _modify_msgs(dataframe=msgs_df)
msgs.to_csv(os.path.join(preprocess_dir, f'{subj}_{sess}_{run}_pl_msgs.csv'))
# merge msgs to events and save to disk
events_df = pd.read_csv(os.path.join(preprocess_dir, 'pl_events.csv'))
events_msgs = _merge_msgs_events(events=events_df, msgs=msgs)
events_msgs.to_csv(os.path.join(preprocess_dir, f'{subj}_{sess}_{run}_pl_msgs_events.csv'))
# merge msgs to samples and save to disk
samples_df = pd.read_csv(os.path.join(preprocess_dir, 'pl_samples.csv'))
samples_msgs = _merge_msgs_samples(samples=samples_df, msgs=msgs)
samples_msgs.to_csv(os.path.join(preprocess_dir, f'{subj}_{sess}_{run}_pl_msgs_samples.csv'))
print('Preprocessing complete!')
except:
print('something went wrong with preprocessing ...')
else:
print('These data have already been preprocessed ...')
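# Example (illustrative; the path is an assumption):
#   preprocess_eye('/data/eyetracking/s01')
# which writes <subj>_<sess>_<run>_pl_msgs*.csv files into each run's preprocessed/ directory.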
def concat_runs(subj_fpath):
# get all sessions
ses_dirs = glob.glob(os.path.join(subj_fpath, '*ses*'))
# get subj name
subj = Path(subj_fpath).name
df_events_all = pd.DataFrame()
df_samples_all = pd.DataFrame()
# loop over sessions
for ses_dir in np.sort(ses_dirs):
# get sess name
sess = Path(ses_dir).name
# get all runs
run_dirs = glob.glob(os.path.join(ses_dir, '*'))
# loop over runs
for run_dir in np.sort(run_dirs):
# get run name
run = Path(run_dir).name
# load preprocessed data for subj/sess
try:
df_events = pd.read_csv(os.path.join(subj_fpath, sess, run, 'preprocessed', f'{subj}_{sess}_{run}_pl_msgs_events.csv'))
df_events['subj'] = subj
df_events['sess'] = sess
df_samples = pd.read_csv(os.path.join(subj_fpath, sess, run, 'preprocessed', f'{subj}_{sess}_{run}_pl_msgs_samples.csv'))
df_samples['subj'] = subj
df_samples['sess'] = sess
# clean up
df_events = _clean_up(dataframe=df_events)
df_samples = _clean_up(dataframe=df_samples)
# concat to dataframe
df_events_all = pd.concat([df_events_all, df_events])
df_samples_all = pd.concat([df_samples_all, df_samples])
except:
print(f'no preprocessed data for {subj}_{sess}_{run}')
# clean up
df_events_all.to_csv(os.path.join(subj_fpath, f'eyetracking_events_{subj}.csv'), index=False)
df_samples_all.to_csv(os.path.join(subj_fpath, f'eyetracking_samples_{subj}.csv'), index=False)
def group_data(data_dir):
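    """ aggregate the subject-level events and samples csv files across all subjects
    Args:
        data_dir (str): full path to top-level directory containing all subjects
    """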
# get all subjs
subj_dirs = glob.glob(os.path.join(data_dir, '*s*'))
# loop over subjs
df_events_all = pd.DataFrame()
df_samples_all = pd.DataFrame()
for subj_dir in subj_dirs:
if os.path.isdir(subj_dir):
# get subj name
subj = Path(subj_dir).name
# load preprocessed data for subj/sess
try:
df_events = pd.read_csv(os.path.join(subj_dir, f'eyetracking_events_{subj}.csv'))
df_samples = pd.read_csv(os.path.join(subj_dir, f'eyetracking_samples_{subj}.csv'))
# concat to dataframe
                df_events_all = pd.concat([df_events_all, df_events])
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
            7: pd.Timestamp("2012-05-09 00:00:00"),
import urllib.request
import PyPDF2
import tempfile
import re
import pandas as pd
import sqlite3
from sqlite3 import Error
#df= pd.DataFrame(columns=['incident_time','incident_number','incident_location','nature','incident_ori'])
#getting the pdf file from the page which is provided as URL
def fetchIncidents(url):
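    """
    Download the incident summary pdf from the given url.
    args:
        url : address of the pdf to download
    returns:
        raw bytes of the pdf, or None if the url cannot be opened.
    """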
try:
data=urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
print("url entered is not working")
return None
return data
def extractIncidents(data):
"""
Here we extract the data pulled from the website and parse it using a pdfReader.
args:
data : which is returned by the fetchIncidents(url)
returns:
Dataframe.
"""
    df = pd.DataFrame(columns=['incident_time', 'incident_number', 'incident_location', 'nature', 'incident_ori'])
import argparse, os, sys
import pandas as pd
from collections import defaultdict
from enum import Enum
from typing import List
from util.util_funcs import (
MAX_NUM_COLS,
MAX_NUM_ROWS,
MAX_TABLE_SIZE,
create_table_dict,
get_evidence_docs,
load_jsonl,
store_json,
)
from tqdm import tqdm
from util.logger import get_logger
DIR_PATH = os.path.abspath(os.getcwd())
FEVEROUS_PATH = DIR_PATH + "/FEVEROUS/src"
sys.path.insert(0, FEVEROUS_PATH)
from database.feverous_db import FeverousDB
from utils.wiki_page import WikiPage
logger = get_logger()
WRITE_TO_FILE = True
MAX_NR_CELLS = 5 # The maximum nr of cells that is retrieved from each table
table_cell_evidence_dist = defaultdict(int)
sent_evidence_dist = defaultdict(int)
stats = defaultdict(int)
evidence_doc_title_word_len_dist = defaultdict(int)
table_size_dist = {"nr_rows": [], "nr_cols": [], "size": []} # Size = rows x cols
max_cell_counts = {"max_cell_count": []}
class Split(Enum):
TRAIN = "train"
DEV = "dev"
def get_max_cell_count(data_point: dict):
"""Counts the evidence cells for each table and returns the max count for a single table
Args:
data_point (dict): A data sample from the FEVEROUS dataset
Returns:
        int: The cell count for the table with the most evidence cells
"""
cell_ids = set()
table_id_to_cell_count = defaultdict(int)
for evidence_obj in data_point["evidence"]:
for evidence_id in evidence_obj["content"]:
if "_cell_" in evidence_id and evidence_id not in cell_ids:
cell_ids.add(evidence_id)
cell_id_split = evidence_id.split("_")
table_id = "{}_{}".format(cell_id_split[0], cell_id_split[2])
table_id_to_cell_count[table_id] += 1
cell_counts = list(table_id_to_cell_count.values())
max_cell_count = max(cell_counts)
return max_cell_count
def get_cell_id_for_each_table(data_point):
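    """Return one representative evidence cell id for each distinct table in the sample's evidence
    Args:
        data_point (dict): A data sample from the FEVEROUS dataset
    Returns:
        set: One cell id per table that appears in the evidence
    """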
table_cell_ids = set()
table_ids = set()
for evidence_obj in data_point["evidence"]:
for evidence_id in evidence_obj["content"]:
if "_cell_" in evidence_id:
cell_id_split = evidence_id.split("_")
table_id = "{}_{}".format(cell_id_split[0], cell_id_split[2])
if table_id not in table_ids:
table_ids.add(table_id)
table_cell_ids.add(evidence_id)
return table_cell_ids
def get_tables(db: FeverousDB, table_cell_ids: List[str]):
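    """Retrieve the tables that contain the given evidence cells
    Args:
        db (FeverousDB): The FEVEROUS database
        table_cell_ids (List[str]): Cell ids, one per table, e.g. from get_cell_id_for_each_table
    Returns:
        List[dict]: One table dict (as created by create_table_dict) per evidence cell
    """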
doc_ids = set()
for table_cell_id in table_cell_ids:
table_cell_id_split = table_cell_id.split("_")
doc_id = table_cell_id_split[0]
doc_ids.add(doc_id)
table_dicts = []
for doc_id in doc_ids:
doc_json = db.get_doc_json(doc_id)
page = WikiPage(doc_id, doc_json)
for table_cell_id in table_cell_ids:
cell_doc = table_cell_id.split("_")[0]
if doc_id == cell_doc:
cell_id = "_".join(table_cell_id.split("_")[1:])
wiki_table = page.get_table_from_cell_id(cell_id)
table_dict = create_table_dict(wiki_table)
table_dicts.append(table_dict)
return table_dicts
def add_total_stats(db, data):
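    """Accumulate dataset-wide statistics (labels, evidence types, table sizes, cell counts) over all samples
    Args:
        db (FeverousDB): The FEVEROUS database
        data (List[dict]): All samples (train and dev) to include in the statistics
    """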
for d in tqdm(data):
stats["total_samples"] += 1
stats["total_{}".format(d["label"])] += 1
if len(d["evidence"]) > 1:
stats["samples_with_multiple_evidence"] += 1
else:
stats["samples_with_single_evidence"] += 1
nr_of_cells, nr_of_sents, nr_of_other = 0, 0, 0
for evidence_obj in d["evidence"]:
for evidence_id in evidence_obj["content"]:
if "_cell_" in evidence_id:
nr_of_cells += 1
elif "_sentence_" in evidence_id:
nr_of_sents += 1
else:
nr_of_other += 1
if nr_of_cells > 0:
stats["samples_with_table_cell_evidence"] += 1
if nr_of_sents > 0:
stats["samples_with_sent_evidence"] += 1
if nr_of_other > 0:
stats["samples_with_other_evidence"] += 1
table_cell_evidence_dist[nr_of_cells] += 1
sent_evidence_dist[nr_of_sents] += 1
evidence_docs = get_evidence_docs(d)
for doc in evidence_docs:
words = doc.split(" ")
evidence_doc_title_word_len_dist[len(words)] += 1
if nr_of_cells > 0:
table_cell_evidence_ids = get_cell_id_for_each_table(d)
if len(table_cell_evidence_ids) > 1:
stats["samples_with_multiple_table_evidence"] += 1
table_dicts = get_tables(db, table_cell_evidence_ids)
for table_dict in table_dicts:
nr_of_cols = len(table_dict["header"])
nr_of_rows = len(table_dict["rows"]) + 1 # Counting the header as a row
table_size = nr_of_cols * nr_of_rows
table_size_dist["nr_rows"].append(nr_of_rows)
table_size_dist["nr_cols"].append(nr_of_cols)
table_size_dist["size"].append(table_size)
if (
nr_of_cols > MAX_NUM_COLS
or nr_of_rows > MAX_NUM_ROWS
or table_size > MAX_TABLE_SIZE
):
stats["tables_too_large_to_fit_model"] += 1
if nr_of_cols > MAX_NUM_COLS:
stats["tables_with_too_many_columns"] += 1
if nr_of_rows > MAX_NUM_ROWS:
stats["tables_with_too_many_rows"] += 1
if table_size > MAX_TABLE_SIZE:
stats["tables_with_too_many_cells"] += 1
                # This will count duplicates if the same table is evidence in more than one claim.
                stats["total_nr_of_tables"] += 1
max_cell_count = get_max_cell_count(d)
max_cell_counts["max_cell_count"].append(max_cell_count)
if max_cell_count > MAX_NR_CELLS:
stats[
"samples_with_more_than_{}_evidence_cells".format(MAX_NR_CELLS)
] += 1
stats["percentage_of_tables_discarded"] = (
stats["tables_too_large_to_fit_model"] / stats["total_nr_of_tables"]
)
def add_split_stats(data: List[dict], split: Split):
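    """Accumulate per-split statistics (labels, evidence types, title lengths) for the given split
    Args:
        data (List[dict]): The samples of the split
        split (Split): Which split the samples belong to (train or dev)
    """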
split_str = split.value
for d in tqdm(data):
stats["{}_samples".format(split_str)] += 1
stats["{}_{}".format(split_str, d["label"])] += 1
if len(d["evidence"]) > 1:
stats["{}_samples_with_multiple_evidence".format(split_str)] += 1
else:
stats["{}_samples_with_single_evidence".format(split_str)] += 1
nr_of_cells = 0
nr_of_sents = 0
nr_of_other = 0
for evidence_obj in d["evidence"]:
for evidence_id in evidence_obj["content"]:
if "_cell_" in evidence_id:
nr_of_cells += 1
elif "_sentence_" in evidence_id:
nr_of_sents += 1
else:
nr_of_other += 1
if nr_of_cells > 0:
stats["{}_samples_with_table_cell_evidence".format(split_str)] += 1
if nr_of_sents > 0:
stats["{}_samples_with_sent_evidence".format(split_str)] += 1
if nr_of_other > 0:
stats["{}_samples_with_other_evidence".format(split_str)] += 1
table_cell_evidence_dist[nr_of_cells] += 1
sent_evidence_dist[nr_of_sents] += 1
evidence_docs = get_evidence_docs(d)
for doc in evidence_docs:
words = doc.split(" ")
evidence_doc_title_word_len_dist[len(words)] += 1
def main():
parser = argparse.ArgumentParser(
description="Creates statistics of the provided datasets"
)
parser.add_argument(
"--db_path", default=None, type=str, help="Path to the FEVEROUS database"
)
parser.add_argument(
"--train_data_path",
default=None,
type=str,
help="Path to the train dataset file",
)
parser.add_argument(
"--dev_data_path", default=None, type=str, help="Path to the dev dataset file"
)
parser.add_argument(
"--out_path", default=None, type=str, help="Path to the output directory"
)
args = parser.parse_args()
if not args.db_path:
raise RuntimeError("Invalid database path")
if ".db" not in args.db_path:
raise RuntimeError("The database path should include the name of the .db file")
if not args.train_data_path:
raise RuntimeError("Invalid train data path")
if ".jsonl" not in args.train_data_path:
raise RuntimeError(
"The train data path should include the name of the .jsonl file"
)
if not args.dev_data_path:
raise RuntimeError("Invalid dev data path")
if ".jsonl" not in args.dev_data_path:
raise RuntimeError(
"The dev data path should include the name of the .jsonl file"
)
if not args.out_path:
raise RuntimeError("Invalid output dir path")
out_dir = os.path.dirname(args.out_path)
if not os.path.exists(out_dir):
logger.info("Output directory doesn't exist. Creating {}".format(out_dir))
os.makedirs(out_dir)
db = FeverousDB(args.db_path)
train_data = load_jsonl(args.train_data_path)[1:]
dev_data = load_jsonl(args.dev_data_path)[1:]
add_total_stats(db, train_data + dev_data)
add_split_stats(train_data, Split.TRAIN)
add_split_stats(dev_data, Split.DEV)
if WRITE_TO_FILE:
table_cell_evidence_dist_file = out_dir + "/table_cell_evidence_dist.json"
store_json(
table_cell_evidence_dist,
table_cell_evidence_dist_file,
sort_keys=True,
indent=2,
)
logger.info(
"Stored table cell evidence distribution in '{}'".format(
table_cell_evidence_dist_file
)
)
sent_evidence_dist_file = out_dir + "/sent_evidence_dist.json"
store_json(
sent_evidence_dist, sent_evidence_dist_file, sort_keys=True, indent=2
)
logger.info(
"Stored sentence evidence distribution in '{}'".format(
sent_evidence_dist_file
)
)
evidence_doc_title_word_len_dist_file = (
out_dir + "/evidence_doc_title_word_len_dist.json"
)
store_json(
evidence_doc_title_word_len_dist,
evidence_doc_title_word_len_dist_file,
sort_keys=True,
indent=2,
)
logger.info(
"Stored evidence document title word length distribution in '{}'".format(
evidence_doc_title_word_len_dist_file
)
)
stats_file = out_dir + "/stats.json"
store_json(stats, stats_file, sort_keys=True, indent=2)
logger.info("Stored stats in '{}'".format(stats_file))
table_size_dist_file = out_dir + "/table_size_dist.csv"
table_size_dist_df = pd.DataFrame.from_dict(table_size_dist)
table_size_dist_df.to_csv(table_size_dist_file)
logger.info("Stored table size dist in '{}'".format(table_size_dist_file))
max_cell_counts_file = out_dir + "/max_cell_counts.csv"
        max_cell_counts_df = pd.DataFrame.from_dict(max_cell_counts)
""" test the scalar Timedelta """
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas import (Timedelta, TimedeltaIndex, timedelta_range, Series,
to_timedelta, compat, isnull)
from pandas._libs.tslib import iNaT, NaTType
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_construction(self):
expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10, unit='d').value, expected)
self.assertEqual(Timedelta(10.0, unit='d').value, expected)
self.assertEqual(Timedelta('10 days').value, expected)
self.assertEqual(Timedelta(days=10).value, expected)
self.assertEqual(Timedelta(days=10.0).value, expected)
expected += np.timedelta64(10, 's').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta('10 days 00:00:10').value, expected)
self.assertEqual(Timedelta(days=10, seconds=10).value, expected)
self.assertEqual(
Timedelta(days=10, milliseconds=10 * 1000).value, expected)
self.assertEqual(
Timedelta(days=10, microseconds=10 * 1000 * 1000).value, expected)
# test construction with np dtypes
# GH 8757
timedelta_kwargs = {'days': 'D',
'seconds': 's',
'microseconds': 'us',
'milliseconds': 'ms',
'minutes': 'm',
'hours': 'h',
'weeks': 'W'}
npdtypes = [np.int64, np.int32, np.int16, np.float64, np.float32,
np.float16]
for npdtype in npdtypes:
for pykwarg, npkwarg in timedelta_kwargs.items():
expected = np.timedelta64(1,
npkwarg).astype('m8[ns]').view('i8')
self.assertEqual(
Timedelta(**{pykwarg: npdtype(1)}).value, expected)
# rounding cases
self.assertEqual(Timedelta(82739999850000).value, 82739999850000)
self.assertTrue('0 days 22:58:59.999850' in str(Timedelta(
82739999850000)))
self.assertEqual(Timedelta(123072001000000).value, 123072001000000)
self.assertTrue('1 days 10:11:12.001' in str(Timedelta(
123072001000000)))
# string conversion with/without leading zero
# GH 9570
self.assertEqual(Timedelta('0:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('00:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('-1:00:00'), -timedelta(hours=1))
self.assertEqual(Timedelta('-01:00:00'), -timedelta(hours=1))
# more strings & abbrevs
# GH 8190
self.assertEqual(Timedelta('1 h'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hour'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hr'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hours'), timedelta(hours=1))
self.assertEqual(Timedelta('-1 hours'), -timedelta(hours=1))
self.assertEqual(Timedelta('1 m'), timedelta(minutes=1))
self.assertEqual(Timedelta('1.5 m'), timedelta(seconds=90))
self.assertEqual(Timedelta('1 minute'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 minutes'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 s'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 second'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 seconds'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 ms'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 milli'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 millisecond'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 us'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 micros'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 microsecond'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1.5 microsecond'),
Timedelta('00:00:00.000001500'))
self.assertEqual(Timedelta('1 ns'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nano'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nanosecond'),
Timedelta('00:00:00.000000001'))
# combos
self.assertEqual(Timedelta('10 days 1 hour'),
timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h'), timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h 1m 1s'), timedelta(
days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=1, microseconds=3))
self.assertEqual(Timedelta('-10 days 1 h 1.5m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=31, microseconds=3))
# currently invalid as it has a - on the hhmmdd part (only allowed on
# the days)
self.assertRaises(ValueError,
lambda: Timedelta('-10 days -1 h 1.5m 1s 3us'))
# only leading neg signs are allowed
self.assertRaises(ValueError,
lambda: Timedelta('10 days -1 h 1.5m 1s 3us'))
# no units specified
self.assertRaises(ValueError, lambda: Timedelta('3.1415'))
# invalid construction
tm.assertRaisesRegexp(ValueError, "cannot construct a Timedelta",
lambda: Timedelta())
tm.assertRaisesRegexp(ValueError, "unit abbreviation w/o a number",
lambda: Timedelta('foo'))
tm.assertRaisesRegexp(ValueError,
"cannot construct a Timedelta from the passed "
"arguments, allowed keywords are ",
lambda: Timedelta(day=10))
# roundtripping both for string and value
for v in ['1s', '-1s', '1us', '-1us', '1 day', '-1 day',
'-23:59:59.999999', '-1 days +23:59:59.999999', '-1ns',
'1ns', '-23:59:59.999999999']:
td = Timedelta(v)
self.assertEqual(Timedelta(td.value), td)
# str does not normally display nanos
if not td.nanoseconds:
self.assertEqual(Timedelta(str(td)), td)
self.assertEqual(Timedelta(td._repr_base(format='all')), td)
# floats
expected = np.timedelta64(
10, 's').astype('m8[ns]').view('i8') + np.timedelta64(
500, 'ms').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10.5, unit='s').value, expected)
# nat
self.assertEqual(Timedelta('').value, iNaT)
self.assertEqual(Timedelta('nat').value, iNaT)
self.assertEqual(Timedelta('NAT').value, iNaT)
self.assertEqual(Timedelta(None).value, iNaT)
self.assertEqual(Timedelta(np.nan).value, iNaT)
self.assertTrue(isnull(Timedelta('nat')))
# offset
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Second(2)),
Timedelta('0 days, 00:00:02'))
# unicode
# GH 11995
expected = Timedelta('1H')
result = pd.Timedelta(u'1H')
self.assertEqual(result, expected)
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta(u'0 days, 02:00:00'))
self.assertRaises(ValueError, lambda: Timedelta(u'foo bar'))
def test_overflow_on_construction(self):
# xref https://github.com/statsmodels/statsmodels/issues/3374
value = pd.Timedelta('1day').value * 20169940
self.assertRaises(OverflowError, pd.Timedelta, value)
def test_total_seconds_scalar(self):
# GH 10939
rng = Timedelta('1 days, 10:11:12.100123456')
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
self.assertTrue(np.isnan(rng.total_seconds()))
def test_repr(self):
self.assertEqual(repr(Timedelta(10, unit='d')),
"Timedelta('10 days 00:00:00')")
self.assertEqual(repr(Timedelta(10, unit='s')),
"Timedelta('0 days 00:00:10')")
self.assertEqual(repr(Timedelta(10, unit='ms')),
"Timedelta('0 days 00:00:00.010000')")
self.assertEqual(repr(Timedelta(-10, unit='ms')),
"Timedelta('-1 days +23:59:59.990000')")
def test_conversion(self):
for td in [Timedelta(10, unit='d'),
Timedelta('1 days, 10:11:12.012345')]:
pydt = td.to_pytimedelta()
self.assertTrue(td == Timedelta(pydt))
self.assertEqual(td, pydt)
self.assertTrue(isinstance(pydt, timedelta) and not isinstance(
pydt, Timedelta))
self.assertEqual(td, np.timedelta64(td.value, 'ns'))
td64 = td.to_timedelta64()
self.assertEqual(td64, np.timedelta64(td.value, 'ns'))
self.assertEqual(td, td64)
self.assertTrue(isinstance(td64, np.timedelta64))
        # this is NOT equal and cannot be roundtripped (because of the nanos)
td = Timedelta('1 days, 10:11:12.012345678')
self.assertTrue(td != td.to_pytimedelta())
def test_freq_conversion(self):
td = Timedelta('1 days 2 hours 3 ns')
result = td / np.timedelta64(1, 'D')
self.assertEqual(result, td.value / float(86400 * 1e9))
result = td / np.timedelta64(1, 's')
self.assertEqual(result, td.value / float(1e9))
result = td / np.timedelta64(1, 'ns')
self.assertEqual(result, td.value)
def test_fields(self):
def check(value):
# that we are int/long like
self.assertTrue(isinstance(value, (int, compat.long)))
# compat to datetime.timedelta
rng = to_timedelta('1 days, 10:11:12')
self.assertEqual(rng.days, 1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 0)
self.assertEqual(rng.nanoseconds, 0)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta('-1 days, 10:11:12')
self.assertEqual(abs(td), Timedelta('13:48:48'))
self.assertTrue(str(td) == "-1 days +10:11:12")
self.assertEqual(-td, Timedelta('0 days 13:48:48'))
self.assertEqual(-Timedelta('-1 days, 10:11:12').value, 49728000000000)
self.assertEqual(Timedelta('-1 days, 10:11:12').value, -49728000000000)
rng = to_timedelta('-1 days, 10:11:12.100123456')
self.assertEqual(rng.days, -1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 100 * 1000 + 123)
self.assertEqual(rng.nanoseconds, 456)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# components
tup = pd.to_timedelta(-1, 'us').components
self.assertEqual(tup.days, -1)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta('-1 days 1 us').components
self.assertEqual(tup.days, -2)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
def test_nat_converters(self):
self.assertEqual(to_timedelta(
'nat', box=False).astype('int64'), iNaT)
self.assertEqual(to_timedelta(
'nan', box=False).astype('int64'), iNaT)
def testit(unit, transform):
# array
result = to_timedelta(np.arange(5), unit=unit)
expected = TimedeltaIndex([np.timedelta64(i, transform(unit))
for i in np.arange(5).tolist()])
tm.assert_index_equal(result, expected)
# scalar
result = to_timedelta(2, unit=unit)
expected = Timedelta(np.timedelta64(2, transform(unit)).astype(
'timedelta64[ns]'))
self.assertEqual(result, expected)
# validate all units
# GH 6855
for unit in ['Y', 'M', 'W', 'D', 'y', 'w', 'd']:
testit(unit, lambda x: x.upper())
for unit in ['days', 'day', 'Day', 'Days']:
testit(unit, lambda x: 'D')
for unit in ['h', 'm', 's', 'ms', 'us', 'ns', 'H', 'S', 'MS', 'US',
'NS']:
testit(unit, lambda x: x.lower())
# offsets
# m
testit('T', lambda x: 'm')
# ms
testit('L', lambda x: 'ms')
def test_numeric_conversions(self):
self.assertEqual(ct(0), np.timedelta64(0, 'ns'))
self.assertEqual(ct(10), np.timedelta64(10, 'ns'))
self.assertEqual(ct(10, unit='ns'), np.timedelta64(
10, 'ns').astype('m8[ns]'))
self.assertEqual(ct(10, unit='us'), np.timedelta64(
10, 'us').astype('m8[ns]'))
self.assertEqual(ct(10, unit='ms'), np.timedelta64(
10, 'ms').astype('m8[ns]'))
self.assertEqual(ct(10, unit='s'), np.timedelta64(
10, 's').astype('m8[ns]'))
self.assertEqual(ct(10, unit='d'), np.timedelta64(
10, 'D').astype('m8[ns]'))
def test_timedelta_conversions(self):
self.assertEqual(ct(timedelta(seconds=1)),
np.timedelta64(1, 's').astype('m8[ns]'))
self.assertEqual(ct(timedelta(microseconds=1)),
np.timedelta64(1, 'us').astype('m8[ns]'))
self.assertEqual(ct(timedelta(days=1)),
np.timedelta64(1, 'D').astype('m8[ns]'))
def test_round(self):
t1 = Timedelta('1 days 02:34:56.789123456')
t2 = Timedelta('-1 days 02:34:56.789123456')
for (freq, s1, s2) in [('N', t1, t2),
('U', Timedelta('1 days 02:34:56.789123000'),
Timedelta('-1 days 02:34:56.789123000')),
('L', Timedelta('1 days 02:34:56.789000000'),
Timedelta('-1 days 02:34:56.789000000')),
('S', Timedelta('1 days 02:34:57'),
Timedelta('-1 days 02:34:57')),
('2S', Timedelta('1 days 02:34:56'),
Timedelta('-1 days 02:34:56')),
('5S', Timedelta('1 days 02:34:55'),
Timedelta('-1 days 02:34:55')),
('T', Timedelta('1 days 02:35:00'),
Timedelta('-1 days 02:35:00')),
('12T', Timedelta('1 days 02:36:00'),
Timedelta('-1 days 02:36:00')),
('H', Timedelta('1 days 03:00:00'),
Timedelta('-1 days 03:00:00')),
('d', Timedelta('1 days'),
Timedelta('-1 days'))]:
r1 = t1.round(freq)
self.assertEqual(r1, s1)
r2 = t2.round(freq)
self.assertEqual(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
self.assertRaises(ValueError, lambda: t1.round(freq))
t1 = timedelta_range('1 days', periods=3, freq='1 min 2 s 3 us')
t2 = -1 * t1
t1a = timedelta_range('1 days', periods=3, freq='1 min 2 s')
t1c = pd.TimedeltaIndex([1, 1, 1], unit='D')
# note that negative times round DOWN! so don't give whole numbers
for (freq, s1, s2) in [('N', t1, t2),
('U', t1, t2),
('L', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('S', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('12T', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('H', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('d', t1c,
pd.TimedeltaIndex([-1, -1, -1], unit='D')
)]:
r1 = t1.round(freq)
tm.assert_index_equal(r1, s1)
r2 = t2.round(freq)
tm.assert_index_equal(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
self.assertRaises(ValueError, lambda: t1.round(freq))
def test_contains(self):
# Checking for any NaT-like objects
# GH 13603
td = to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
for v in [pd.NaT, None, float('nan'), np.nan]:
self.assertFalse((v in td))
td = to_timedelta([pd.NaT])
for v in [pd.NaT, None, float('nan'), np.nan]:
self.assertTrue((v in td))
def test_identity(self):
td = Timedelta(10, unit='d')
self.assertTrue(isinstance(td, Timedelta))
self.assertTrue(isinstance(td, timedelta))
def test_short_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
self.assertEqual(ct('10'), np.timedelta64(10, 'ns'))
self.assertEqual(ct('10ns'), np.timedelta64(10, 'ns'))
self.assertEqual(ct('100'), np.timedelta64(100, 'ns'))
self.assertEqual(ct('100ns'), np.timedelta64(100, 'ns'))
self.assertEqual(ct('1000'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('1000ns'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('1000NS'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('10us'), np.timedelta64(10000, 'ns'))
self.assertEqual(ct('100us'), np.timedelta64(100000, 'ns'))
self.assertEqual(ct('1000us'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1000Us'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1000uS'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1ms'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('10ms'), np.timedelta64(10000000, 'ns'))
self.assertEqual(ct('100ms'), np.timedelta64(100000000, 'ns'))
self.assertEqual(ct('1000ms'), np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('-1s'), -np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('1s'), np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('10s'), np.timedelta64(10000000000, 'ns'))
self.assertEqual(ct('100s'), np.timedelta64(100000000000, 'ns'))
self.assertEqual(ct('1000s'), np.timedelta64(1000000000000, 'ns'))
self.assertEqual(ct('1d'), conv(np.timedelta64(1, 'D')))
self.assertEqual(ct('-1d'), -conv(np.timedelta64(1, 'D')))
self.assertEqual(ct('1D'), conv(np.timedelta64(1, 'D')))
self.assertEqual(ct('10D'), conv(np.timedelta64(10, 'D')))
self.assertEqual(ct('100D'), conv(np.timedelta64(100, 'D')))
self.assertEqual(ct('1000D'), conv(np.timedelta64(1000, 'D')))
self.assertEqual(ct('10000D'), conv(np.timedelta64(10000, 'D')))
# space
self.assertEqual(ct(' 10000D '), conv(np.timedelta64(10000, 'D')))
self.assertEqual(ct(' - 10000D '), -conv(np.timedelta64(10000, 'D')))
# invalid
self.assertRaises(ValueError, ct, '1foo')
self.assertRaises(ValueError, ct, 'foo')
def test_full_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1, 'D')
self.assertEqual(ct('1days'), conv(d1))
self.assertEqual(ct('1days,'), conv(d1))
self.assertEqual(ct('- 1days,'), -conv(d1))
self.assertEqual(ct('00:00:01'), conv(np.timedelta64(1, 's')))
self.assertEqual(ct('06:00:01'), conv(
np.timedelta64(6 * 3600 + 1, 's')))
self.assertEqual(ct('06:00:01.0'), conv(
np.timedelta64(6 * 3600 + 1, 's')))
self.assertEqual(ct('06:00:01.01'), conv(
np.timedelta64(1000 * (6 * 3600 + 1) + 10, 'ms')))
self.assertEqual(ct('- 1days, 00:00:01'),
conv(-d1 + np.timedelta64(1, 's')))
self.assertEqual(ct('1days, 06:00:01'), conv(
d1 + np.timedelta64(6 * 3600 + 1, 's')))
self.assertEqual(ct('1days, 06:00:01.01'), conv(
d1 + np.timedelta64(1000 * (6 * 3600 + 1) + 10, 'ms')))
# invalid
self.assertRaises(ValueError, ct, '- 1days, 00')
def test_overflow(self):
# GH 9442
s = Series(pd.date_range('20130101', periods=100000, freq='H'))
s[0] += pd.Timedelta('1s 1ms')
# mean
result = (s - s.min()).mean()
expected = pd.Timedelta((pd.DatetimeIndex((s - s.min())).asi8 / len(s)
).sum())
# the computation is converted to float so might be some loss of
# precision
self.assertTrue(np.allclose(result.value / 1000, expected.value /
1000))
# sum
self.assertRaises(ValueError, lambda: (s - s.min()).sum())
s1 = s[0:10000]
self.assertRaises(ValueError, lambda: (s1 - s1.min()).sum())
s2 = s[0:1000]
result = (s2 - s2.min()).sum()
def test_pickle(self):
v = Timedelta('1 days 10:11:12.0123456')
v_p = self.round_trip_pickle(v)
self.assertEqual(v, v_p)
def test_timedelta_hash_equality(self):
# GH 11129
        v = Timedelta(1, 'D')
import datetime
import logging
import os
import random
from typing import Dict, List, Optional, Tuple, Union, cast
try:
import ib_insync
except ModuleNotFoundError:
print("Can't find ib_insync")
import pandas as pd
import helpers.dbg as dbg
import helpers.printing as hprint
import helpers.s3 as hs3
# from tqdm.notebook import tqdm
_LOG = logging.getLogger(__name__)
def ib_connect(client_id: int = 0, is_notebook: bool = True) -> ib_insync.ib.IB:
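    """
    Connect to the IB gateway using the host / port from the environment and return the connection.
    """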
# TODO(gp): Add check if we are in notebook.
if is_notebook:
ib_insync.util.startLoop()
ib = ib_insync.IB()
host = os.environ["IB_GW_CONNECTION_HOST"]
port = os.environ["IB_GW_CONNECTION_PORT"]
_LOG.debug("Trying to connect to client_id=%s", client_id)
ib.connect(host=host, port=port, clientId=client_id)
#
ib_insync.IB.RaiseRequestErrors = True
_LOG.debug("Connected to IB: client_id=%s", client_id)
return ib
def get_free_client_id(max_attempts: Optional[int]) -> int:
"""
    Find a free client id slot for connecting to the IB gateway.
"""
free_client_id = -1
max_attempts = 1 if max_attempts is None else max_attempts
for i in random.sample(
range(1, max_attempts + 1),
max_attempts,
):
try:
ib_connection = ib_connect(i, is_notebook=False)
except TimeoutError:
continue
free_client_id = i
ib_connection.disconnect()
break
if free_client_id == -1:
raise TimeoutError("Couldn't connect to IB")
return free_client_id
def to_contract_details(ib, contract):
print("contract= (%s)\n\t%s" % (type(contract), contract))
contract_details = ib.reqContractDetails(contract)
print(
"contract_details= (%s)\n\t%s"
% (type(contract_details), contract_details)
)
dbg.dassert_eq(len(contract_details), 1)
return hprint.obj_to_str(contract_details[0])
def get_contract_details(
ib: ib_insync.ib.IB, contract: ib_insync.Contract, simplify_df: bool = False
) -> pd.DataFrame:
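    """
    Return a dataframe of the contracts matching `contract`; with `simplify_df` the result is sorted by
    expiration, the exchange / comboLegs columns are dropped, and duplicates are removed.
    """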
_LOG.debug("contract=%s", contract)
cds = ib.reqContractDetails(contract)
_LOG.info("num contracts=%s", len(cds))
contracts = [cd.contract for cd in cds]
_LOG.debug("contracts[0]=%s", contracts[0])
contracts_df = ib_insync.util.df(contracts)
if simplify_df:
# TODO(*): remove or avoid since it is only one place where `core` is used.
# _LOG.debug(cexplo.print_column_variability(contracts_df))
# Remove exchange.
_LOG.debug("exchange=%s", contracts_df["exchange"].unique())
contracts_df.sort_values("lastTradeDateOrContractMonth", inplace=True)
contracts_df = contracts_df.drop(columns=["exchange", "comboLegs"])
# Remove duplicates.
contracts_df = contracts_df.drop_duplicates()
# Remove constant values.
# threshold = 1
# TODO(*): remove or avoid since it is only one place where `core` is used.
# contracts_df = cexplo.remove_columns_with_low_variability(
# contracts_df, threshold
# )
return contracts_df
# #############################################################################
def get_df_signature(df: pd.DataFrame) -> str:
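    """
    Return a compact string with the length and the first / last index values of a df.
    """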
if df is None or df.empty:
return ""
txt = "len=%d [%s, %s]" % (len(df), df.index[0], df.index[-1])
return txt
def to_ET(
ts: Union[datetime.datetime, pd.Timestamp, str], as_datetime: bool = True
) -> Union[datetime.datetime, pd.Timestamp, str]:
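    """
    Convert a timestamp to ET (America/New_York); an empty string (IB convention for now) is passed through.
    """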
# Handle IB convention that an empty string means now.
if ts == "":
return ""
ts = pd.Timestamp(ts)
if ts.tzinfo is None:
ts = ts.tz_localize(tz="America/New_York")
else:
ts = ts.tz_convert(tz="America/New_York")
if as_datetime:
ts = ts.to_pydatetime()
return ts
def to_timestamp_str(ts: pd.Timestamp) -> str:
dbg.dassert_is_not(ts, None)
ret = ts.strftime("%Y%m%dT%H%M%S")
    return cast(str, ret)
# #############################################################################
def req_historical_data(
ib: ib_insync.ib.IB,
contract: ib_insync.Contract,
end_ts: Union[datetime.datetime, pd.Timestamp, str],
duration_str: str,
bar_size_setting: str,
what_to_show: str,
use_rth: bool,
num_retry: Optional[int] = None,
) -> pd.DataFrame:
"""
    Wrap ib.reqHistoricalData() adding retry semantics and returning a df.
    IB seems to align days on boundaries at 18:00 of every day.
"""
check_ib_connected(ib)
num_retry = num_retry or 3
end_ts = to_ET(end_ts)
#
for i in range(num_retry):
bars = []
try:
_LOG.debug("Requesting data for %s, end_ts=%s...", contract, end_ts)
bars = ib.reqHistoricalData(
contract,
endDateTime=end_ts,
durationStr=duration_str,
barSizeSetting=bar_size_setting,
whatToShow=what_to_show,
useRTH=use_rth,
# Use UTC.
formatDate=2,
)
break
except ib_insync.wrapper.RequestError as e:
_LOG.warning(str(e))
if e.code == 162:
# RequestError: API error: 162: Historical Market Data Service
# error message:HMDS query returned no data
# There is no data.
break
# Retry.
_LOG.info("Retry: %s / %s", i + 1, num_retry)
    else:
        # The loop completed without a break, i.e. every attempt failed.
        dbg.dfatal("Failed after %s retries", num_retry)
if bars:
# Sanity check.
dbg.dassert_lte(bars[0].date, bars[-1].date)
# Organize the data as a dataframe with increasing times.
df = ib_insync.util.df(bars)
df.set_index("date", drop=True, inplace=True)
dbg.dassert_monotonic_index(df)
# Convert to ET.
if bar_size_setting != "1 day":
df.index = df.index.tz_convert(tz="America/New_York")
_LOG.debug("df=%s", get_df_signature(df))
else:
df = pd.DataFrame()
return df
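# Hypothetical usage sketch (the contract and parameters below are examples, not part of this module):
#   ib = ib_connect(client_id=1, is_notebook=False)
#   contract = ib_insync.ContFuture(symbol="ES", exchange="GLOBEX", currency="USD")
#   df = req_historical_data(ib, contract, "", "1 D", "1 min", "TRADES", use_rth=False)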
def get_end_timestamp(
ib, contract, what_to_show, use_rth, num_retry=None
) -> datetime.datetime:
"""
Return the last available timestamp by querying the historical data.
"""
endDateTime = ""
duration_str = "1 D"
bar_size_setting = "1 min"
bars = req_historical_data(
ib,
contract,
endDateTime,
duration_str,
bar_size_setting,
what_to_show,
use_rth,
num_retry=num_retry,
)
    # Set the end timestamp to now if there is no data.
if bars.empty:
_LOG.warning("No data found, set end_ts to now")
last_ts = pd.Timestamp.now()
else:
# Get the last timestamp.
last_ts = bars.index[-1]
return last_ts
# #############################################################################
def duration_str_to_pd_dateoffset(duration_str: str) -> pd.DateOffset:
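    """
    Map an IB duration string (e.g. "1 D", "1 M", "1 Y") to the corresponding pd.DateOffset.
    """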
if duration_str == "1 D":
ret = pd.DateOffset(days=1)
elif duration_str == "2 D":
ret = pd.DateOffset(days=2)
elif duration_str == "3 D":
ret = pd.DateOffset(days=3)
elif duration_str == "4 D":
ret = pd.DateOffset(days=4)
elif duration_str == "7 D":
ret = pd.DateOffset(days=7)
elif duration_str == "1 M":
ret = pd.DateOffset(months=1)
elif duration_str == "1 Y":
        ret = pd.DateOffset(years=1)
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
            188: pd.Timestamp("2012-11-06 00:00:00"),
''' Copyright 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import pandas as pd
import matplotlib.pyplot as plt
print("\n\t\t Finite Volume Method for 1D Steady State Diffusion \n")
# Switch-case style menu for selecting the type of numerical problem
choice = ""
while choice != "q":
print("""\t[ 1 ] Diffusion without Source
[ 2 ] Diffusion with uniform source
[ q ] Exit\n""")
choice = input("\n\tEnter Choice :\t")
if choice == "1":
print("\n\t\tDiffusion Without Source\n")
q = 0
break
elif choice == "2":
print("\n\t\tDiffusion with Uniform Source\n")
        q = float(input("\n\tEnter uniform heat generation q in W/m3: "))
break
elif choice == "q":
exit()
else:
print("\n\n\tInvalid choice, Try again!\n")
# input from user
n = int(input("\n\tEnter the number of grid points: "))
l = float(input("\n\tEnter length of plate in m: "))
tk = float(input("\n\tEnter thermal conductivity of plate in W/mK or W/mC: "))
ta = float(input("\n\tEnter temperature at left face Ta in C: "))
tb = float(input("\n\tEnter temperature at right face Tb in C: "))
# create empty lists for the TDMA coefficients and results
D = [0]*n
beta = [0]*n
alpha = [0]*n
c = [0]*n
A = [0]*n
C = [0]*n
temp = [0]*n
Err = [0]*n
Texact = [0]*n
g = [0]*n
g1 = [0]*n
# setting up equations in tdma format
dx = l/n
D[0] = (3*tk)/dx
D[1] = (2*tk)/dx
D[n-1] = (3*tk)/dx
beta[1] = tk/dx
alpha[1] = tk/dx
c[0] = ((2*tk*ta)/dx)+(q*dx)
for i in range(1, n-1):
c[i] = q*dx
c[n-1] = ((2*tk*tb)/dx)+(q*dx)
beta[0] = 0
beta[n-1] = beta[1]
alpha[0] = alpha[1]
alpha[n-1] = 0
# add common value to list D, beta and alpha
for i in range(2, n-1):
D[i] = D[1]
beta[i] = beta[1]
alpha[i] = alpha[1]
# Calculating intermediate terms by forward substitution
for i in range(0, n):
A[i] = alpha[i]/(D[i] - beta[i]*A[i-1])
C[i] = (beta[i]*C[i-1] + c[i])/(D[i] - beta[i]*A[i-1])
# equating last value for back substitution
temp[n-1] = C[n-1]
# Calculating Temperature values by backward substitution
j = n-2
while j >= 0:
temp[j] = A[j] * temp[j+1] + C[j]
j = j-1
# Calculating Exact Solution and error
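# Analytic solution of k*d2T/dx2 + q = 0 with T(0) = ta and T(l) = tb:
#   T(x) = -q/(2k)*x^2 + ((tb - ta)/l + q*l/(2k))*x + ta
# A1 and A2 below are the quadratic and linear coefficients of this solution.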
A1 = (-q)/(2*tk)
A2 = tb/l - ta/l + (q*l)/(2*tk)
dx = l/n
for i in range(0, n):
g[i] = dx*0.5 + (dx * i)
Texact[i] = A1 * g[i] * g[i] + A2 * g[i] + ta
    Err[i] = ((temp[i] - Texact[i]) * 200) / (temp[i] + Texact[i])  # % error relative to the mean of the two values
    g1[i] = g[i]  # create copy of g for excel
# assemble the output rows as a list of tuples
OUTPUT = list(zip(beta, D, alpha, c, A, C, temp, Texact, Err))
# create Pandas DataFrame
result = pd.DataFrame(data=OUTPUT, columns=["\N{GREEK SMALL LETTER BETA}", "Diagonal(D)", "\N{GREEK SMALL LETTER ALPHA}",
"Constant(C)", "A", "C'", "Temperature(T)", "Temperature Exact(T exact)", "% Error"])
# change index to 1,2,3,.....
result.index = result.index + 1
# print table
print("\n\n")
print(result)
#plot and show graph
# add the boundary temperatures to the lists, since the lists only contain values at the interior nodes
temp.insert(0, ta)
temp.append(tb)
Texact.insert(0, ta)
Texact.append(tb)
g.insert(0, 0)
g.append(l)
graph = pd.DataFrame({'Temperature Numerical': temp, 'Temperature Exact': Texact}, index=g)
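# One way to render the comparison referred to by the "plot and show graph"
# comment above; column names follow the DataFrame built here.
graph.plot(marker='o')
plt.xlabel("Distance along the plate (m)")
plt.ylabel("Temperature (C)")
plt.show()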
import sys
import pandas as pd
import boto3
import io
from io import StringIO
from awsglue.utils import getResolvedOptions
args = getResolvedOptions(
sys.argv, ["S3_SOURCE", "S3_DEST", "TRAIN_KEY", "SCORE_KEY", "INFERENCE_TYPE"]
)
s3_source = args["S3_SOURCE"]
s3_dest = args["S3_DEST"]
train_key = args["TRAIN_KEY"]
score_key = args["SCORE_KEY"]
inference_type = args["INFERENCE_TYPE"]
# ---FUNCTIONS-------------------------------
def data_transform(obj, train=True):
# Perform data transformation on training and scoring sets
df = pd.read_csv(io.BytesIO(obj["Body"].read()), encoding="utf8")
df = df.set_index("EmployeeNumber")
df["BusinessTravel"].replace(
to_replace=["Non-Travel", "Travel_Rarely", "Travel_Frequently"],
value=[0, 1, 2],
inplace=True,
)
df["Gender"].replace(to_replace=["Male", "Female"], value=[0, 1], inplace=True)
df.replace(to_replace=["No", "Yes"], value=[0, 1], inplace=True)
df["RatioYearsPerCompany"] = df["TotalWorkingYears"] / (
df["NumCompaniesWorked"] + 1
)
cont_vars = [
"Age",
"DistanceFromHome",
"MonthlyIncome",
"PercentSalaryHike",
"TrainingTimesLastYear",
"RatioYearsPerCompany",
"YearsAtCompany",
"YearsInCurrentRole",
"YearsSinceLastPromotion",
"YearsWithCurrManager",
]
ord_vars = [
"BusinessTravel",
"Education",
"EnvironmentSatisfaction",
"JobInvolvement",
"JobLevel",
"JobSatisfaction",
"PerformanceRating",
"RelationshipSatisfaction",
"StockOptionLevel",
"WorkLifeBalance",
]
cat_vars = ["Department", "EducationField", "JobRole", "MaritalStatus"]
bool_vars = ["Gender", "OverTime"]
    df_dummy = pd.get_dummies(df[cat_vars])
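    # Sketch of a plausible continuation; the label column name ("Attrition") and the
    # final column layout are assumptions, not taken from the original script.
    df_model = pd.concat([df[cont_vars + ord_vars + bool_vars], df_dummy], axis=1)
    if train:
        df_model = pd.concat([df["Attrition"], df_model], axis=1)
    return df_model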
# import ptvsd
# ptvsd.enable_attach(address = ('0.0.0.0', 5678))
# ptvsd.wait_for_attach()
import os
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as Data
from torch.optim.lr_scheduler import ReduceLROnPlateau
from transformers import AutoModel, AutoConfig, BertTokenizer, AutoModelWithLMHead
from torchviz import make_dot
import time
import numpy as np
from tqdm import trange
from sklearn.metrics import roc_auc_score
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import f1_score
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
# from getFeatures import save_smiles_dicts, get_smiles_dicts, get_smiles_array, moltosvg_highlight
import gc
import sys
import pickle
import random
import csv
import json
import re
import argparse
# from tensorboardX import SummaryWriter
import copy
import pandas as pd
import scipy
# then import my own modules
from AttentiveFP import Fingerprint, Fingerprint_viz, save_smiles_dicts, get_smiles_dicts, get_smiles_array, \
moltosvg_highlight
from network import Network
# all of the imports below are needed when no pre-built feature pickle exists
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import QED
# get_ipython().run_line_magic('matplotlib', 'inline')
from numpy.polynomial.polynomial import polyfit
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib
# from ProEmb.ProBert import model_bert, model_mlm
# from IPython.display import SVG, display
import seaborn as sns;
from torchsummary import summary
sns.set(color_codes=True)
class DataHandler():
def __init__(self, raw_filename, max_atom_len=0, max_bond_len=0):
def get_cm_dict(pickle_dir):
seq_list = []
cm_list = []
id_list = []
degree_list_dict = {}
max_neighbor_num = 0
for f in os.listdir(pickle_dir):
f_degree = os.path.dirname(pickle_dir) + '/' + f.split('.')[0] + '_' + 'degree' + '_' + str(self.max_len) + '.pkl'
f_cm = os.path.dirname(pickle_dir) + '/' + f.split('.')[0] + '_' + str(self.max_len) + '.pkl'
if os.path.exists(f_degree) and os.path.exists(f_cm):
with open(f_degree, 'rb') as f_r:
degree_list_dict = pickle.load(f_r)
max_neighbor_num = list(degree_list_dict.items())[0][1].shape[1]
with open(f_cm, 'rb') as f_r:
cm_df = pickle.load(f_r)
else:
f = os.path.join(pickle_dir, f) # /home/eason/PPI/drug/GAT/data/3d_pdb/pdbbind_2016
data = pickle.load(open(f, 'rb')) # PDB-ID seqs contact_map
for index, row in data.iterrows():
seq = row[cm_seq][:self.max_len]
if seq == '':
continue
cm = row['contact_map'][0][:self.max_len, :self.max_len] # row['contact_map']:208×208
mn = np.max(np.sum(cm, axis=1))
if max_neighbor_num < mn:
max_neighbor_num = mn
for index, row in data.iterrows():
seq = row[cm_seq][:self.max_len]
if seq == '':
continue
cm = row['contact_map'][0][:self.max_len, :self.max_len] # row['contact_map']:208×208
cm_tmp = cm.astype(int)
cm_tmp = np.pad(cm_tmp, ((0, self.max_len - cm.shape[0]), (0, self.max_len - cm.shape[1])), 'constant', constant_values=(0, 0))
cm_list.append(cm_tmp)
seq_list.append(row[cm_seq])
id_list.append(row['PDB-ID'])
degree_list = []
for i in range(len(seq)):
tmp = np.array(np.where(cm[i] > 0.5)[0])
tmp = np.pad(tmp, (0, max_neighbor_num - tmp.shape[0]), 'constant', constant_values=(-1, -1))
degree_list.append(tmp)
degree_list = np.stack(degree_list, 0)
degree_list = np.pad(degree_list, ((0, self.max_len - degree_list.shape[0]), (0, 0)), 'constant',
constant_values=(-1, -1))
degree_list_dict[row['PDB-ID']] = degree_list
cm_df = pd.DataFrame({"PDB-ID": id_list, "seqs": seq_list, "cm_pad": cm_list})
with open(f_degree, 'wb') as f_w:
pickle.dump(degree_list_dict, f_w)
with open(f_cm, 'wb') as f_w:
pickle.dump(cm_df, f_w)
return degree_list_dict, max_neighbor_num, cm_df
self.data_df, self.smile_feature_dict = self.load_smile(raw_filename, max_atom_len=max_atom_len, max_bond_len=max_bond_len)
self.amino_dict = {}
for key, value in vocab.items():
if value - special_vocab_size >= 0:
self.amino_dict[key] = value - special_vocab_size
# for protein structure
self.input_size = nonspecial_vocab_size
self.max_len = max_seq_len # 512
self.enc_lib = np.eye(self.input_size)
if model_type != "only_molecule":
            self.degree_list_dict, self.max_neighbor_num, self.cm_df = get_cm_dict(cm_pickle_dir)  # degree_list_dict: dict giving, for each amino-acid sequence, the contact map between its residues (not a map between proteins)
def get_init(self, seq_list):
mat = []
for seq in seq_list:
# seq = list(map(lambda ch: ord(ch) - ord('A'), seq[:self.max_len]))
seq = [self.amino_dict[ch] for ch in seq[: self.max_len]]
enc = self.enc_lib[seq]
if enc.shape[0] < self.max_len:
enc = np.pad(enc, ((0, self.max_len - enc.shape[0]), (0, 0)), 'constant')
# print(enc.shape)
mat.append(enc)
mat = np.stack(mat, 0)
mat = mat.astype(np.float32)
return mat
def get_degree_list(self, seq_list):
mat = []
for seq in seq_list:
seq = seq[:self.max_len]
if seq in self.degree_list_dict:
cm = self.degree_list_dict[seq]
else:
# print('Sequence not found, ', seq)
cm = np.ones([self.max_len, self.max_neighbor_num])
cm = cm * -1
mat.append(cm)
mat = np.stack(mat, 0)
return mat
def get_amino_mask(self, seq_list):
mat = []
for seq in seq_list:
            mask = np.ones(min(len(seq), self.max_len), dtype=int)  # np.int was removed in newer NumPy
mask = np.pad(mask, (0, self.max_len - len(mask)), 'constant')
mat.append(mask)
mat = np.stack(mat, 0)
# print('mask', mat)
return mat
def get_pro_structure(self, seq_list):
# f1 = cal_mem()
amino_list = self.get_init(seq_list)
# f2 = cal_mem()
# print('Get Pro Structure Index {}-{} costs: {}MB'.format('f2', 'f1', round(f1-f2, 4)))
amino_degree_list = self.get_degree_list(seq_list)
# f3 = cal_mem()
# print('Get Pro Structure Index {}-{} costs: {}MB'.format('f2', 'f3', round(f2 - f3, 4)))
amino_mask = self.get_amino_mask(seq_list)
# f4 = cal_mem()
# print('Get Pro Structure Index {}-{} costs: {}MB'.format('f3', 'f4', round(f3 - f4, 4)))
return amino_list, amino_degree_list, amino_mask
def load_smile(self, raw_filename, max_atom_len=0, max_bond_len=0):
# raw_filename : "./PPI/drug/tasks/DTI/pdbbind/pafnucy_total_rdkit-smiles-v1.csv"
filename = os.path.splitext(raw_filename)[0]
ext_name = os.path.splitext(raw_filename)[-1]
feature_filename = filename + '.pickle'
prefix_filename = os.path.splitext(os.path.split(raw_filename)[-1])[0]
# smiles_tasks_df : df : ["unnamed", "PDB-ID", "seq", "SMILES", "rdkit_smiles", "Affinity-Value", "set"]
if ext_name == '.xlsx':
smiles_tasks_df = pd.read_excel(io = raw_filename) # main file
elif ext_name == '.csv':
smiles_tasks_df = pd.read_csv(raw_filename) # main file
else:
sys.exit(1)
# smilesList : array, 13464
smilesList = smiles_tasks_df[SMILES].values
print("number of all smiles: ", len(smilesList))
atom_num_dist = []
remained_smiles = []
canonical_smiles_list = []
for smiles in smilesList:
try:
                mol = Chem.MolFromSmiles(smiles)  # input: smiles string, output: molecule object
                atom_num_dist.append(len(mol.GetAtoms()))  # list: number of atoms per molecule object
                remained_smiles.append(smiles)  # list: smiles without transformation error
                canonical_smiles_list.append(Chem.MolToSmiles(Chem.MolFromSmiles(smiles), isomericSmiles=True))  # canonical smiles without transformation error
except:
print("the smile is %s with transformation error" % smiles)
pass
print("number of successfully processed smiles after the first test: ", len(remained_smiles))
"----------------------the first test----------------------"
smiles_tasks_df = smiles_tasks_df[smiles_tasks_df[SMILES].isin(remained_smiles)] # df(13464) : include smiles without transformation error
# smiles_tasks_df[SMILES] = canonical_smiles_list
smiles_tasks_df[SMILES] = remained_smiles
smilesList = remained_smiles # update valid smile
# feature_dicts(dict) :
# {smiles_to_atom_info, smiles_to_atom_mask, smiles_to_atom_neighbors, "smiles_to_bond_info", "smiles_to_bond_neighbors", "smiles_to_rdkit_list"}
if os.path.isfile(feature_filename): # get smile feature dict
feature_dicts = pickle.load(open(feature_filename, "rb"))
print("load derectly!")
else:
feature_dicts = save_smiles_dicts(smilesList, filename, max_atom_len=max_atom_len, max_bond_len=max_bond_len)
print("save pickle!")
"----------------------the second test----------------------"
remained_df = smiles_tasks_df[smiles_tasks_df[SMILES].isin(feature_dicts['smiles_to_atom_mask'].keys())] # df(13435) : include smiles without transformation error and second test error
# uncovered_index = ~smiles_tasks_df[SMILES].isin(feature_dicts['smiles_to_atom_mask'].keys())
# uncovered_id = smiles_tasks_df["PDB-ID"][uncovered_index]
# uncovered_df = smiles_tasks_df.drop(remained_df.index)
print("number of successfully processed smiles after the second test: ", len(remained_df))
return remained_df, feature_dicts
def tokenize(sent_list, vocab, seq_len):
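    # Convert tokenized protein "sentences" into BERT-style inputs: prepend [CLS],
    # append [SEP], pad with [PAD] up to seq_len + 2, and build the matching
    # attention_mask / token_type_ids tensors.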
seq_len = seq_len + 2 # add [CLS] and [SEP]
all_input_ids = []
all_attention_mask = []
all_token_type_ids = []
for sent in sent_list:
attention_mask = [1 for _ in range(seq_len)]
token_type_ids = [0 for _ in range(seq_len)]
tmp = [vocab['[CLS]']]
for word in sent:
tmp.append(vocab[word])
if len(tmp) == seq_len - 1:
break
tmp.append(vocab['[SEP]'])
if len(tmp) < seq_len:
for i in range(len(tmp), seq_len):
tmp.append(vocab['[PAD]'])
attention_mask[i] = 0
all_input_ids.append(tmp)
all_attention_mask.append(attention_mask)
all_token_type_ids.append(token_type_ids)
all_input_ids = np.array(all_input_ids)
all_attention_mask = np.array(all_attention_mask)
all_token_type_ids = np.array(all_token_type_ids)
return torch.from_numpy(all_input_ids), torch.from_numpy(all_attention_mask), torch.from_numpy(all_token_type_ids)
# if the n-gram size changes, this must be updated accordingly
def create_sent(seq_list, seg_len=1):
sent_list = []
if seg_len == 1:
for s in seq_list:
sent_list.append(list(s))
else:
for s in seq_list:
tmp = []
for i in range(len(s) - seg_len + 1):
tmp.append(s[i: i + seg_len])
sent_list.append(tmp)
return sent_list
def train(model, dataset, optimizer, loss_function, epoch):
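    # One optimisation pass over the dataset: shuffle row indices, build mini-batches,
    # assemble molecular-graph and protein features for each batch, and minimise a
    # hand-rolled focal loss (gamma = 2, alpha = 0.6) over the two-class logits.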
model.train()
# np.random.seed(epoch)
valList = list(dataset.index)
np.random.shuffle(valList)
batch_list = []
for i in range(0, dataset.shape[0], batch_size):
batch = valList[i:i + batch_size]
batch_list.append(batch)
for counter, batch in enumerate(batch_list):
batch_df = dataset.loc[batch, :]
smiles_list = batch_df[SMILES].values
seq_list = batch_df.seqs.values
y_val = batch_df[TASK].values
x_atom, x_bonds, x_atom_index, x_bond_index, x_mask, smiles_to_rdkit_list = get_smiles_array(smiles_list,
feature_dicts)
amino_list, amino_degree_list, amino_mask = data_handler.get_pro_structure(seq_list)
pro_seqs = batch_df.seqs.values
sents = create_sent(pro_seqs)
tokenized_sent, all_attention_mask, all_token_type_ids = tokenize(sents, vocab, max_seq_len)
tokenized_sent = tokenized_sent.to(device)
all_attention_mask = all_attention_mask.to(device)
all_token_type_ids = all_token_type_ids.to(device)
prediction = model(torch.Tensor(x_atom).to(device), torch.Tensor(x_bonds).to(device), torch.LongTensor(x_atom_index).to(device), \
torch.LongTensor(x_bond_index).to(device), torch.Tensor(x_mask).to(device), tokenized_sent, all_attention_mask, all_token_type_ids, \
torch.tensor(amino_list).to(device), torch.LongTensor(amino_degree_list).to(device), \
torch.Tensor(amino_mask).to(device))
# loss = loss_function(prediction.view(-1, 2), torch.LongTensor(y_val).to(device).view(-1))
# b = 0.9
# flood = (loss - b).abs() + b
true_labels = torch.LongTensor(y_val).to(device).view(-1)
pred_labels = prediction.view(-1, 2)
focal_loss = 0
for true_label, pred_label in zip(true_labels, pred_labels):
pred_label = pred_label - torch.max(pred_label)
exp_pred_label = torch.exp(pred_label)
softmax_pred_label = exp_pred_label / torch.sum(exp_pred_label)
p = softmax_pred_label[true_label]
focal_loss += -0.6 * (1-p)**2 * torch.log(p)
optimizer.zero_grad()
focal_loss.backward()
optimizer.step()
# writer.add_scalar('data/train_loss', np.mean(np.array(losses)).item(), epoch)
def evaluate(model, dataset, loss_function, fp_show=False):
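    # Evaluate without gradient updates: accumulate predictions over mini-batches and
    # report cross-entropy loss, F1, precision, recall and MCC (plus the raw
    # false-positive count when fp_show is True).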
model.eval()
# torch.no_grad()
pred_list = []
true_list = []
# valList = np.arange(0, dataset.shape[0])
valList = list(dataset.index)
batch_list = []
preds = None
for i in range(0, dataset.shape[0], batch_size):
batch = valList[i:i + batch_size]
batch_list.append(batch)
for counter, batch in enumerate(batch_list):
batch_df = dataset.loc[batch, :]
smiles_list = batch_df[SMILES].values
seq_list = batch_df.seqs.values
# print(batch_df)
y_val = batch_df[TASK].values
x_atom, x_bonds, x_atom_index, x_bond_index, x_mask, smiles_to_rdkit_list = get_smiles_array(smiles_list,
feature_dicts)
amino_list, amino_degree_list, amino_mask = data_handler.get_pro_structure(seq_list)
pro_seqs = batch_df.seqs.values
sents = create_sent(pro_seqs)
tokenized_sent, all_attention_mask, all_token_type_ids = tokenize(sents, vocab, max_seq_len)
tokenized_sent = tokenized_sent.to(device)
all_attention_mask = all_attention_mask.to(device)
all_token_type_ids = all_token_type_ids.to(device)
with torch.no_grad():
prediction = model(torch.Tensor(x_atom).to(device), torch.Tensor(x_bonds).to(device), torch.LongTensor(x_atom_index).to(device), \
torch.LongTensor(x_bond_index).to(device), torch.Tensor(x_mask).to(device), tokenized_sent, all_attention_mask, all_token_type_ids, \
torch.tensor(amino_list).to(device), torch.LongTensor(amino_degree_list).to(device), \
torch.Tensor(amino_mask).to(device))
if preds is None:
preds = prediction.detach().cpu().numpy()
else:
preds = np.append(preds, prediction.detach().cpu().numpy(), axis=0)
true_list.extend(batch_df[TASK].values)
loss = loss_function(torch.tensor(preds).to(device), torch.LongTensor(true_list).to(device).view(-1))
pred_list = np.argmax(preds, axis=1)
# auc_value = auc(pred_list, true_list)
# auc_value = roc_auc_score(y_true=pred_list, y_score=true_list)
f1 = f1_score(y_true=true_list, y_pred=pred_list)
precision = precision_score(y_true=true_list, y_pred=pred_list)
recall = recall_score(y_true=true_list, y_pred=pred_list)
mcc = matthews_corrcoef(y_true=true_list, y_pred=pred_list)
if fp_show:
tn, fp, fn, tp = confusion_matrix(y_true=true_list, y_pred=pred_list).ravel()
return loss, f1, precision, recall, mcc, fp
else:
return loss, f1, precision, recall, mcc
def predicted_value(model, dataset):
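    # Run the model in eval mode over the dataset and return, for every row, the
    # softmax probability of the positive class (index 1).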
model.eval()
pred_list = []
valList = list(dataset.index)
batch_list = []
preds = None
for i in range(0, dataset.shape[0], batch_size):
batch = valList[i:i + batch_size]
batch_list.append(batch)
for counter, batch in enumerate(batch_list):
batch_df = dataset.loc[batch, :]
smiles_list = batch_df[SMILES].values
x_atom, x_bonds, x_atom_index, x_bond_index, x_mask, smiles_to_rdkit_list = get_smiles_array(smiles_list,
feature_dicts)
if model_type != "only_molecule":
seq_list = batch_df.seqs.values
amino_list, amino_degree_list, amino_mask = data_handler.get_pro_structure(seq_list)
pro_seqs = batch_df.seqs.values
sents = create_sent(pro_seqs)
tokenized_sent, all_attention_mask, all_token_type_ids = tokenize(sents, vocab, max_seq_len)
tokenized_sent = tokenized_sent.to(device)
all_attention_mask = all_attention_mask.to(device)
all_token_type_ids = all_token_type_ids.to(device)
with torch.no_grad():
prediction = model(torch.Tensor(x_atom).to(device), torch.Tensor(x_bonds).to(device), torch.LongTensor(x_atom_index).to(device), \
torch.LongTensor(x_bond_index).to(device), torch.Tensor(x_mask).to(device), tokenized_sent, all_attention_mask, all_token_type_ids, \
torch.tensor(amino_list).to(device), torch.LongTensor(amino_degree_list).to(device), \
torch.Tensor(amino_mask).to(device))
else:
with torch.no_grad():
prediction = model(torch.Tensor(x_atom).to(device), torch.Tensor(x_bonds).to(device), torch.LongTensor(x_atom_index).to(device), \
torch.LongTensor(x_bond_index).to(device), torch.Tensor(x_mask).to(device), None, None, None, None, None, None)
if preds is None:
preds = prediction.detach().cpu().numpy()
else:
preds = np.append(preds, prediction.detach().cpu().numpy(), axis=0)
# pred_list = np.argmax(preds, axis=1)
p_list = []
for pred_label in preds:
pred_label = torch.tensor(pred_label) - torch.max(torch.tensor(pred_label))
exp_pred_label = torch.exp(pred_label)
softmax_pred_label = exp_pred_label / torch.sum(exp_pred_label)
p = softmax_pred_label[1]
p_list.append(float(p))
return p_list
def fun(radius, T, fingerprint_dim, weight_decay, learning_rate, p_dropout, pro_gat_dim, direction=False, load_model_path="", epochs=2, pre_param="", pre_model=None):
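    # Build the Network with the given hyperparameters, optionally warm-start from
    # load_model_path / pre_model, then train with Adam + ReduceLROnPlateau, keeping
    # the model with the lowest validation loss and stopping early after 10 epochs
    # with little or no validation improvement.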
loss_function = nn.CrossEntropyLoss()
model = Network(int(round(radius)), int(round(T)), num_atom_features, num_bond_features,
int(round(fingerprint_dim)), p_dropout, pro_seq_dim, pro_seq_dim, pro_gat_dim, seq_model_type, task_type)
with open("./generate_parameters.txt", 'w') as f:
for param_name, param_value in model.named_parameters():
print(param_name, ":", param_value.size(), file=f)
print('Model parameters:', sum(param.numel() for param in model.parameters()), file=f)
model = model.to(device)
model = nn.DataParallel(model, device_ids=device_ids)
if load_model_path:
pre_state_dict = model.state_dict()
print(list(pre_state_dict.items())[0])
model.load_state_dict(torch.load(load_model_path, map_location="cpu"), strict=False)
after_state_dict = model.state_dict()
print(list(after_state_dict.items())[0])
if pre_param == "":
best_param = {}
best_param["train_epoch"] = 0
best_param["valid_epoch"] = 0
best_param["train_loss"] = 9e8
best_param["valid_loss"] = 9e8
else:
best_param = copy.deepcopy(pre_param)
best_model = copy.deepcopy(pre_model)
model = copy.deepcopy(pre_model)
# Print model's state_dict
print("Model's state_dict:")
# for param_tensor in model.state_dict():
# print(param_tensor, "\t", model.state_dict()[param_tensor].size())
print('Model parameters:', sum(param.numel() for param in model.parameters()))
# optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), 10 ** -learning_rate, weight_decay=10 ** -weight_decay)
optimizer = optim.Adam(model.parameters(), 10 ** -learning_rate, weight_decay=10 ** -weight_decay)
scheduler = ReduceLROnPlateau(optimizer, 'min', patience=patience)
plot_loss = []
plot_precision = []
plot_recall = []
unchange_num = 0
time_epochs = 0 # used to record real training epochs
for epoch in range(epochs):
start_time = time.time()
train(model, train_df, optimizer, loss_function, epoch + 1)
train_loss, train_f1, train_precision, train_recall, train_mcc, train_fp = evaluate(model, train_df, loss_function, fp_show=True)
valid_loss, valid_f1, valid_precision, valid_recall, valid_mcc, valid_fp = evaluate(model, valid_df, loss_function, fp_show=True)
test_loss, test_f1, test_precision, test_recall, test_mcc, test_fp = evaluate(model, test_df, loss_function, fp_show=True)
scheduler.step(valid_f1) # monitor mse and reduce lr
# tensorboard add
for para_group in optimizer.param_groups:
lr = para_group['lr']
# writer.add_scalar('data/learning_rates', lr, epoch)
real_epoch = epoch+1
if valid_loss < best_param["valid_loss"]:
best_epoch = real_epoch
best_param["train_loss"] = train_loss
if pre_param == "":
best_param["valid_epoch"] = real_epoch
else:
best_param["valid_epoch"] = real_epoch + pre_param["valid_epoch"]
best_param["valid_loss"] = valid_loss
best_model = copy.deepcopy(model)
# torch.save(best_model.state_dict(), './PPI/drug/GAT/save/best-model-current.pth')
end_time = time.time()
train_log = 'epoch: {}, train_loss:{:.3f}, train_F1:{:.3f}, train_precision:{:.3f}, train_recall:{:.3f}, train_mcc:{:.3f}, train_fp:{}'.format(
real_epoch, train_loss, train_f1, train_precision, train_recall, train_mcc, train_fp)
valid_log = len('epoch: {}, '.format(epoch))*' '+'valid_loss:{:.3f}, valid_F1:{:.3f}, valid_precision:{:.3f}, valid_recall:{:.3f}, valid_mcc:{:.3f}, valid_fp:{}'.format(
valid_loss, valid_f1, valid_precision, valid_recall, valid_mcc, valid_fp)
test_log = len('epoch: {}, '.format(epoch))*' '+' test_loss:{:.3f}, test_F1:{:.3f}, test_precision:{:.3f}, test_recall:{:.3f}, test_mcc:{:.3f}, test_fp:{}, lr:{}'.format(
test_loss, test_f1, test_precision, test_recall, test_mcc, test_fp, lr)
each_epoch_time = "------------The {} epoch spend {}m-{:.4f}s------------".format(real_epoch, int((end_time-start_time)/60),
(end_time-start_time)%60)
print(train_log)
print(valid_log)
print(test_log)
print(each_epoch_time)
with open(log_file, 'a') as f:
f.write(train_log+'\n')
f.write(valid_log+'\n')
f.write(test_log+'\n')
f.write(each_epoch_time+'\n')
plot_loss.append([real_epoch, train_loss, valid_loss])
plot_precision.append([real_epoch, train_precision, valid_precision])
plot_recall.append([real_epoch, train_recall, valid_recall])
time_epochs = time_epochs + 1
if epoch != 0:
if abs(last_valid_loss - valid_loss)/last_valid_loss <= 0.005 or valid_loss > last_valid_loss:
unchange_num = unchange_num+1
else:
unchange_num = 0
if unchange_num == 10: # second run don't stop early
break
last_valid_loss = valid_loss
if pre_param == "":
plot_loss = plot_loss[0: best_epoch]
plot_precision = plot_precision[0: best_epoch]
plot_recall = plot_recall[0: best_epoch]
return plot_loss, plot_precision, plot_recall, best_param, best_model, time_epochs
else:
dir_save = "./save/" + model_type + "-{:.3f}-{}-{}-{}-cv{}".format(best_param['valid_loss'], best_param['valid_epoch'], pre_param["valid_epoch"]+real_epoch, nega_type, choose_cv)
if not os.path.exists(dir_save):
os.makedirs(dir_save)
print(dir_save+" create successful!")
else:
print(dir_save+" already exists.")
os.system("cp " + log_file + ' ' + dir_save)
torch.save(best_model.state_dict(), dir_save+'/best-model-{:.3f}-{}-{}.pth'.format(best_param['valid_loss'], best_param['valid_epoch'], pre_param["valid_epoch"]+real_epoch))
print("radius:{}, T:{}, fingerprint_dim:{}, weight_decay:{}, learning_rate:{}, p_dropout:{}".format(radius, T, fingerprint_dim, weight_decay, learning_rate, p_dropout))
return plot_loss, plot_precision, plot_recall, best_param, best_model, dir_save, time_epochs
def split_kfold(all_df):
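    # Stratified split: shuffle positive and negative row indices separately and cut
    # each into cv_num folds so every fold keeps the original class ratio; returns a
    # list of index lists, one per fold.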
all_df.reset_index(drop=True, inplace=True)
all_df['seqs'] = all_df.loc[0, 'seqs']
positive_df = all_df[all_df[TASK].isin([1])]
negative_df = all_df[all_df[TASK].isin([0])]
positive_index_list = list(positive_df.index)
negative_index_list = list(negative_df.index)
random.shuffle(positive_index_list)
random.shuffle(negative_index_list)
per_posi_cv_num = int(len(positive_index_list)/cv_num)
per_nega_cv_num = int(len(negative_index_list)/cv_num)
cv_index_list = []
for i in range(cv_num):
if i == cv_num - 1:
cv_index_list.append(positive_index_list[i*per_posi_cv_num:]+negative_index_list[i*per_nega_cv_num:])
else:
cv_index_list.append(positive_index_list[i*per_posi_cv_num: (i+1)*per_posi_cv_num]+negative_index_list[i*per_nega_cv_num: (i+1)*per_nega_cv_num])
return cv_index_list
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
    torch.manual_seed(seed)  # set the random seed for CPU
    torch.cuda.manual_seed(seed)  # set the random seed for the current GPU
    if n_gpu > 0:
        torch.cuda.manual_seed_all(seed)  # set the random seed for all GPUs
    torch.backends.cudnn.deterministic = True  # make CPU/GPU results consistent
    torch.backends.cudnn.benchmark = False  # speeds up training when the training inputs vary little
file_name = "./data/SARS-3CL_with_aff20_acsmpro_408pos_1358neg.xlsx"
test_file_name = "./data/3CL_enzymatic_activity_noaff_for_test_493pos_9459neg.xlsx"
new_add_file = "./data/SARS-3CL_414_1860_new_data_part.xlsx"
predict_file_name = "./data/mcule_201201_rdsmi_%d_small.csv"
nega_seed = 50 # [50, 3, 24]
parser = argparse.ArgumentParser()
parser.add_argument("--nega_seed", default=nega_seed, type=int, help="")
args = parser.parse_args()
nega_type = '8w_%d' % args.nega_seed
nega_file_name = "./data/SARS-3CL_no_aff_neg_test_29w_dropaff_8w_%d_small.xlsx" % args.nega_seed
load_model_path_list = ["./save/bert-finetune-0.016-102-147-8w_50-cv1/best-model-0.016-102-147.pth", \
"./save/bert-finetune-0.014-74-148-8w_50-cv2/best-model-0.014-74-148.pth", \
"./save/bert-finetune-0.012-148-168-8w_50-cv3/best-model-0.012-148-168.pth"]
model_type = "bert-finetune"
seq_model_type = model_type
task_type = "classification"
p_dropout = 0.4
epochs = 2
weight_decay = 4 # also known as l2_regularization_lambda
learning_rate = 3
patience = 30
radius = 3
T = 1
TASK = 'label'
SMILES = "rdkit_smiles"
cm_seq = 'seq'
nonspecial_vocab_size = 26
special_vocab_size = 5
max_seq_len = 512
fingerprint_dim=150
pro_gat_dim=64
n_gpu = 4
gpu_start = 0
cv_num = 3
per_gpu_batch_size = 32
do_train = False
do_predict = True
batch_size = per_gpu_batch_size * n_gpu
pro_seq_dim = 512
VOCAB_PATH = "./pretrained_model/protein_vocab.json"
cm_pickle_dir = './data/3d_pdb_v2'
seed = 3
set_seed(seed)
with open(VOCAB_PATH) as f:
vocab = json.load(f)
if do_train:
max_atom_len = 296 - 1
max_bond_len = 304 - 1
nega_data_handler = DataHandler(nega_file_name, max_atom_len=max_atom_len, max_bond_len=max_bond_len)
nega_feature_dicts = nega_data_handler.smile_feature_dict
nega_data_df = nega_data_handler.data_df
nega_data_df_sample = nega_data_df
data_handler = DataHandler(file_name, max_atom_len=max_atom_len, max_bond_len=max_bond_len)
all_df = data_handler.data_df
all_df = pd.concat([all_df, nega_data_df_sample], axis=0)
cv_index_list = split_kfold(all_df)
new_add_data_handler = DataHandler(new_add_file, max_atom_len=max_atom_len, max_bond_len=max_bond_len)
new_add_feature_dicts = new_add_data_handler.smile_feature_dict
new_add_data_df = new_add_data_handler.data_df
new_add_cv_index_list = split_kfold(new_add_data_df)
raw_feature_dicts = data_handler.smile_feature_dict
test_data_handler = DataHandler(test_file_name, max_atom_len=max_atom_len, max_bond_len=max_bond_len)
test_df = test_data_handler.data_df
test_df['seqs'] = all_df.loc[0, 'seqs']
test_feature_dicts = test_data_handler.smile_feature_dict
feature_dicts = {'smiles_to_atom_mask':{}, 'smiles_to_atom_info':{}, 'smiles_to_bond_info':{},
'smiles_to_atom_neighbors':{}, 'smiles_to_bond_neighbors':{}, 'smiles_to_rdkit_list':{}}
feature_dicts['smiles_to_atom_mask'].update(raw_feature_dicts['smiles_to_atom_mask'])
feature_dicts['smiles_to_atom_mask'].update(test_feature_dicts['smiles_to_atom_mask'])
feature_dicts['smiles_to_atom_mask'].update(nega_feature_dicts['smiles_to_atom_mask'])
feature_dicts['smiles_to_atom_mask'].update(new_add_feature_dicts['smiles_to_atom_mask'])
feature_dicts['smiles_to_atom_info'].update(raw_feature_dicts['smiles_to_atom_info'])
feature_dicts['smiles_to_atom_info'].update(test_feature_dicts['smiles_to_atom_info'])
feature_dicts['smiles_to_atom_info'].update(nega_feature_dicts['smiles_to_atom_info'])
feature_dicts['smiles_to_atom_info'].update(new_add_feature_dicts['smiles_to_atom_info'])
feature_dicts['smiles_to_bond_info'].update(raw_feature_dicts['smiles_to_bond_info'])
feature_dicts['smiles_to_bond_info'].update(test_feature_dicts['smiles_to_bond_info'])
feature_dicts['smiles_to_bond_info'].update(nega_feature_dicts['smiles_to_bond_info'])
feature_dicts['smiles_to_bond_info'].update(new_add_feature_dicts['smiles_to_bond_info'])
feature_dicts['smiles_to_atom_neighbors'].update(raw_feature_dicts['smiles_to_atom_neighbors'])
feature_dicts['smiles_to_atom_neighbors'].update(test_feature_dicts['smiles_to_atom_neighbors'])
feature_dicts['smiles_to_atom_neighbors'].update(nega_feature_dicts['smiles_to_atom_neighbors'])
feature_dicts['smiles_to_atom_neighbors'].update(new_add_feature_dicts['smiles_to_atom_neighbors'])
feature_dicts['smiles_to_bond_neighbors'].update(raw_feature_dicts['smiles_to_bond_neighbors'])
feature_dicts['smiles_to_bond_neighbors'].update(test_feature_dicts['smiles_to_bond_neighbors'])
feature_dicts['smiles_to_bond_neighbors'].update(nega_feature_dicts['smiles_to_bond_neighbors'])
feature_dicts['smiles_to_bond_neighbors'].update(new_add_feature_dicts['smiles_to_bond_neighbors'])
feature_dicts['smiles_to_rdkit_list'].update(raw_feature_dicts['smiles_to_rdkit_list'])
feature_dicts['smiles_to_rdkit_list'].update(test_feature_dicts['smiles_to_rdkit_list'])
feature_dicts['smiles_to_rdkit_list'].update(nega_feature_dicts['smiles_to_rdkit_list'])
feature_dicts['smiles_to_rdkit_list'].update(new_add_feature_dicts['smiles_to_rdkit_list'])
for choose_cv in range(1, cv_num+1):
valid_df = all_df.iloc[cv_index_list[choose_cv-1], :]
train_df = all_df.drop(cv_index_list[choose_cv-1], axis=0)
valid_df_add = new_add_data_df.iloc[new_add_cv_index_list[choose_cv-1], :]
train_df_add = new_add_data_df.drop(new_add_cv_index_list[choose_cv-1], axis=0)
valid_df = pd.concat([valid_df, valid_df_add], axis=0)
        train_df = pd.concat([train_df, train_df_add], axis=0)
""" Test cases for DataFrame.plot """
import warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
@td.skip_if_no_mpl
class TestDataFrameColor(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
import matplotlib as mpl
mpl.rcdefaults()
self.tdf = tm.makeTimeDataFrame()
self.hexbin_df = DataFrame(
{
"A": np.random.uniform(size=20),
"B": np.random.uniform(size=20),
"C": np.arange(20) + np.random.uniform(size=20),
}
)
def test_mpl2_color_cycle_str(self):
# GH 15516
df = DataFrame(np.random.randn(10, 3), columns=["a", "b", "c"])
colors = ["C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9"]
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", "MatplotlibDeprecationWarning")
for color in colors:
_check_plot_works(df.plot, color=color)
# if warning is raised, check that it is the exact problematic one
# GH 36972
if w:
match = "Support for uppercase single-letter colors is deprecated"
warning_message = str(w[0].message)
msg = "MatplotlibDeprecationWarning related to CN colors was raised"
assert match not in warning_message, msg
def test_color_single_series_list(self):
# GH 3486
df = DataFrame({"A": [1, 2, 3]})
_check_plot_works(df.plot, color=["red"])
@pytest.mark.parametrize("color", [(1, 0, 0), (1, 0, 0, 0.5)])
def test_rgb_tuple_color(self, color):
# GH 16695
        df = DataFrame({"x": [1, 2], "y": [3, 4]})
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import pandas as pd
from dash.dependencies import Input, Output, State, ClientsideFunction
from dash.exceptions import PreventUpdate
from enum import Enum, unique
from app import app
# import components and its generation
from components.upload.upload import upload_component_generate
from components.download.download import download_component_generate
from components.oversampling.oversampling import oversampling_component_generate
from components.tab.tabs import ft_tabs_generate
from components.loglinearswitch.axisSwitch import vertical_axis_swith
# import algorithm
from algorithm.oversample import get_oversampling_data
from algorithm.read_data import convert_lists_to_df, generate_df, generate_df_from_local, replace_dict_value
from algorithm.pwft import fast_ftdata
from algorithm.saving_process import combine_as_complex, six_decimal_saving
# Using your own app name. Can't be same.
prefix_app_name = "FTAPP"
# Selection options
@unique
class DOWNLOAD_OPTIONS(Enum):
OVERSAMPLED_RAW_DATA = 0
FT_RAW_DATA = 1
FT_OVERSAMPLED_DATA = 2
@unique
class TIME_DERIVATED(Enum):
NONTIME_DERIVATED = False
TIME_DERIVATED = True
# TODO need modify and change the algorithm plus with function
Layout = dbc.Row([
dbc.Col([
html.H5("Support .txt"),
html.Div([
upload_component_generate("FTAPP-upload"),
dcc.Store(id="FTAPP-raw-data-store", storage_type="session"),
dcc.Store(id="FTAPP-oversampling-data-store", storage_type="session"),
dcc.Store(id="FTAPP-ft-data-store", storage_type="session"),
dcc.Loading([dcc.Store(id="FTAPP-oversampled-ft-data-store", storage_type="session")],
id="FTAPP-full-screen-mask", fullscreen=True)
], className="btn-group me-2"),
html.Div([dbc.Button("Load Example data", id="FTAPP-load-example",
color="primary", style={"margin": "5px"})],
className="btn-group me-2"),
html.Div(id="FTAPP-upload-message"),
        # This is just for showing the loading message
html.Div(id="FTAPP-loading-message"),
html.Hr(),
oversampling_component_generate(prefix_app_name),
html.Hr(),
download_component_generate(prefix_app_name)
], width=3),
dbc.Col([
ft_tabs_generate(prefix_app_name),
vertical_axis_swith(prefix_app_name),
],
width=True),
# Loading
])
# ================ Upload callback ========================
"""
Triggered when the experimental data (raw data) is uploaded
"""
@app.callback(
Output("FTAPP-raw-data-store", "data"),
Output("FTAPP-ft-data-store", "data"),
# Output("FTAPP-upload-message", "children"),
Output("FTAPP-loading-message", "children"),
Input("FTAPP-upload", "contents"),
Input("FTAPP-load-example", "n_clicks"),
Input("FTAPP-refresh-btn", "n_clicks"),
State("FTAPP-g_0", "value"),
State("FTAPP-g_inf", "value"),
State("FTAPP-oversampling-Nf", "value"),
State("FTAPP-upload", "filename"),
State("FTAPP-raw-data-store", "data"),
State("FTAPP-ft-data-store", "data"),
prevent_initial_call=True
)
def store_raw_data(content, example_click, refresh_click,
g_0, g_inf, N_f, file_name,
prev_raw_data, prev_ft_raw_data):
# Deciding which raw_data used according to the ctx
ctx = dash.callback_context
button_id = ctx.triggered[0]['prop_id'].split('.')[0]
# If all the input value is default, it means no update
# if g_0 is None and g_inf is None and N_f is None:
# disable_refresh_updated = True
# else:
# disable_refresh_updated = False
# default g_0: 1, g_inf: 0
g_0 = 1 if g_0 is None else float(g_0)
g_inf = 0 if g_inf is None else float(g_inf)
N_f = 100 if N_f is None else int(N_f)
    df = pd.DataFrame()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 1 11:51:18 2021
@author: f004swn
"""
import numpy as np
import ast
import time
import cv2
import pandas as pd
#import face_recognition
from scenedetect import VideoManager, SceneManager
from scenedetect.detectors import ContentDetector
from tqdm import tqdm
import os.path as osp
import os
import subprocess
import traceback
from pathlib import Path
import gdown
from requests.exceptions import MissingSchema
import zipfile
from sklearn.metrics import confusion_matrix
from PIL import Image
import base64
from io import BytesIO
def img_to_b64(arr_img):
pil_img = Image.fromarray(arr_img)
prefix = "data:image/png;base64,"
with BytesIO() as stream:
pil_img.save(stream, format="png")
base64_string = prefix + base64.b64encode(stream.getvalue()).decode("utf-8")
return base64_string
#def video_to_bytes(vid_arr):
# out_strings = []
# for img in range(vid_arr.shape[0]):
# out_strings.append(img_to_b64(vid_arr[img]))
# return out_strings
def bytes_to_arr(bString):
    r = base64.b64decode(bString + "==")  # append padding so decoding tolerates stripped "=" characters
q = np.frombuffer(r, dtype=np.float64)
return q
def video_to_images(vid_file, img_folder=None, return_info=False):
if img_folder is None:
img_folder = osp.join('/scratch', osp.basename(vid_file).replace('.', '_'))
os.makedirs(img_folder, exist_ok=True)
command = ['ffmpeg',
'-i', vid_file,
'-f', 'image2',
'-v', 'error',
f'{img_folder}/%06d.png']
print(f'Running \"{" ".join(command)}\"')
subprocess.call(command)
print(f'Images saved to \"{img_folder}\"')
img_shape = cv2.imread(osp.join(img_folder, '000001.png')).shape
if return_info:
return img_folder, len(os.listdir(img_folder)), img_shape
else:
return img_folder
def from_np_array(array_string):
# this is old
if 'e' in array_string:
out = array_string.strip('[]').split(' ')
out = [i.strip('\n') for i in out]
out = [ast.literal_eval(i) for i in out]
return out
# converter function for interpreting face data csv
else:
array_string = ','.join(array_string.replace('[ ', '[').split())
return array_string
#return np.array(ast.literal_eval(array_string))
def string2list(string):
# converter function for interpreting face data csv
if '.' in string:
vals = [float(i) for i in string[1:-1].replace('.', '').split(' ') if i!='']
else:
vals = [float(i) for i in string[1:-1].replace(',', '').split(' ') if i!='']
return vals
def string_is_int(s):
try:
int(s)
return True
except ValueError:
return False
def ts_to_frame(ts, framerate):
# converts a timestamp to frame
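    # e.g. ts_to_frame("00:01:30", 25) -> 2250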
h, m, s = ts.split(':')
conv_ts = (int(h)*60*60 + int(m)*60 + int(s))*framerate
return round(conv_ts)
def frame_to_ts(frame, fps):
seconds = round(frame//fps)
ts = time.strftime('%H:%M:%S', time.gmtime(seconds))
return ts
def check_match(bod, fac):
# need to update this because of newly added py-feat face detection
    bcx, bcy, bw, bh = [float(i) for i in bod]  # body box given as centre x/y plus width/height
fcx, fcy, fw, fh = [float(i) for i in fac] #face corner is top left
top_b, right_b, bottom_b, left_b = [(bcy-bh/2), (bcx+bw/2), (bcy+bh/2), (bcx-bw/2)]
top_f, right_f, bottom_f, left_f = [fcy, fcx+fw, fcy+fh, fcx]
face_x = (right_f-left_f)/2 + left_f
face_y = (bottom_f-top_f)/2 + top_f
if (left_b < face_x < right_b and
top_b < face_y < bottom_b):
return True
else:
return False
def frame2array(frame_no, video_opened):
# returns a video from as a numpy array in uint8
video_opened.set(cv2.CAP_PROP_POS_FRAMES,frame_no)
ret, frame = video_opened.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
#cv2.destroyAllWindows()
return frame
def resize_image(array, newsize):
array = cv2.resize(array, dsize=newsize, interpolation=cv2.INTER_CUBIC)
array = np.expand_dims(array, axis=0)[0]
return array
def crop_image(array, bbox):
top, right, bottom, left = bbox
new_img = array[top:bottom, left:right, :]
return new_img
def crop_image_wh(array, data):
# The _wh means it is taking the py-feat face bounding box format
# Top-left corner x and y, then the width and height of the box from that point
cx, cy, w, h = [i for i in data]
# I don't think this calculation is right:
top, right, bottom, left = [int(round(i)) for i in [(cy-h/2), int(cx+w/2), int(cy+h/2), (cx-w/2)]]
new_img = array[top:bottom, left:right, :]
return new_img
def crop_image_body(array, data):
    # you can now just use one crop_image function because the body and face bboxes are in the same format
cx, cy, w, h = [i for i in data]
top, right, bottom, left = [int(round(i)) for i in [(cy-h/2), int(cx+w/2), int(cy+h/2), (cx-w/2)]]
new_img = array[top:bottom, left:right, :]
return new_img
def evaluate_pred_ID(charList, ground, pred):
#ground and pred need to be same-shape np arrays
# might be better to take dataframes so columns
# can be cross-referenced (less preprocessing of presence matrices)
chars = list(charList)
chars.insert(0, 'metric')
metrics = ['overall', 'true_positive', 'true_negative',
'false_positive', 'false_negative', 'true_presence_proportion']
    acc_df = pd.DataFrame(columns=chars)
import numpy as np
import pandas as pd
import tqsdk
from tqsdk.tafunc import time_to_str
import tqsdk.tafunc
import time
import numba
import re
import requests
import json
def 调仓函数(api,品种,目标值):
当前持仓=api.get_position(品种)
当前值=当前持仓.pos_long-当前持仓.pos_short
if 目标值>当前值:
需要增加仓位值=目标值-当前值
if 当前持仓.pos_short>0:
if 当前持仓.pos_short>=需要增加仓位值:
if 需要增加仓位值:
追单平仓2(api,品种,"BUY",需要增加仓位值)
else:
平仓值1=当前持仓.pos_short
开仓值1=需要增加仓位值-平仓值1
if 平仓值1:
追单平仓2(api,品种,"BUY",平仓值1)
if 开仓值1:
追单开仓2(api,品种,"BUY",开仓值1)
else:
if 需要增加仓位值:
追单开仓2(api,品种,"BUY",需要增加仓位值)
else:
需要减少仓位值=当前值-目标值
if 当前持仓.pos_long>0:
if 当前持仓.pos_long>=需要减少仓位值:
if 需要减少仓位值:
追单平仓2(api,品种,"SELL",需要减少仓位值)
else:
平仓值1=当前持仓.pos_long
开仓值1=需要减少仓位值-平仓值1
if 平仓值1:
追单平仓2(api,品种,"SELL",平仓值1)
if 开仓值1:
追单开仓2(api,品种,"SELL",开仓值1)
else:
if 需要减少仓位值:
追单开仓2(api,品种,"SELL",需要减少仓位值)
while True:
当前持仓=api.get_position(品种)
当前值=当前持仓.pos_long-当前持仓.pos_short
if 当前值==目标值:
break
api.wait_update()
当前持仓=api.get_position(品种)
当前值=当前持仓.pos_long-当前持仓.pos_short
if 当前值==目标值:
break
def 开仓有效时间(有效time_list):
当前时间=time.strftime("%H:%M",time.localtime())
时间判断真假=[ x[0]<=当前时间<x[1] for x in 有效time_list]
if any(时间判断真假):
return 1
return 0
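# Usage sketch for 开仓有效时间 (the window list below is an assumed example, not from the
# original code): each entry is a (start, end) pair of "HH:MM" strings compared against
# the current local time.
# 有效time_list = [("09:00", "11:30"), ("13:30", "15:00"), ("21:00", "23:00")]
# if 开仓有效时间(有效time_list):
#     pass  # inside a tradable window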
def 查询最近成交时间(api,品种,买卖,开平):
总成交=api.get_trade()
l=[]
for x in 总成交:
成交信息=api.get_trade(x)
l.append((成交信息.trade_date_time,成交信息.direction,成交信息.offset,\
成交信息.price,成交信息.exchange_id+"."+成交信息.instrument_id ))
l=sorted(l,key=lambda x: x[0],reverse=True)
for x in l:
if 买卖=="买" and 开平=="开":
for x in l:
if x[1]=="BUY" and x[2]=="OPEN" and x[4]==品种:
return x[0]
if 买卖=="买" and 开平=="平":
for x in l:
if x[1]=="BUY" and x[2] in ("CLOSE","CLOSETODAY") and x[4]==品种:
return x[0]
if 买卖=="卖" and 开平=="开":
for x in l:
if x[1]=="SELL" and x[2]=="OPEN" and x[4]==品种:
return x[0]
if 买卖=="卖" and 开平=="平":
for x in l:
if x[1]=="SELL" and x[2] in ("CLOSE","CLOSETODAY") and x[4]==品种:
return x[0]
def 维护持仓_redis(连接,品种,买卖,开平,价格,数量):
全品种=[ x.decode() for x in 连接.keys("*")]
if 品种 not in 全品种:
data={"多仓":0,"空仓":0,"多仓成本":0,"空仓成本":0}
else:
data=eval(连接.get(品种))
if 买卖=="买" and 开平=="开":
data["多仓成本"]= (data["多仓"]*data["多仓成本"]+价格*数量)/(data["多仓"]+数量)
data["多仓"]=data["多仓"]+数量
if 买卖=="买" and 开平=="平":
data["空仓"]=data["空仓"]-数量
if 买卖=="卖" and 开平=="开":
data["空仓成本"]= (data["空仓"]*data["空仓成本"]+价格*数量)/(data["空仓"]+数量)
data["空仓"]=data["空仓"]+数量
if 买卖=="卖" and 开平=="平":
data["多仓"]=data["多仓"]-数量
连接.set(品种,str(data))
return data
def 获取持仓_redis(连接,品种):
全品种=[ x.decode() for x in 连接.keys("*")]
if 品种 not in 全品种:
data={"多仓":0,"空仓":0,"多仓成本":0,"空仓成本":0}
连接.set(品种,str(data))
else:
data=eval(连接.get(品种).decode())
return data
def 维护持仓(路径地址,买卖,开平,价格,数量):
try:
f=open(路径地址,"r")
data=eval(f.read())
f.close()
except:
f=open(路径地址,"w")
f.close()
data={"多仓":0,"空仓":0,"多仓成本":0,"空仓成本":0}
if 买卖=="买" and 开平=="开":
data["多仓成本"]= (data["多仓"]*data["多仓成本"]+价格*数量)/(data["多仓"]+数量)
data["多仓"]=data["多仓"]+数量
if 买卖=="买" and 开平=="平":
data["空仓"]=data["空仓"]-数量
if 买卖=="卖" and 开平=="开":
data["空仓成本"]= (data["空仓"]*data["空仓成本"]+价格*数量)/(data["空仓"]+数量)
data["空仓"]=data["空仓"]+数量
if 买卖=="卖" and 开平=="平":
data["多仓"]=data["多仓"]-数量
f=open(路径地址,"w")
f.write(str(data))
f.close()
return data
def 获取持仓(路径地址):
try:
f=open(路径地址,"r")
data=eval(f.read())
f.close()
except:
f=open(路径地址,"w")
f.close()
data={"多仓":0,"空仓":0,"多仓成本":0,"空仓成本":0}
return data
def 查询最近成交价格(api,买卖,开平):
总成交=api.get_trade()
l=[]
for x in 总成交:
成交信息=api.get_trade(x)
l.append((time_to_str(成交信息.trade_date_time),成交信息.direction,成交信息.offset,成交信息.price))
l=sorted(l,key=lambda x: x[0],reverse=True)
for x in l:
if 买卖=="买" and 开平=="开":
for x in l:
if x[1]=="BUY" and x[2]=="OPEN":
return x[3]
if 买卖=="买" and 开平=="平":
for x in l:
if x[1]=="BUY" and x[2] in ("CLOSE","CLOSETODAY"):
return x[3]
if 买卖=="卖" and 开平=="开":
for x in l:
if x[1]=="SELL" and x[2]=="OPEN":
return x[3]
if 买卖=="卖" and 开平=="平":
for x in l:
if x[1]=="SELL" and x[2] in ("CLOSE","CLOSETODAY"):
return x[3]
def 追单开仓2(api,品种,方向,需要平的数量,增加的点差=20):
qutoe=api.get_quote(品种)
最小变动单位=查询合约相关信息(api,品种)["合约最小跳数"]
if 方向=="SELL":
下单价格=qutoe.lower_limit if qutoe.last_price-最小变动单位*增加的点差< qutoe.lower_limit else qutoe.last_price-最小变动单位*增加的点差
else:
下单价格=qutoe.upper_limit if qutoe.last_price+最小变动单位*增加的点差> qutoe.upper_limit else qutoe.last_price+最小变动单位*增加的点差
a=api.insert_order(品种,方向,"OPEN",需要平的数量,下单价格)
while True:
api.wait_update()
a=api.get_order(a.order_id)
if a.status=="FINISHED":
成交价格=a.trade_price
break
print("成交价格",成交价格)
return 成交价格
def 追单平仓2(api,品种,方向,需要平的数量,增加的点差=20):
持仓=api.get_position(品种)
多仓=持仓.pos_long_his+持仓.pos_long_today
空仓=持仓.pos_short_his+持仓.pos_short_today
qutoe=api.get_quote(品种)
最小变动单位=查询合约相关信息(api,品种)["合约最小跳数"]
成交价格=0
if 品种.split('.')[0] in ('SHFE','INE') :
if 方向=="SELL":
下单价格=qutoe.lower_limit if qutoe.last_price-最小变动单位*增加的点差< qutoe.lower_limit else qutoe.last_price-最小变动单位*增加的点差
平昨日仓=min(需要平的数量,持仓.pos_long_his)
if 平昨日仓:
a=api.insert_order(品种,方向,'CLOSE',平昨日仓,limit_price=下单价格)
#tqsdk.lib.InsertOrderUntilAllTradedTask(api,品种,方向,"CLOSE",平昨日仓)
while True:
api.wait_update()
a=api.get_order(a.order_id)
if a.status=="FINISHED":
成交价格=a.trade_price
break
还需平的仓=需要平的数量-平昨日仓
平今日仓=min(还需平的仓,持仓.pos_long_today)
if 平今日仓:
a=api.insert_order(品种,方向,'CLOSETODAY',平今日仓,limit_price=下单价格)
#tqsdk.lib.InsertOrderUntilAllTradedTask(api,品种,方向,"CLOSETODAY",平今日仓)
while True:
api.wait_update()
a=api.get_order(a.order_id)
if a.status=="FINISHED":
成交价格=a.trade_price
break
else:
下单价格=qutoe.upper_limit if qutoe.last_price+最小变动单位*增加的点差> qutoe.upper_limit else qutoe.last_price+最小变动单位*增加的点差
平昨日仓=min(需要平的数量,持仓.pos_short_his)
if 平昨日仓:
a=api.insert_order(品种,方向,'CLOSE',平昨日仓,limit_price=下单价格)
#tqsdk.lib.InsertOrderUntilAllTradedTask(api,品种,方向,"CLOSE",平昨日仓)
while True:
api.wait_update()
a=api.get_order(a.order_id)
if a.status=="FINISHED":
成交价格=a.trade_price
break
还需平的仓=需要平的数量-平昨日仓
平今日仓=min(还需平的仓,持仓.pos_short_today)
if 平今日仓:
a=api.insert_order(品种,方向,'CLOSETODAY',平今日仓,limit_price=下单价格)
while True:
api.wait_update()
a=api.get_order(a.order_id)
if a.status=="FINISHED":
成交价格=a.trade_price
break
#tqsdk.lib.InsertOrderUntilAllTradedTask(api,品种,方向,"CLOSETODAY",平今日仓)
else:
if 方向=="SELL":
下单价格=qutoe.lower_limit if qutoe.last_price-最小变动单位*增加的点差< qutoe.lower_limit else qutoe.last_price-最小变动单位*增加的点差
if min(多仓,需要平的数量):
a=api.insert_order(品种,方向,'CLOSE',min(多仓,需要平的数量),limit_price=下单价格)
#tqsdk.lib.InsertOrderUntilAllTradedTask(api,品种,方向,"CLOSE",min(多仓,需要平的数量))
while True:
api.wait_update()
a=api.get_order(a.order_id)
if a.status=="FINISHED":
成交价格=a.trade_price
break
else:
下单价格=qutoe.upper_limit if qutoe.last_price+最小变动单位*增加的点差> qutoe.upper_limit else qutoe.last_price+最小变动单位*增加的点差
if min(空仓,需要平的数量):
a=api.insert_order(品种,方向,'CLOSE',min(空仓,需要平的数量),limit_price=下单价格)
#tqsdk.lib.InsertOrderUntilAllTradedTask(api,品种,方向,"CLOSE",min(空仓,需要平的数量))
while True:
api.wait_update()
a=api.get_order(a.order_id)
if a.status=="FINISHED":
成交价格=a.trade_price
break
# 持仓=api.get_position(品种)
# while True:
# if 方向=="SELL":
# if 持仓.pos_long==0:
# break
# else:
# if 持仓.pos_short==0:
# break
# api.wait_update()
# time.sleep(1)
return 成交价格
def 品种无交易所简称查带交易简称(api,无交易所的简称列表):
主力合约=查询所有主力合约(api)
全称列表=[ x.split("@")[1] for x in 主力合约 ]
简称列表=[ x.split(".")[1] for x in 全称列表]
l=[]
try:
for x in 无交易所的简称列表:
l.append(全称列表[简称列表.index(x)])
except:
print(x,"没有这个品种")
return l
def 根据合约名字列表和周期查询出data字典(合约名称列表,周期):
d={}
for x in 合约名称列表:
if "@" not in x:
大类名字= re.findall("([a-zA-Z]*)[0-9]{1,}$",x)[0]
else:
大类名字= x.split(".")[-1]
data=pd.read_csv(".\\data\\"+大类名字+"\\"+x+"\\"+str(周期)+".csv",header=None)
d[x]=data.to_numpy()
return d
def numpy_to_pandas(data):
return pd.DataFrame(data,columns=["datetime",'open','high','low','close',"volume","open_oi","close_oi"])
def 根据限定时间查到涉及到的具体合约(start时间,end时间,查询列表,历史data文件夹):
l=[]
startnew_timestamp=time.mktime(time.strptime(start时间,"%Y-%m-%d"))*1e9
endnew_timestamp=time.mktime(time.strptime(end时间,"%Y-%m-%d"))*1e9
for x in 查询列表:
data=pd.read_csv(历史data文件夹+"\\"+x+'.csv')
for y in range(len(data)):
#print(startnew_timestamp,data.datetime.iloc[x],endnew_timestamp)
if startnew_timestamp<data.datetime.iloc[y]<endnew_timestamp:
if data.symbol_main.iloc[y] not in l:
l.append(data.symbol_main.iloc[y])
return l
def dingding_message(地址,内容):
headers = {'Content-Type': 'application/json'}
data = {
"msgtype": "text",
"text": {
"content": 内容
}
}
requests.post(地址, data=json.dumps(data), headers=headers)
def 查询合约中文名(品种):
合约名字=品种.split('.')[1]
合约具体=re.findall("([a-zA-Z]{1,})",合约名字)[0]
# print(合约具体)
d={
"IF":"IF",
"IH":"IH",
"IC":"IC",
"TS":"二债",
"TF":"五债",
"TF":"五债",
"T":"十债",
"cu":"沪铜",
"zn":"沪锌",
"al":"沪铝",
"pd":"沪铅",
"ni":"沪镍",
"sn":"沪锡",
"au":"沪金",
"ag":"沪银",
'rb':"螺纹",
'wr':"线材",
'hc':"热卷",
'ss':"SS",
'fu':"燃油",
'bu':"沥青",
'ru':"橡胶",
'sp':"纸浆",
"m":"豆粕",
"y":"豆油",
"a":"豆一",
"b":"豆二",
"p":"棕榈",
"c":"玉米",
"cs":"淀粉",
"rr":"粳米",
"jd":"鸡蛋",
"bb":"胶板",
"fb":"纤板",
"l":"塑料",
"v":"PVC",
"eg":"EG",
"pp":"PP" ,
"eb":"EB",
"j":"焦炭",
"jm":"焦煤",
"i":"铁矿",
"pg":"LPG",
"SR":"白糖",
"CF":"郑棉",
"CY":"棉纱",
"ZC":"郑煤",
"FG":"玻璃",
"TA":"PTA" ,
"MA":"郑醇",
"UR":"尿素",
"SA":"纯碱",
"WH":"郑麦",
"RI":"早稻",
"LR":"晚稻",
"JR":"粳稻",
"RS":"菜籽",
"OI":"郑油",
"RM":"菜粕",
"SF":"硅铁",
"SM":"锰硅",
"AP":"苹果",
"CJ":"红枣",
"sc":"原油",
"nr":"NR",
}
if 合约具体 in d:
return d[合约具体]
return 合约具体
def ea_break(api,成交记录文件地址):
struct_time=time.localtime()
时间=time.strftime("%H:%M:%S",struct_time)
if "02:30:00"<=时间<"08:30:00":
return 1
if "15:15:00"<=时间<"20:30:00":
try:
f=open(成交记录文件地址,"r")
data=f.read()
f.close()
except:
data=""
f=open(成交记录文件地址,'a+')
a=api.get_trade()
if not data:
f.write("成交时间,成交品种,下单方向,开平标志,委托价格,成交价格,成交手数,委托单号,成交单号,成交手续费\n")
for x in a:
b=api.get_trade(x)
            # look up the order that produced this trade
c=api.get_order(b.order_id)
f.write(','.join([time_to_str(b.trade_date_time),
b.exchange_id+b.instrument_id,
b.direction,
b.offset,
str(c.limit_price),
str(b.price),
str(b.volume),
b.order_id,
b.trade_id,
str(b.commission)])+'\n'
)
f.close()
return 1
return 0
def 订阅异常(api,x):
try:
return api.get_kline_serial(x,60*60*24,2000)
except:
return 0
def 查询品种主力的月份(api,品种):
t1=time.time()
总合约=查询所有合约(api)
t2=time.time()
品种相关合约=[ x for x in 总合约 if re.findall("^"+品种+"[0-9]{1,}$",x)]
品种相关合约=sorted(品种相关合约)
t3=time.time()
订阅data合约字典={}
# 订阅data合约字典={ x:api.get_kline_serial(x,60*60*24,2000) for x in 品种相关合约}
for x in 品种相关合约:
a=订阅异常(api,x)
if type(a) !=type(0):
订阅data合约字典[x]=a
t4=time.time()
订阅data合约新字典={ x:{ 订阅data合约字典[x].datetime.iloc[y] : 订阅data合约字典[x].volume.iloc[y] for y in range(len(订阅data合约字典[x]))} for x in 订阅data合约字典}
t5=time.time()
订阅主连=api.get_kline_serial("KQ.m@"+品种,60*60*24,2000)
t6=time.time()
l=[]
for x in range(0,2000,10):
if 订阅主连.datetime.iloc[x]:
日期=订阅主连.datetime.iloc[x]
Volume=订阅主连.volume.iloc[x]
for y in 订阅data合约新字典:
if 日期 in 订阅data合约新字典[y]:
if 订阅data合约新字典[y][日期]==Volume:
l.append(y)
break
else:
l.append("")
else:
l.append("")
t7=time.time()
l.append(api.get_quote("KQ.m@"+品种)["underlying_symbol"])
t8=time.time()
#print("t2-t1",t2-t1,"t3-t2",t3-t2,"t4-t3",t4-t3,"t5-t4",t5-t4,"t6-t5",t6-t5,"t7-t6",t7-t6,"t8-t7",t8-t7)
b=set([ x[-2:] for x in l if x])
return b
def 查询品种能成为主力合约的合约(api,品种):
总合约=查询所有合约(api)
品种相关合约=[ x for x in 总合约 if re.findall("^"+品种+"[0-9]{1,}$",x)]
主力月=查询品种主力的月份(api,品种)
l=[]
for x in 品种相关合约:
if x[-2:] in 主力月:
l.append(x)
return l
def 查询品种历史主连映射(api,品种):
总合约=查询所有合约(api)
品种相关合约=[ x for x in 总合约 if re.findall("^"+品种+"[0-9]{1,}$",x)]
合约月份=查询品种主力的月份(api,品种)
品种相关合约=[ x for x in 品种相关合约 if x[-2:] in 合约月份]
#订阅data合约字典={ x:api.get_kline_serial(x,60*60*24,2000) for x in 品种相关合约}
订阅data合约字典={}
for x in 品种相关合约:
a=订阅异常(api,x)
if type(a) !=type(0):
订阅data合约字典[x]=a
订阅data合约新字典={ x:{ 订阅data合约字典[x].datetime.iloc[y] : 订阅data合约字典[x].volume.iloc[y] for y in range(len(订阅data合约字典[x]))} for x in 订阅data合约字典}
订阅主连=api.get_kline_serial("KQ.m@"+品种,60*60*24,2000)
l=[]
for x in range(2000):
if 订阅主连.datetime.iloc[x]:
日期=订阅主连.datetime.iloc[x]
Volume=订阅主连.volume.iloc[x]
for y in 订阅data合约新字典:
if 日期 in 订阅data合约新字典[y]:
if 订阅data合约新字典[y][日期]==Volume:
l.append(y)
break
else:
l.append("")
else:
l.append("")
l[-1]=api.get_quote("KQ.m@"+品种)["underlying_symbol"]
return pd.DataFrame({"datetime":订阅主连.datetime,"symbol_main":pd.Series(l)})
def 查询当前时间是否在交易时间内(api,品种,秒数):
交易时间=查询合约相关信息(api,品种)['交易时间']
交易时间=交易时间["day"]+交易时间["night"]
l=[]
for x in 交易时间:
if x[1]>"24:00:00":
l.append([x[0],"24:01:00"])
l.append(["00:00:00","%02d"%(int(x[1][:2])-24)+x[1][2:]])
else:
l.append(x)
交易时间=l
t1=time.time()-秒数
t1=time.localtime(t1)
t1=time.strftime("%H:%M:%S",t1)
t2=time.time()+秒数
t2=time.localtime(t2)
t2=time.strftime("%H:%M:%S",t2)
data1= [ 1 if x[0]<=t1<x[1] else 0 for x in 交易时间 ]
data2= [ 1 if x[0]<=t2<x[1] else 0 for x in 交易时间 ]
if any(data1) and any(data2):
return 1
return 0
def 查询所有合约(api):
return [ x for x in api._data["quotes"]]
def 查询所有在交易合约(api):
return [ x for x in api._data["quotes"] if api._data["quotes"][x]["expired"]==False]
def 查询所有主力合约(api):
return [ x for x in 查询所有合约(api) if ".m@" in x]
def 查询所有指数合约(api):
return [ x for x in 查询所有合约(api) if ".i@" in x]
def 查询所有主力合约映射到的具体合约(api):
合约原=查询所有主力合约(api)
return [ api._data["quotes"][x]['underlying_symbol'] for x in 合约原]
def 查询合约相关信息(api,品种):
data=api._data['quotes'][品种]
d={}
d['交易时间']=data['trading_time']
d['合约倍数']=data['volume_multiple']
d['合约最小跳数']=data['price_tick']
return d
def 撤销所有平多订单(api,品种):
全部订单=api.get_order()
for x in 全部订单:
单一订单=api.get_order(x)
if 单一订单.instrument_id==品种.split(".")[1] and 单一订单.status=="ALIVE" and 单一订单.direction =="SELL" and (单一订单.offset=="CLOSE" or 单一订单.offset=="CLOSETODAY"):
api.cancel_order(单一订单.order_id)
api.wait_update()
while True:
a=api.get_order(x)
if a.status!="ALIVE":
break
api.wait_update()
def 撤销所有平空订单(api,品种):
全部订单=api.get_order()
for x in 全部订单:
单一订单=api.get_order(x)
if 单一订单.instrument_id==品种.split(".")[1] and 单一订单.status=="ALIVE" and 单一订单.direction =="BUY" and (单一订单.offset=="CLOSE" or 单一订单.offset=="CLOSETODAY"):
api.cancel_order(单一订单.order_id)
api.wait_update()
while True:
a=api.get_order(x)
if a.status!="ALIVE":
break
api.wait_update()
def WH_获取时间对应id(行情,时间点):
a=time.strptime(时间点,'%Y-%m-%d %H:%M:%S')
时间点new_timestamp=time.mktime(a)*1e9
for x in range(len(行情)):
if 行情.datetime.iloc[x]>=时间点new_timestamp:
return 行情.id.iloc[x]
def psar(barsdata, iaf = 0.02, maxaf = 0.2):
length = len(barsdata)
# dates = list(barsdata['Date'])
high = list(barsdata['high'])
low = list(barsdata['low'])
close = list(barsdata['close'])
psar = close[0:len(close)]
psarbull = [None] * length
psarbear = [None] * length
bull = True
af = iaf
ep = low[0]
hp = high[0]
lp = low[0]
for i in range(2,length):
if bull:
psar[i] = psar[i - 1] + af * (hp - psar[i - 1])
else:
psar[i] = psar[i - 1] + af * (lp - psar[i - 1])
reverse = False
if bull:
if low[i] < psar[i]:
bull = False
reverse = True
psar[i] = hp
lp = low[i]
af = iaf
else:
if high[i] > psar[i]:
bull = True
reverse = True
psar[i] = lp
hp = high[i]
af = iaf
if not reverse:
if bull:
if high[i] > hp:
hp = high[i]
af = min(af + iaf, maxaf)
if low[i - 1] < psar[i]:
psar[i] = low[i - 1]
if low[i - 2] < psar[i]:
psar[i] = low[i - 2]
else:
if low[i] < lp:
lp = low[i]
af = min(af + iaf, maxaf)
if high[i - 1] > psar[i]:
psar[i] = high[i - 1]
if high[i - 2] > psar[i]:
psar[i] = high[i - 2]
if bull:
psarbull[i] = psar[i]
else:
psarbear[i] = psar[i]
return pd.Series(psar)
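# Minimal hedged sketch of calling psar on a small bar DataFrame (column names follow the
# function body: 'high', 'low', 'close'; the numbers are made up).
# bars = pd.DataFrame({"high": [10, 11, 12, 11, 13],
#                      "low": [9, 10, 11, 10, 12],
#                      "close": [9.5, 10.5, 11.5, 10.5, 12.5]})
# print(psar(bars).tolist())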
def WH_barlast(data,yes=True):
    # get the index positions where the condition is True
    b=data[data==yes].index
    # blank out the entries that are not True
    c=data.where(data==yes,np.NaN)
    # at the True positions, store the index value itself
    c[b]= b
    # forward-fill the series
    d=c.fillna(method='ffill')
    # return, for each bar, the distance back to the last True position
    return c.index-d
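# Hedged example (made-up input): WH_barlast returns, for every bar, how many bars
# have passed since the condition was last True.
# cond = pd.Series([True, False, False, True, False])
# print(WH_barlast(cond).tolist())  # -> 0, 1, 2, 0, 1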
# DATE: derive the trading date for each bar
def WH_DATE(time_Series,time_Series2):
    # format the timestamps into the required string forms
b=time_Series.apply(lambda x:time.strftime("%H:%M:%S",time.localtime(x/1e9)))
c=time_Series2.apply(lambda x:time.strftime("%Y-%m-%d",time.localtime(x/1e9)))
当前状态=''
l=[]
for x in range(len(b)):
if x==0:
if b.iloc[x]>="21:00:00":
当前状态="第一天夜里"
l.append(x)
else:
if b.iloc[x]=="21:00:00" :
当前状态="第一天夜里"
l.append(x)
if b.iloc[x]=="09:00:00":
if 当前状态=="第一天夜里":
当前状态="第二天白天"
else:
当前状态="第二天白天"
l.append(x)
d=pd.Series([ np.NaN for x in range(len(time_Series))])
d[l]=c.tolist()[-len(l):]
d=d.fillna(method='ffill')
return d
def WH_转换为年月日UpdateTime(time_Series):
return time_Series.apply(lambda x:time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(x/1e9)))
# api=tqsdk.TqApi()
# k=api.get_kline_serial("SHFE.rb2005",60*60)
# k1=api.get_kline_serial("SHFE.rb2005",60*60*24)
# print(WH_DATE(k.datetime,k1.datetime).tolist())
# api.close()
@numba.jit
def numba_hhv(data,N):
    newdata=np.full(len(N),0.0)
for x in range(len(N)):
temp=data[x]
for y in range(N[x]):
if data[x-y]>temp:
temp=data[x-y]
newdata[x]=temp
return newdata
def WH_hhv(H,NN):
NN=NN.fillna(1)
data=H.to_numpy()
N=NN.to_numpy()
new_data=numba_hhv(data,N)
return pd.Series(new_data)
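# Hedged example (made-up input): highest high over a per-bar lookback length N;
# note N should not exceed the current bar index + 1, otherwise the window wraps around.
# H = pd.Series([1.0, 3.0, 2.0, 5.0, 4.0])
# N = pd.Series([1, 2, 2, 3, 2])
# print(WH_hhv(H, N).tolist())  # -> 1, 3, 3, 5, 5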
@numba.jit
def numba_llv(data,N):
    newdata=np.full(len(N),0.0)
for x in range(len(N)):
temp=data[x]
for y in range(N[x]):
if data[x-y]<temp:
temp=data[x-y]
newdata[x]=temp
return newdata
def WH_llv(H,NN):
NN=NN.fillna(1)
data=H.to_numpy()
N=NN.to_numpy()
    new_data=numba_llv(data,N)
return pd.Series(new_data)
def WH_ref(H,NN):
H.fillna(1)
NN=NN.fillna(1)
滑动计算集合=set(NN)
H1=H.copy()
for x in 滑动计算集合:
索引=NN[NN==x].index
H1[索引]=H.shift(int(x))[索引]
return H1
# @numba.jit
# def numba_ref(data,N):
# newdata=np.full(len(N),0.0)
# for x in range(len(N)):
# newdata[x]=data[x-N[x]]
# return newdata
# def WH_ref(H,NN):
# NN=NN.fillna(0)
# data=H.to_numpy()
# N=NN.to_numpy()
# new_data=numba_ref(data,N)
# return pd.Series(new_data)
# a=pd.Series([1,1,1,2,2,2,3,3,3,4,4,4])
# b=pd.Series([0,1,1,2,2,2,3,3,3,4,4,4])
# print(WH_ref(a,b))
# VALUEWHEN(COND, X): when COND holds, take the current value of X; otherwise keep X's value from the last bar where COND held.
def WH_VALUEWHEN(COND,X):
b=COND[COND].index
c=COND.where(COND,np.NaN)
c[b]= X[b]
d=c.fillna(method='ffill')
return d
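# Hedged example (made-up data): carry forward the value of X from the last bar where
# COND was True.
# COND = pd.Series([True, False, True, False, False])
# X = pd.Series([10, 20, 30, 40, 50])
# print(WH_VALUEWHEN(COND, X).tolist())  # -> 10, 10, 30, 30, 30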
def WH_OPENMINUTE(time_Series,有无夜盘="有"):
b=time_Series.apply(lambda x:time.strftime("%H:%M:%S",time.localtime(x/1e9)))
if 有无夜盘=="有":
真值序列索引= b[b=="21:00:00"].index
else:
真值序列索引= b[b=="9:00:00"].index
d=pd.Series([ np.NaN for x in range(len(time_Series))])
d[真值序列索引]=time_Series[真值序列索引]
d=d.fillna(method='ffill')
return (time_Series-d)/(1e9*60)
def WH_and(*args):
return pd.Series( ( all(x) for x in zip(*args)))
def WH_or(*args):
return pd.Series( ( any(x) for x in zip(*args)))
def WH_max(a,b):
a=a.fillna(0)
    b=b.fillna(0)
return tqsdk.tafunc.max(a,b)
def WH_信号间隔过滤(开多仓真值序列1,间隔):
temp=-1
l=[]
for x,v in 开多仓真值序列1.items():
if v:
if temp==-1 or x-temp>间隔:
l.append(True)
temp=x
else:
l.append(False)
else:
l.append(False)
return pd.Series(l)
# a=[1,0,0,1,1,0,0,0,0,1,1,1,0,1]
# a=pd.Series(a)
# print(WH_信号间隔过滤(a,2))
def WH_开平点计算持仓(开仓点列表,平仓点列表,方向):
空值序列=pd.Series(np.full([len(开仓点列表[0])],np.NaN))
#print(空值序列)
for x in range(len(开仓点列表)):
#print(开仓点列表[x])
a索引=开仓点列表[x][开仓点列表[x]==1].index
空值序列[a索引]=方向
for x in range(len(平仓点列表)):
a索引=平仓点列表[x][平仓点列表[x]==1].index
空值序列[a索引]=0
持仓矩阵=空值序列.fillna(method="ffill")
# print(持仓矩阵)
if 方向==1:
开仓点矩阵=(持仓矩阵-持仓矩阵.shift(1))>0
平仓点矩阵=(持仓矩阵-持仓矩阵.shift(1))<0
else:
开仓点矩阵=(持仓矩阵-持仓矩阵.shift(1))<0
平仓点矩阵=(持仓矩阵-持仓矩阵.shift(1))>0
return 空值序列.fillna(method="ffill"),开仓点矩阵,平仓点矩阵
def WH_算盈利(开仓点列表,平仓点列表,方向,开仓价格,平仓价格):
持仓过程,a,b=WH_开平点计算持仓(开仓点列表,平仓点列表,方向)
# print(250)
a索引=a[a==1].index
b索引=b[b==1].index
c1=pd.Series(np.full((1,len(a)),np.NaN)[0])
c1[a索引]= 开仓价格[a索引]
开仓成本=c1.fillna(method='ffill')
持仓过程[b索引]=方向
return (平仓价格-开仓成本)*持仓过程
def WH_算开仓成本(开仓点列表,平仓点列表,方向,开仓价格):
持仓过程,a,b=WH_开平点计算持仓(开仓点列表,平仓点列表,方向)
hehe=开仓点列表[0]
# print(len(hehe[hehe==1].index))
# print("看看")
# print(a)
# print(b)
a索引=a[a==1].index
b索引=b[b==1].index
c1=pd.Series(np.full((1,len(a)),np.NaN)[0])
# print(开仓价格)
c1[a索引]= 开仓价格[a索引]
# print(250250250)
开仓成本=c1.fillna(method='ffill')
#持仓过程[b索引]=方向
# print(开仓成本)
return 开仓成本
def WH_WEEKDAY(time_Series,time_Series2,有无夜盘="有"):
日期序列=WH_DATE(time_Series,time_Series2)
# print(日期序列)
日期序列=日期序列.fillna("1970-01-01")
b=日期序列.apply(lambda x: (time.strptime(x,"%Y-%m-%d").tm_wday)+1)
return b
def 算持仓(a,b):
a索引=a[a==1].index
b索引=b[b==1].index
c=pd.Series(np.full((1,len(a)),np.NaN)[0])
c[a索引]=1
c[b索引]=0
d=c.fillna(method="ffill")
return d
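# Hedged example (made-up signals): a marks entries, b marks exits; the result is the
# implied position series (1 while holding, 0 after the exit, NaN before any signal).
# a = pd.Series([1, 0, 0, 0, 0])
# b = pd.Series([0, 0, 0, 1, 0])
# print(算持仓(a, b).tolist())  # -> 1, 1, 1, 0, 0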
def 算盈利(a,b,开盘价格):
a索引=a[a==1].index
b索引=b[b==1].index
c1=pd.Series(np.full((1,len(a)),np.NaN)[0])
d=算持仓(a,b)
c1[a索引]= 开盘价格[a索引]
开仓成本=c1.fillna(method='ffill')
d[b索引]=1
return (开盘价格-开仓成本)*d
def 算盈利_开平价格不同(a,b,开仓价格,平仓价格):
a索引=a[a==1].index
b索引=b[b==1].index
c1=pd.Series(np.full((1,len(a)),np.NaN)[0])
d=算持仓(a,b)
c1[a索引]= 开仓价格[a索引]
开仓成本=c1.fillna(method='ffill')
d[b索引]=1
return (平仓价格-开仓成本)*d
#平仓价格字典 (exit-price dict) = {"平仓点序列":[a,b,c],"平仓点价格序列":[a1,b1,c1]}; keys map exit-signal series to their matching exit-price series
def WH_算平仓后盈利金额_开平价格不同_平仓价格多序列(a,b,开仓价格,平仓价格字典):
a1=pd.Series(np.full([len(开仓价格)],np.NaN))
b1=pd.Series(np.full([len(开仓价格)],np.NaN))
a1[a]=1
b1[b]=1
a1=a1.fillna(0)
b1=b1.fillna(0)
b的真值索引=b[b==1].index
平仓价格序列=pd.Series(np.full([len(开仓价格)],np.NaN))
for x in range(len(平仓价格字典["平仓点序列"])):
找到真值索引=平仓价格字典["平仓点序列"][x][平仓价格字典["平仓点序列"][x]==1].index
交集序列索引=b的真值索引&找到真值索引
平仓价格序列[交集序列索引]=平仓价格字典["平仓点价格序列"][x][交集序列索引]
浮盈=算盈利_开平价格不同(a1,b1,开仓价格,平仓价格序列)
b索引=b1[b1==1].index
return 浮盈[b索引]
# def WH_算平仓后盈利金额(a,b,开仓价格,平仓价格):
# a1=pd.Series(np.full([len(开仓价格)],np.NaN))
# b1=pd.Series(np.full([len(开仓价格)],np.NaN))
# a1[a]=1
# b1[b]=1
# a1=a1.fillna(0)
# b1=b1.fillna(0)
# 浮盈=算盈利_开平价格不同(a1,b1,开仓价格,平仓价格)
# b索引=b1[b1==1].index
# return 浮盈[b索引]
def WH_算平仓后盈利金额(a,b,开盘价格):
a1=pd.Series(np.full([len(开盘价格)],np.NaN))
b1=pd.Series(np.full([len(开盘价格)],np.NaN))
a1[a]=1
b1[b]=1
a1=a1.fillna(0)
b1=b1.fillna(0)
浮盈=算盈利(a1,b1,开盘价格)
b索引=b1[b1==1].index
return 浮盈[b索引]
def WH_融合多空开平点(买开点,卖平点,卖开点,买平点):
总序列数=len(买开点)
当前仓位=0
新买开点=[]
新卖平点=[]
新卖开点=[]
新买平点=[]
for x in range(总序列数):
if 当前仓位==0:
if 买开点.iloc[x]:
新买开点.append(1)
新卖平点.append(0)
新卖开点.append(0)
新买平点.append(0)
当前仓位=1
elif 卖开点.iloc[x]:
新买开点.append(0)
新卖平点.append(0)
新卖开点.append(1)
新买平点.append(0)
当前仓位=-1
else:
新买开点.append(0)
新卖平点.append(0)
新卖开点.append(0)
新买平点.append(0)
elif 当前仓位==1:
if 卖平点.iloc[x]:
新买开点.append(0)
新卖平点.append(1)
新卖开点.append(0)
新买平点.append(0)
当前仓位=0
else:
新买开点.append(0)
新卖平点.append(0)
新卖开点.append(0)
新买平点.append(0)
elif 当前仓位==-1:
if 卖平点.iloc[x]:
新买开点.append(0)
新卖平点.append(0)
新卖开点.append(0)
新买平点.append(1)
当前仓位=0
else:
新买开点.append(0)
新卖平点.append(0)
新卖开点.append(0)
新买平点.append(0)
    return pd.Series(新买开点)
import logging
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from math import pi
from wordcloud import (WordCloud, get_single_color_func)
import numpy as np
from PIL import Image
import squarify
import os
logger = logging.getLogger('nodes.data_viz')
class SimpleGroupedColorFunc(object):
def __init__(self, color_to_words, default_color):
self.word_to_color = {word: color
for (color, words) in color_to_words.items()
for word in words}
self.default_color = default_color
def __call__(self, word, **kwargs):
return self.word_to_color.get(word, self.default_color)
class GroupedColorFunc(object):
def __init__(self, color_to_words, default_color):
self.color_func_to_words = [
(get_single_color_func(color), set(words))
for (color, words) in color_to_words.items()]
self.default_color_func = get_single_color_func(default_color)
def get_color_func(self, word):
try:
color_func = next(
color_func for (color_func, words) in self.color_func_to_words
if word in words)
except StopIteration:
color_func = self.default_color_func
return color_func
def __call__(self, word, **kwargs):
return self.get_color_func(word)(word, **kwargs)
def plot_decks_colors(client):
query = '''select 1 as uno, d.id as id,
case when string_agg(c.color_identity, '') like '%W%' then 1 else 0 end as white,
case when string_agg(c.color_identity, '') like '%U%' then 1 else 0 end as blue,
case when string_agg(c.color_identity, '') like '%B%' then 1 else 0 end as black,
case when string_agg(c.color_identity, '') like '%R%' then 1 else 0 end as red,
case when string_agg(c.color_identity, '') like '%G%' then 1 else 0 end as green
from deck as d,card as c, deck_card as dc
where dc.deck_id = d.id
and dc.card_id = c.uuid
group by d.id;
'''
decks_colors = pd.read_sql_query(query, client.engine)
df = decks_colors.groupby(by='uno').agg(white=('white', 'sum'),
blue=('blue', 'sum'), black=('black', 'sum'),
red=('red', 'sum'), green=('green', 'sum'))
fig = plt.figure(figsize=(6, 6))
    ax = plt.subplot(polar=True)
categories = ['White', 'Blue', 'Black', 'Red', 'Green']
N = len(categories)
values = df.iloc[0].tolist()
values += values[:1]
angles = [n / float(N) * 2 * pi for n in range(N)]
angles += angles[:1]
plt.polar(angles, values, marker='.')
plt.fill(angles, values, alpha=0.3)
plt.xticks(angles[:-1], categories)
ax.set_rlabel_position(0)
top = max(df.iloc[0].tolist())
plt.yticks([top / 5, top / 5 * 2, top / 5 * 3, top / 5 * 4, top], color='grey', size=10, labels=[])
plt.ylim(0, top)
plt.savefig('../viz/decks_colors.png')
def plot_nonland_name_cloud(client):
query = '''select case when c.name like '%//%' then split_part(c.name, '//', 1) else c.name end as name
, c.color_identity as color
,sum(dc.amount) as amount
from card as c, deck_card as dc
where c.uuid = dc.card_id
and c.type not like '%Land%'
group by c.name, c.color_identity
order by 3 desc'''
df = pd.read_sql_query(query, client.engine)
names = df.iloc[:, 0].tolist()
count = df.iloc[:, 2].tolist()
d = {}
for index in range(len(names)):
d[names[index]] = count[index]
white = []
blue = []
black = []
red = []
green = []
multi = []
for index, row in df.iterrows():
if row['color'] == 'W':
white.append(row['name'])
elif row['color'] == 'U':
blue.append(row['name'])
elif row['color'] == 'B':
black.append(row['name'])
elif row['color'] == 'R':
red.append(row['name'])
elif row['color'] == 'G':
green.append(row['name'])
elif len(row['color']) > 1:
multi.append(row['name'])
color_to_words = {}
color_to_words['#bdaa00'] = white
color_to_words['#0099d1'] = blue
color_to_words['#9a00bd'] = black
color_to_words['#ff0000'] = red
color_to_words['#119100'] = green
color_to_words['#ff69f5'] = multi
mask = np.array(Image.open('../viz/cards-fan.jpg'))
wc = WordCloud(collocations=False, background_color='white', width=1000, height=800, mask=mask).generate_from_frequencies(d)
default_color = '#424142'
grouped_color_func = SimpleGroupedColorFunc(color_to_words, default_color)
wc.recolor(color_func=grouped_color_func)
plt.figure(figsize=(20, 10))
plt.imshow(wc, interpolation="bilinear")
plt.axis("off")
plt.tight_layout(pad=0)
plt.savefig('../viz/nonland_name_cloud.png', bbox_inches='tight')
def plot_nonland_name_bar(client):
query = '''select case when c.name like '%//%' then split_part(c.name, '//', 1) else c.name end as name
,case when length(c.color_identity) > 1 then '#ff69f5' else
case when c.color_identity = 'W' then '#bdaa00' else
case when c.color_identity = 'U' then '#0099d1' else
case when c.color_identity = 'B' then '#9a00bd' else
case when c.color_identity = 'R' then '#ff0000' else
case when c.color_identity = 'G' then '#119100' else
'grey'
end
end
end
end
end
end as color
,sum(dc.amount) as amount
from card as c, deck_card as dc
where c.uuid = dc.card_id
and c.type not like '%Land%'
group by c.name, c.color_identity
order by 3 desc
limit 20'''
df = pd.read_sql_query(query, client.engine)
name = df.iloc[:, 0].tolist()
colors = df.iloc[:, 1].tolist()
number = df.iloc[:, 2].tolist()
plt.figure(figsize=(12, 8))
sns.barplot(y=name, x=number, palette=colors)
# set labels
# plt.ylabel("Sets", size=15)
# plt.ylabel("Card count", size=15)
plt.title("Nonland card name count", size=18)
plt.tight_layout()
# ax.set_xticklabels(name)
# plt.subplots_adjust(bottom=0.2)
plt.grid(axis='x')
plt.savefig("../viz/nonland_name_count.png", dpi=100)
def plot_types_square(client):
query = '''select case when c.type like '%//%' then SPLIT_PART(SPLIT_PART(c.type,' // ',1),' — ',1 ) else
SPLIT_PART(c.type,' — ',1) end as basic_type, sum(dc.amount) as amount
from card as c, deck_card as dc
where c.type not like '%Basic Land%'
and c.uuid = dc.card_id
group by basic_type
order by 2 desc;
'''
df = pd.read_sql_query(query, client.engine)
types = df.iloc[:, 0].tolist()
number = df.iloc[:, 1].tolist()
plt.figure(figsize=(17, 8))
types = [x.replace(' ', '\n') for x in types]
squarify.plot(sizes=number, label=types[:9], alpha=0.8, text_kwargs={'fontsize': 16})
plt.axis('off')
plt.tight_layout()
plt.savefig("../viz/types_square.png", dpi=100)
def plot_types_bar(client):
query = '''select case when c.type like '%//%' then SPLIT_PART(SPLIT_PART(c.type,' // ',1),' — ',1 ) else
SPLIT_PART(c.type,' — ',1) end as basic_type, sum(dc.amount) as amount
from card as c, deck_card as dc
where c.type not like '%Basic Land%'
and c.uuid = dc.card_id
group by basic_type
order by 2 desc;'''
df = pd.read_sql_query(query, client.engine)
types = df.iloc[:, 0].tolist()
number = df.iloc[:, 1].tolist()
plt.figure(figsize=(12, 8))
sns.barplot(y=types, x=number, palette='muted')
# set labels
# plt.ylabel("Sets", size=15)
# plt.ylabel("Card count", size=15)
plt.title("Card type count", size=18)
plt.tight_layout()
# ax.set_xticklabels(name)
# plt.subplots_adjust(bottom=0.2)
plt.grid(axis='x')
plt.savefig("../viz/types_count.png", dpi=100)
def plot_set_type_count(client):
query = '''select c.set
,case when c.type like '%//%' then SPLIT_PART(SPLIT_PART(c.type,' // ',1),' — ',1 ) else
SPLIT_PART(c.type,' — ',1) end as basic_type
,sum(dc.amount) as cards
from card as c, deck_card as dc
where c.uuid = dc.card_id
and c.type not like '%Basic Land%' and set in ('znr','iko','m21','thb','eld')
group by c.set, basic_type
order by 1,2;
'''
    df = pd.read_sql_query(query, client.engine)
"""Ethplorer model"""
__docformat__ = "numpy"
import textwrap
from datetime import datetime
from typing import Any, Optional
from time import sleep
import pandas as pd
import requests
from gamestonk_terminal.cryptocurrency.dataframe_helpers import create_df_index
import gamestonk_terminal.config_terminal as cfg
def split_cols_with_dot(column: str) -> str:
"""Split column name in data frame columns whenever there is a dot between 2 words.
E.g. price.availableSupply -> priceAvailableSupply.
Parameters
----------
column: str
Pandas dataframe column value
Returns
-------
str:
Value of column with replaced format.
"""
def replace(string: str, char: str, index: int) -> str:
"""Helper method which replaces values with dot as a separator and converts it to camelCase format
Parameters
----------
string: str
String in which we remove dots and convert it to camelcase format.
char: str
First letter of given word.
index:
Index of string element.
Returns
-------
str:
Camel case string with removed dots. E.g. price.availableSupply -> priceAvailableSupply.
"""
return string[:index] + char + string[index + 1 :]
if "." in column:
part1, part2 = column.split(".")
part2 = replace(part2, part2[0].upper(), 0)
return part1 + part2
return column
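# Hedged doc-style example of the helper above (the value is illustrative):
# split_cols_with_dot("price.availableSupply")  # -> "priceAvailableSupply"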
def enrich_social_media(dct: dict) -> None:
"""Searching inside dictionary if there are any information about twitter, reddit or coingecko. If yes it
updates dictionary with url to given social media site.
Parameters
----------
dct: dict
Dictionary in which we search for coingecko, twitter or reddit url.
"""
social_media = {
"twitter": "https://www.twitter.com/",
"reddit": "https://www.reddit.com/r/",
"coingecko": "https://www.coingecko.com/en/coins/",
}
for k, v in social_media.items():
if k in dct:
dct[k] = v + dct[k]
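# Hedged example (values are illustrative): keys already present in the dict are
# expanded into full URLs in place.
# info = {"twitter": "some_handle", "holdersCount": 100}
# enrich_social_media(info)
# print(info["twitter"])  # -> "https://www.twitter.com/some_handle"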
def make_request(endpoint: str, address: Optional[str] = None, **kwargs: Any) -> dict:
"""Helper method that handles request for Ethplorer API [Source: https://ethplorer.io/]
Parameters
----------
endpoint: str
endpoint which we want to query e.g. https://api.ethplorer.io/<endpoint><arg>?=apiKey=freekey
address: str
        address argument for the given endpoint. In most cases it's a tx hash or an ETH address.
kwargs: Any
Additional keywords arguments e.g. limit of transactions
Returns
-------
dict
dictionary with response data
"""
base_url = "https://api.ethplorer.io/"
url = f"{base_url}{endpoint}"
if address:
url = url + "/" + address
url += f"?apiKey={cfg.API_ETHPLORER_KEY}"
if "limit" in kwargs:
url += f"&limit={kwargs['limit']}"
sleep(0.5) # Limit is 2 API calls per 1 sec.
response = requests.get(url).json()
if "error" in response:
raise Exception(
f"Error: {response['error']['code']}. Message: {response['error']['message']}\n",
)
return response
def get_token_decimals(address: str) -> Optional[int]:
"""Helper methods that gets token decimals number. [Source: Ethplorer]
Parameters
----------
address: str
Blockchain balance e.g. 0x1f9840a85d5af5bf1d1762f925bdaddc4201f984
Returns
-------
pd.DataFrame:
DataFrame with list of tokens and their balances.
"""
response = make_request("getTokenInfo", address)
if response and "decimals" in response:
return 10 ** int(response["decimals"])
return None
def get_address_info(address: str) -> pd.DataFrame:
"""Get info about tokens on you ethereum blockchain balance. Eth balance, balance of all tokens which
have name and symbol. [Source: Ethplorer]
Parameters
----------
address: str
Blockchain balance e.g. 0x3cD751E6b0078Be393132286c442345e5DC49699
Returns
-------
pd.DataFrame:
DataFrame with list of tokens and their balances.
"""
response = make_request("getAddressInfo", address)
if "tokens" in response:
tokens = response.pop("tokens")
for token in tokens:
token_info = token.pop("tokenInfo")
token.update(
{
"tokenName": token_info.get("name"),
"tokenSymbol": token_info.get("symbol"),
"tokenAddress": token_info.get("balance"),
"balance": token.get("balance")
/ (10 ** int(token_info.get("decimals"))),
}
)
else:
token_info = response.get("tokenInfo") or {}
tokens = [
{
"tokenName": token_info.get("name"),
"tokenSymbol": token_info.get("symbol"),
"tokenAddress": token_info.get("balance"),
"balance": token_info.get("balance")
/ (10 ** int(token_info.get("decimals"))),
}
]
eth = response["ETH"] or {}
eth_balance = eth.get("balance")
eth_row = [
"Ethereum",
"ETH",
"0x0000000000000000000000000000000000000000",
eth_balance,
]
cols = [
"tokenName",
"tokenSymbol",
"tokenAddress",
"balance",
]
df = pd.DataFrame(tokens)[cols]
eth_row_df = pd.DataFrame([eth_row], columns=cols)
df = pd.concat([eth_row_df, df], ignore_index=True)
df = df[df["tokenName"].notna()]
create_df_index(df, "index")
return df
def get_top_tokens() -> pd.DataFrame:
"""Get top 50 tokens. [Source: Ethplorer]
Returns
-------
pd.DataFrame:
DataFrame with list of top 50 tokens.
"""
response = make_request("getTopTokens")
tokens = response["tokens"]
df = pd.DataFrame(tokens)[
[
"name",
"symbol",
"price",
"txsCount",
"transfersCount",
"holdersCount",
"twitter",
"coingecko",
]
]
df["price"] = df["price"].apply(lambda x: x["rate"] if x and "rate" in x else None)
create_df_index(df, "rank")
return df
def get_top_token_holders(address) -> pd.DataFrame:
"""Get info about top token holders. [Source: Ethplorer]
Parameters
----------
address: str
        Token address e.g. 0x1f9840a85d5aF5bf1D1762F925BDADdC4201F984
Returns
-------
pd.DataFrame:
DataFrame with list of top token holders.
"""
response = make_request("getTopTokenHolders", address, limit=100)
df = pd.DataFrame(response["holders"])
sleep(0.5)
token_decimals_divider = get_token_decimals(address)
if token_decimals_divider:
df["balance"] = df["balance"] / token_decimals_divider
return df
def get_address_history(address) -> pd.DataFrame:
"""Get information about balance historical transactions. [Source: Ethplorer]
Parameters
----------
address: str
Blockchain balance e.g. 0x3cD751E6b0078Be393132286c442345e5DC49699
Returns
-------
pd.DataFrame:
DataFrame with balance historical transactions (last 100)
"""
response = make_request("getAddressHistory", address, limit=100)
operations = response.pop("operations")
if operations:
for operation in operations:
token = operation.pop("tokenInfo")
if token:
operation["token"] = token["name"]
operation["tokenAddress"] = token["address"]
operation["decimals"] = int(token["decimals"])
operation["timestamp"] = datetime.fromtimestamp(operation["timestamp"])
    df = pd.DataFrame(operations)
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import qiime2
import pandas as pd
import numpy as np
from pandas.testing import assert_series_equal
from biom.table import Table
from q2_feature_table import filter_seqs
class FilterSeqsTests(unittest.TestCase):
def setUp(self):
self.seqs = pd.Series(['ACGT', 'GCTA', 'CCCC', 'TGTT'],
index=['O1', 'O2', 'O3', 'O4'])
self.df_lite = pd.DataFrame(
[['A'], ['C'], ['G'], ['T']],
index=pd.Index(['O1', 'O2', 'O3', 'O4'], name='id'),
columns=['seq'])
md_full = pd.DataFrame(
[['foo', '1'], ['bar', '2'], ['baz', '3'], ['foo', '4']],
index=['O1', 'O2', 'O3', 'O4'], columns=['stuff', 'some_numbers'])
md_full.index.name = 'FeatureID'
self.md_full = qiime2.Metadata(md_full)
def filter_and_assertEqual(self, exp, md=None, exclude_ids=False,
where=None):
if md is None:
md = self.md_full
obs = filter_seqs(self.seqs, metadata=md,
exclude_ids=exclude_ids, where=where)
assert_series_equal(exp, obs)
def test_id_based_filtering(self):
# filter none
self.filter_and_assertEqual(self.seqs,
md=qiime2.Metadata(self.df_lite))
# filter one
md = qiime2.Metadata(self.df_lite.drop(['O1']))
exp = pd.Series(['GCTA', 'CCCC', 'TGTT'], index=['O2', 'O3', 'O4'])
self.filter_and_assertEqual(exp, md=md)
# filter all
md = qiime2.Metadata(pd.DataFrame({},
index=pd.Index(['foo'], name='id')))
with self.assertRaisesRegex(ValueError, 'All.*filtered'):
filter_seqs(self.seqs, metadata=md)
# exclude none
md = qiime2.Metadata(pd.DataFrame({},
index=pd.Index(['foo'], name='id')))
self.filter_and_assertEqual(self.seqs, md=md, exclude_ids=True)
# exclude one
md = qiime2.Metadata(self.df_lite.drop(['O1', 'O2', 'O3']))
        exp = pd.Series(['ACGT', 'GCTA', 'CCCC'], index=['O1', 'O2', 'O3'])
import sys
import argparse as arg
import pandas as pd
import os
import ggStorage as gg
from datetime import datetime
import datetime
import numpy as np
from colorama import Fore, Style, init
init(convert=True)
# Arguments **
parser = arg.ArgumentParser()
parser.add_argument("-v", "--verbose" , help="Show debug info", action="store_true")
parser.add_argument("-f", "--file" , help="File with the chosen products")
parser.add_argument("-o", "--out" , help="Output directory")
parser.add_argument("-s", "--start_date" , help="Start date")
parser.add_argument("-e", "--end_date" , help="End date")
parser.add_argument("-c", "--cloud" , help="Maximum cloud cover")
parser.add_argument("-l", "--list_mgrs" , help="Tiles corresponding to coordinates MGRS, format: -l ", type=list, nargs='*')
args = parser.parse_args()
verbose = args.verbose
if not args.file:
print(Fore.RED + f'You have to enter the file with the catalog')
exit(-1)
if not os.path.isfile(args.file):
print(Fore.RED + f'Catalog file does not exist')
exit(-1)
if not args.out:
print(Fore.RED + f'You have to enter the output path file')
exit(-1)
else:
if args.out[-3:] != 'csv':
print(Fore.RED + 'You have to write an output csv')
exit(-1)
print('\n\n')
if verbose:
print('Loading the catalog into memory... ', end="")
sel = ['PRODUCT_ID', 'MGRS_TILE', 'SENSING_TIME', 'CLOUD_COVER', 'BASE_URL']
#MGRS = ['31TEE', '31TDE', '31SDD', '31SED']
ch = 1000
df = pd.read_csv(args.file, chunksize=ch) # read the catalog in chunks of 1000 rows
df = pd.concat([aux[sel] for aux in df])
try:
import alpaca_trade_api
except:
print("!pip install PyYAML==5.4.1 --ignore-installed")
print("!pip install alpaca_trade_api")
try:
import stldecompose
except:
print("!pip install scipy==1.2.1")
print("!pip install statsmodels==0.10.2")
print("!pip install stldecompose==0.0.5")
try:
import xgboost
except:
print("!pip install PyYAML==5.4.1 --ignore-installed")
from sklearn.preprocessing import MinMaxScaler
import Common.ApiClient as ac
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from stldecompose import decompose
from sklearn.svm import LinearSVC
from stldecompose.forecast_funcs import mean
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from scipy.signal import argrelextrema
import warnings
warnings.filterwarnings("ignore")
class STL_strategy():
def __init__ (self,ticker,df,column='close',cycle=10,period=2):
self.ticker = ticker
self.column = column
self.period = period
self.cycle = cycle
self.df = df
        self.df.index= pd.to_datetime(df.index,utc=True)
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from datetime import datetime
import json
from bs4 import BeautifulSoup
import requests
from tqdm import tqdm
def timestamp2date(timestamp):
    # function converts a Unix timestamp into a Gregorian date
return datetime.fromtimestamp(int(timestamp)).strftime('%Y-%m-%d')
def date2timestamp(date):
    # function converts a Gregorian date in a given format to a Unix timestamp
return datetime.strptime(date, '%Y-%m-%d').timestamp()
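# Hedged round-trip example (the date is illustrative; the result depends on the local timezone):
# ts = date2timestamp("2021-01-15")
# print(timestamp2date(ts))  # -> "2021-01-15"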
def getCryptoOHLC(fsym, tsym):
# function fetches a crypto price-series for fsym/tsym and stores
# it in pandas DataFrame
cols = ['date', 'timestamp', 'open', 'high', 'low', 'close']
lst = ['time', 'open', 'high', 'low', 'close']
timestamp_today = datetime.today().timestamp()
curr_timestamp = timestamp_today
for j in range(2):
        df = pd.DataFrame(columns=cols)
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# All submission files were downloaded from different public kernels
# See the description to see the source of each submission file
submissions_path = "../safe_driver_prediction/submissions"
all_files = os.listdir(submissions_path)
# Read and concatenate submissions
outs = [pd.read_csv(os.path.join(submissions_path, f), index_col=0)\
for f in all_files]
concat_df = pd.concat(outs, axis=1)
#!/usr/bin/env python3
f = open('sao.txt', 'r')
txt = f.read()
lns = txt.split('\n')
d = {}
for c in txt:
if c not in d.keys(): d[c] = 0
d[c] = d[c] + 1
wrds = [(k, d[k]) for k in d.keys()]
swd = sorted(wrds, key=lambda p: p[1], reverse=True)
from pandas import DataFrame
from sys import stdout
dd0 = DataFrame()
dd0['chr'] = [w[0] for w in swd]
dd0['n'] = [w[1] for w in swd]
def color(t, ac): return "\x1b[{}m{}\x1b[0m".format(ac, t)
print(dd0.describe())
print(dd0.head(10))
stdout.write('==\t{}\t=='.format(color('Char Based', 33)))
print(''.join([x[0] for x in swd ]))
import jieba
dd = DataFrame()
### Author <NAME> - 29 September 2020 ###
import pandas as pd
import numpy as np
import json
from gooey import Gooey, GooeyParser
import _pickle as cPickle
from collections import Counter
import warnings
import webbrowser
import time
from sklearn.ensemble import RandomForestClassifier
from imblearn.ensemble import BalancedRandomForestClassifier
import sys
import os
path_main = "/".join(os.path.realpath(__file__).split("/")[:-1])
sys.path.append(path_main + '/Classes/')
sys.path.append(path_main + '/Utils/')
from media_class import Medium, Supplement, GrowthMedium, Medium_one_hot, Supplement_one_hot, GrowthMedium_one_hot
from gene_one_hot import one_hot
from help_functions import mean, str_to_bool, str_none_check
from message import display_message
@Gooey(dump_build_config=False,
program_name="CellCulturePy",
richtext_controls=True,
required_cols=3,
optional_cols=1,
default_size=(1300, 800))
def main():
'''
'''
Cache_pickle = pd.read_pickle('Data/cache_features.pkl')
with open("Data/cache_features.json", "r") as read_file:
Cache_json = json.load(read_file)
# Assign variables
TumorType_arg = Cache_json["cache_diseases_highest"]
TumorType_arg.sort()
Tissue_arg = Cache_json["cache_tumor_site"]
Tissue_arg.sort()
parser = GooeyParser(description="Predicting media conditions with genomic profile")
parser.add_argument("TumorType", help="what is your tumor type", choices=TumorType_arg)
parser.add_argument("Tissue", help="what is your tissue type", choices=Tissue_arg)
parser.add_argument("Dimension", help="what is your growing dimension", choices=Cache_json["cache_dimension"])
parser.add_argument("maf", help="Select maf file from TWIST (mutect1 or 2)", widget="FileChooser")
parser.add_argument("cnv", help="Select cnv file from TWIST (.tumor.called)", widget="FileChooser")
parser.add_argument("-m", dest="Media", nargs='+', default=False, choices=Cache_json["cache_media"], help="you can select one or multiple media types you want to look for", widget="Listbox")
parser.add_argument("-s", dest="Supplements", action="store_true", default=False, help="Do you want to include looking for supplements (default: No)")
args = parser.parse_args()
display_message(part=1)
predict(Cache_json=Cache_json, Cache_pickle=Cache_pickle, TumorType=args.TumorType, Tissue=args.Tissue, Dimension=args.Dimension, maf=args.maf, cnv=args.cnv, media=str_to_bool(args.Media), supplements=False)
display_message(part=2)
#Displaying
path_main = ("/".join(os.path.realpath(__file__).split("/")[:-1]))
webbrowser.open('file://' + path_main + "/tmp.html")
time.sleep(5)
os.remove(path_main + "/tmp.html")
def maf_extract(maf):
'''
'''
id_ = []
data_dict= {}
file_name = maf.split('/')[-1]
i = 0
with open(maf, 'r', encoding="latin-1") as f:
try:
for line in f:
if line.startswith("#"):
continue
elif not id_:
id_ = line.replace('\n', '').split('\t')
else:
data_dict[i] = line.replace('\n', '').split('\t')
i += 1
except:
warnings.warn(f"File: {file_name}, had problems with unrecognizable symbols", DeprecationWarning)
maf_frame = pd.DataFrame.from_dict(data_dict, orient="index", columns=id_)
maf_frame = maf_frame[~maf_frame["Variant_Classification"].isin(["Intron", "lincRNA", "IGR", "5'Flank", "5'UTR", "Silent", "3'UTR", "RNA"])]
return maf_frame, file_name
def cnv_extract(cnv):
'''
'''
file_name = cnv.split('/')[-1]
cnv_frame = pd.read_csv(cnv, sep="\t")
chromosomelist = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "X", "Y"]
cnv_data = {}
for chromosoom in chromosomelist:
cnv_tmp = cnv_frame[cnv_frame["Chromosome"] == chromosoom]
if type(cnv_tmp) == pd.core.series.Series or cnv_tmp.empty == True:
cnv_data[chromosoom] = 1
elif len(cnv_tmp) > 1:
cnv_data[chromosoom] = (sum(cnv_tmp["Num_Probes"] * cnv_tmp["Segment_Mean"]) / sum(cnv_tmp["Num_Probes"]))
else:
cnv_data[chromosoom] = cnv_tmp["Segment_Mean"].tolist()[0]
    cnv_data = pd.Series(cnv_data, name='Value')
"""
Unit test suite for OLS and PanelOLS classes
"""
# pylint: disable-msg=W0212
from __future__ import division
from datetime import datetime
import unittest
import nose
import numpy as np
from pandas import date_range, bdate_range
from pandas.core.panel import Panel
from pandas import DataFrame, Index, Series, notnull, datetools
from pandas.stats.api import ols
from pandas.stats.ols import _filter_data
from pandas.stats.plm import NonPooledPanelOLS, PanelOLS
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from common import BaseTest
_have_statsmodels = True
try:
import statsmodels.api as sm
except ImportError:
try:
import scikits.statsmodels.api as sm
except ImportError:
_have_statsmodels = False
def _check_repr(obj):
repr(obj)
str(obj)
def _compare_ols_results(model1, model2):
assert(type(model1) == type(model2))
if hasattr(model1, '_window_type'):
_compare_moving_ols(model1, model2)
else:
_compare_fullsample_ols(model1, model2)
def _compare_fullsample_ols(model1, model2):
assert_series_equal(model1.beta, model2.beta)
def _compare_moving_ols(model1, model2):
assert_frame_equal(model1.beta, model2.beta)
class TestOLS(BaseTest):
# TODO: Add tests for OLS y predict
# TODO: Right now we just check for consistency between full-sample and
# rolling/expanding results of the panel OLS. We should also cross-check
# with trusted implementations of panel OLS (e.g. R).
# TODO: Add tests for non pooled OLS.
@classmethod
def setUpClass(cls):
try:
import matplotlib as mpl
mpl.use('Agg', warn=False)
except ImportError:
pass
if not _have_statsmodels:
raise nose.SkipTest
def testOLSWithDatasets(self):
self.checkDataSet(sm.datasets.ccard.load(), skip_moving=True)
self.checkDataSet(sm.datasets.cpunish.load(), skip_moving=True)
self.checkDataSet(sm.datasets.longley.load(), skip_moving=True)
self.checkDataSet(sm.datasets.stackloss.load(), skip_moving=True)
self.checkDataSet(sm.datasets.copper.load())
self.checkDataSet(sm.datasets.scotland.load())
# degenerate case fails on some platforms
# self.checkDataSet(datasets.ccard.load(), 39, 49) # one col in X all 0s
def testWLS(self):
X = DataFrame(np.random.randn(30, 4), columns=['A', 'B', 'C', 'D'])
Y = Series(np.random.randn(30))
weights = X.std(1)
self._check_wls(X, Y, weights)
weights.ix[[5, 15]] = np.nan
Y[[2, 21]] = np.nan
self._check_wls(X, Y, weights)
def _check_wls(self, x, y, weights):
result = ols(y=y, x=x, weights=1/weights)
combined = x.copy()
combined['__y__'] = y
combined['__weights__'] = weights
combined = combined.dropna()
endog = combined.pop('__y__').values
aweights = combined.pop('__weights__').values
exog = sm.add_constant(combined.values, prepend=False)
sm_result = sm.WLS(endog, exog, weights=1/aweights).fit()
assert_almost_equal(sm_result.params, result._beta_raw)
assert_almost_equal(sm_result.resid, result._resid_raw)
self.checkMovingOLS('rolling', x, y, weights=weights)
self.checkMovingOLS('expanding', x, y, weights=weights)
def checkDataSet(self, dataset, start=None, end=None, skip_moving=False):
exog = dataset.exog[start : end]
endog = dataset.endog[start : end]
x = DataFrame(exog, index=np.arange(exog.shape[0]),
columns=np.arange(exog.shape[1]))
y = Series(endog, index=np.arange(len(endog)))
self.checkOLS(exog, endog, x, y)
if not skip_moving:
self.checkMovingOLS('rolling', x, y)
self.checkMovingOLS('rolling', x, y, nw_lags=0)
self.checkMovingOLS('expanding', x, y, nw_lags=0)
self.checkMovingOLS('rolling', x, y, nw_lags=1)
self.checkMovingOLS('expanding', x, y, nw_lags=1)
self.checkMovingOLS('expanding', x, y, nw_lags=1, nw_overlap=True)
def checkOLS(self, exog, endog, x, y):
reference = sm.OLS(endog, sm.add_constant(exog, prepend=False)).fit()
result = ols(y=y, x=x)
# check that sparse version is the same
sparse_result = ols(y=y.to_sparse(), x=x.to_sparse())
_compare_ols_results(result, sparse_result)
assert_almost_equal(reference.params, result._beta_raw)
assert_almost_equal(reference.df_model, result._df_model_raw)
assert_almost_equal(reference.df_resid, result._df_resid_raw)
assert_almost_equal(reference.fvalue, result._f_stat_raw[0])
assert_almost_equal(reference.pvalues, result._p_value_raw)
assert_almost_equal(reference.rsquared, result._r2_raw)
assert_almost_equal(reference.rsquared_adj, result._r2_adj_raw)
assert_almost_equal(reference.resid, result._resid_raw)
assert_almost_equal(reference.bse, result._std_err_raw)
assert_almost_equal(reference.tvalues, result._t_stat_raw)
assert_almost_equal(reference.cov_params(), result._var_beta_raw)
assert_almost_equal(reference.fittedvalues, result._y_fitted_raw)
_check_non_raw_results(result)
def checkMovingOLS(self, window_type, x, y, weights=None, **kwds):
window = sm.tools.tools.rank(x.values) * 2
moving = ols(y=y, x=x, weights=weights, window_type=window_type,
window=window, **kwds)
# check that sparse version is the same
sparse_moving = ols(y=y.to_sparse(), x=x.to_sparse(),
weights=weights,
window_type=window_type,
window=window, **kwds)
_compare_ols_results(moving, sparse_moving)
index = moving._index
for n, i in enumerate(moving._valid_indices):
if window_type == 'rolling' and i >= window:
prior_date = index[i - window + 1]
else:
prior_date = index[0]
date = index[i]
x_iter = {}
for k, v in x.iteritems():
x_iter[k] = v.truncate(before=prior_date, after=date)
y_iter = y.truncate(before=prior_date, after=date)
static = ols(y=y_iter, x=x_iter, weights=weights, **kwds)
self.compare(static, moving, event_index=i,
result_index=n)
_check_non_raw_results(moving)
FIELDS = ['beta', 'df', 'df_model', 'df_resid', 'f_stat', 'p_value',
'r2', 'r2_adj', 'rmse', 'std_err', 't_stat',
'var_beta']
def compare(self, static, moving, event_index=None,
result_index=None):
index = moving._index
# Check resid if we have a time index specified
if event_index is not None:
ref = static._resid_raw[-1]
label = index[event_index]
res = moving.resid[label]
assert_almost_equal(ref, res)
ref = static._y_fitted_raw[-1]
res = moving.y_fitted[label]
assert_almost_equal(ref, res)
# Check y_fitted
for field in self.FIELDS:
attr = '_%s_raw' % field
ref = getattr(static, attr)
res = getattr(moving, attr)
if result_index is not None:
res = res[result_index]
assert_almost_equal(ref, res)
def test_ols_object_dtype(self):
df = DataFrame(np.random.randn(20, 2), dtype=object)
model = ols(y=df[0], x=df[1])
summary = repr(model)
class TestOLSMisc(unittest.TestCase):
'''
For test coverage with faux data
'''
@classmethod
def setupClass(cls):
if not _have_statsmodels:
raise nose.SkipTest
def test_f_test(self):
x = tm.makeTimeDataFrame()
y = x.pop('A')
model = ols(y=y, x=x)
hyp = '1*B+1*C+1*D=0'
result = model.f_test(hyp)
hyp = ['1*B=0',
'1*C=0',
'1*D=0']
result = model.f_test(hyp)
assert_almost_equal(result['f-stat'], model.f_stat['f-stat'])
self.assertRaises(Exception, model.f_test, '1*A=0')
def test_r2_no_intercept(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
x_with = x.copy()
x_with['intercept'] = 1.
model1 = ols(y=y, x=x)
model2 = ols(y=y, x=x_with, intercept=False)
assert_series_equal(model1.beta, model2.beta)
# TODO: can we infer whether the intercept is there...
self.assert_(model1.r2 != model2.r2)
# rolling
model1 = ols(y=y, x=x, window=20)
model2 = ols(y=y, x=x_with, window=20, intercept=False)
assert_frame_equal(model1.beta, model2.beta)
self.assert_((model1.r2 != model2.r2).all())
def test_summary_many_terms(self):
x = DataFrame(np.random.randn(100, 20))
y = np.random.randn(100)
model = ols(y=y, x=x)
model.summary
def test_y_predict(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
model1 = ols(y=y, x=x)
assert_series_equal(model1.y_predict, model1.y_fitted)
assert_almost_equal(model1._y_predict_raw, model1._y_fitted_raw)
def test_predict(self):
y = tm.makeTimeSeries()
        x = tm.makeTimeDataFrame()
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
| tm.assert_isinstance(self.dups.index, DatetimeIndex) | pandas.util.testing.assert_isinstance |
# -*- coding: utf-8 -*-
"""
Part of slugdetection package
@author: <NAME>
github: dapolak
"""
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from slugdetection.Slug_Detection import Slug_Detection
import unittest
class Test_Slug_Detection(unittest.TestCase):
"""
Unitest class for the Slug Detection class
"""
def test_create_class(self, spark_data):
"""
Unit test for class creation
Parameters
----------
spark_data : Spark data frame
well data frame
"""
test_class = Slug_Detection(spark_data)
assert hasattr(test_class, "well_df"), "Assert well_df attribute is created"
assert len(test_class.well_df.head(1)) != 0, \
"well_df attribute not empty" # Pyspark has no clear empty attribute
def test_jump(self, spark_data):
"""
Unit test for jump method
Parameters
----------
spark_data : Spark data frame
well data frame
"""
# Standard Data Engineering
test_class = Slug_Detection(spark_data)
test_class.timeframe(start="12-SEP-16 09:09",
end="18-SEP-16 09:09") # known interval that has 3 section of data over 99% choke
test_class.data_range(verbose=False)
test_class.clean_choke(method="99")
sd_df = test_class.df_toPandas()
test_class.jump()
assert 'count_id' in test_class.pd_df.columns, "Assert new count_id column was created"
assert test_class.pd_df['count_id'].nunique() >= 3, \
"For this example, assert that there are three continuous sets of data"
def test_clean_short_sub(self, spark_data):
"""
Unit test for clean_short_sub method
Parameters
----------
spark_data : Spark data frame
well data frame
"""
test_class = Slug_Detection(spark_data)
test_class.timeframe(start="12-SEP-16 09:09",
end="18-SEP-16 09:09") # known interval that has 3 section of data over 99% choke
test_class.data_range(verbose=False)
test_class.clean_choke(method="99")
sd_df = test_class.df_toPandas()
test_class.jump()
a = len(test_class.pd_df) # Store length of pd_df data frame
test_class.clean_short_sub(min_df_size=200) # Apply clean_short_sub method
b = len(test_class.pd_df) # Store length of pd_df data frame
assert a > b, "For this example, the post clean_short_sub pd_df attribute should be shorter"
def test_sub_data(self, spark_data):
"""
Unit test for clean_short_sub method
Parameters
----------
spark_data : Spark data frame
well data frame
"""
# Standard Data Engineering
test_class = Slug_Detection(spark_data)
test_class.timeframe(start="12-SEP-16 09:09",
end="18-SEP-16 09:09") # known interval that has 3 section of data over 99% choke
test_class.data_range(verbose=False)
test_class.clean_choke(method="99")
sd_df = test_class.df_toPandas()
test_class.sub_data(min_df_size=200)
assert hasattr(test_class, "sub_df_dict"), "New attribute must have been created"
a = test_class.pd_df["count_id"].nunique()
assert a == len(test_class.sub_df_dict), "Number of unique count ids must be the same as number of data " \
"frames in sub_df_dict dictionary"
a = test_class.sub_df_dict[0] # Get first element of the dictionary
assert isinstance(a, pd.DataFrame), "sub_df_dict elements are pandas data frames"
for f in test_class.features:
assert f in a.columns, "data frame must contain all features"
def test_slug_check(self, spark_data):
"""
Unit test for slug_check method
Parameters
----------
spark_data : Spark data frame
well data frame
"""
# Standard Data Engineering steps
test_class = Slug_Detection(spark_data)
test_class.timeframe(start="18-SEP-16 01:09", end="18-SEP-16 09:09") # example interval
test_class.data_range(verbose=False)
test_class.clean_choke(method="99")
sd_df = test_class.df_toPandas()
test_class.sub_data()
## Test 1 : Test that slug_check returns right value
##
# Create fake dataframe
datetime_format = '%d-%b-%y %H:%M' # datetime date format
base = datetime.strptime("01-JAN-16 09:09", datetime_format) # Create datetime type timestamp
date_list = [[base + timedelta(minutes=x)] for x in range(1000)] # Create list of timestamps
x = np.linspace(0, 100 * np.pi, 1000) # Get evenly spaced x array
whp_list = (np.sin(x) * 3) + 10 # Create sin wave array (slug-like)
fake_df = pd.DataFrame(data=date_list, columns=["ts"], dtype=str) # Create data frame with timestamp
fake_df["ts"] = pd.to_datetime(fake_df["ts"]) # Ensure timestamp are datetime type
fake_df["WH_P"] = whp_list # Add sine wave as WHP data
test_class.sub_df_dict = {
1: fake_df
} # Override sub_df_dict attribute with fake data frame
slug_idx = pd.Series(whp_list)[whp_list > 12.90].index.tolist() # Create list of slug peaks for fake slugs
first = test_class.slug_check(slug_idx, 1) # Get results from slug_check method
assert len(first) == 1, "First slug index list should only contain one value in this example"
## Test 2 : Test that slug_check returns right value
##
# Create fake data frame
datetime_format = '%d-%b-%y %H:%M' # datetime date format
base = datetime.strptime("01-JAN-16 09:09", datetime_format) # Create datetime type timestamp
date_list = [[base + timedelta(minutes=x)] for x in range(2300)] # Create list of timestamps
x = np.linspace(0, 100 * np.pi, 1000) # Get evenly spaced x array
whp_list = (np.sin(x) * 3) + 10 # Create sin wave array (slug-like)
whp_list = np.append(whp_list, [10 for i in range(300)]) # Add flat flow to simulate normal flow
whp_list = np.append(whp_list, (np.sin(x) * 3) + 10) # Add more slugs
fake_df = pd.DataFrame(data=date_list, columns=["ts"], dtype=str) # Create data frame with timestamp
fake_df["ts"] = pd.to_datetime(fake_df["ts"]) # Ensure timestamp are datetime type
fake_df["WH_P"] = whp_list # Add fake whp data
slug_idx = pd.Series(whp_list)[whp_list > 12.90].index.tolist() # Create list of slug peaks
test_class.sub_df_dict = {
1: fake_df
} # Override sub_df_dict attribute with fake data frame
first = test_class.slug_check(slug_idx, 1) # Get results from slug_check method
assert first, "First slug index list should not be empty"
assert len(first) == 2, "First slug index list should only contain two value in this example"
assert first[1] == 1305, "In this example, the second first slug of the data set occurs at minutes = 1305"
def test_label_slugs(self, spark_data):
"""
Unit test for label_slugs method
Parameters
----------
spark_data : Spark data frame
well data frame
"""
# Standard Data Engineering steps
test_class = Slug_Detection(spark_data)
test_class.timeframe(start="18-SEP-16 01:09", end="30-SEP-16 09:09") # example interval
test_class.data_range(verbose=False)
test_class.clean_choke(method="99")
sd_df = test_class.df_toPandas()
try:
f, s = test_class.label_slugs()
print("Sub df dict attribute has not been created")
raise ValueError
except AssertionError:
pass
test_class.sub_data() # Create sub df dict
# create fake data set
datetime_format = '%d-%b-%y %H:%M'
base = datetime.strptime("01-JAN-16 09:09", datetime_format)
date_list = [[base + timedelta(minutes=x)] for x in range(1000)] # Create timestamps, one minute apart
x = np.linspace(0, 100 * np.pi, 1000)
whp_list = (np.sin(x) * 3) + 10 # create sin wave
fake_df = pd.DataFrame(data=date_list, columns=["ts"], dtype=str)
fake_df["ts"] = pd.to_datetime(fake_df["ts"])
fake_df["WH_P"] = whp_list
# Override sub_df_dict attribute with fake data frames
test_class.sub_df_dict = {
1: fake_df,
2: pd.DataFrame(data=[[0, 0], [0, 0]], columns=["ts", "WH_P"])
}
# With sub_df_dict overridden, label_slugs should now return the first-slug and slug-peak index lists
f, s = test_class.label_slugs()
assert s, "Assert slug index list is not empty"
assert f, "Assert first slug index list not empty"
assert len(s[0]) == 49, "In this example, there should be 49 slug peaks"
assert len(s) == 2, "In this example, there should be two lists of slug peaks (one per sub data frame)"
def test_format_data(self, spark_data):
"""
Unit test for format_data method
Parameters
----------
spark_data : Spark data frame
well data frame
"""
# Standard Data Engineering steps
test_class = Slug_Detection(spark_data)
test_class.timeframe(start="18-SEP-16 01:09", end="18-SEP-16 09:09") # example interval
test_class.data_range(verbose=False)
test_class.clean_choke(method="99")
sd_df = test_class.df_toPandas()
try:
f, s = test_class.label_slugs()
print("Sub df dict attribute has not been created")
raise ValueError
except AssertionError:
pass
test_class.sub_data() # Create sub df dict
## Example 1
##
# create fake data set
datetime_format = '%d-%b-%y %H:%M' # datetime date format
base = datetime.strptime("01-JAN-16 09:09", datetime_format) # Create datetime type timestamp
date_list = [[base + timedelta(minutes=x)] for x in range(2600)] # Create list of timestamps
x = np.linspace(0, 100 * np.pi, 1000) # Get evenly spaced x array
whp_list = np.array([10 for i in range(300)]) # Create whp list with normal flow behaviour
whp_list = np.append(whp_list, (np.sin(x) * 3) + 10) # Add sin wave array (slug-like)
whp_list = np.append(whp_list, [10 for i in range(300)]) # Add flat flow to simulate normal flow
whp_list = np.append(whp_list, (np.sin(x) * 3) + 10) # Add more slugs
fake_df = pd.DataFrame(data=date_list, columns=["ts"], dtype=str) # Create data frame with timestamp
fake_df["ts"] = | pd.to_datetime(fake_df["ts"]) | pandas.to_datetime |
"""
Multi object tracking results and ground truth
- conversion,
- evaluation,
- visualization.
For more help run this file as a script with --help parameter.
PyCharm debugger could have problems debugging inside this module due to a bug:
https://stackoverflow.com/questions/47988936/debug-properly-with-pycharm-module-named-io-py
workaround: rename the file temporarily
TODO: merge with utils.gt.gt
"""
import warnings
import numpy as np
import pandas as pd
import tqdm
from .bbox_mot import BboxMot
from .mot import Mot
from .posemot import PoseMot
metrics_higher_is_better = ["idf1", "idp", "idr", "recall", "precision", "mota"]
metrics_lower_is_better = [
"num_false_positives",
"num_misses",
"num_switches",
"num_fragmentations",
"motp",
"motp_px",
]
def load_any_mot(filename_or_buffer):
df = pd.read_csv(filename_or_buffer, nrows=2)
try:
filename_or_buffer.seek(0)
except AttributeError:
pass
try:
for s in df.columns:
float(s)
bbox_mot = True
except ValueError:
bbox_mot = False
if bbox_mot:
mot = BboxMot(filename_or_buffer=filename_or_buffer)
elif "keypoint" in df.columns:
mot = PoseMot(filename_or_buffer=filename_or_buffer)
else:
mot = Mot(filename_or_buffer=filename_or_buffer)
return mot
def load_idtracker(filename_or_buffer):
"""
Load idTracker results.
Example trajectories.txt:
X1 Y1 ProbId1 X2 Y2 ProbId2 X3 Y3 ProbId3 X4 Y4 ProbId4 X5 Y5 ProbId5
459.85 657.37 NaN 393.9 578.17 NaN 603.95 244.9 NaN 1567.3 142.51 NaN 371.6 120.74 NaN
456.43 664.32 NaN 391.7 583.05 NaN 606.34 242.57 NaN 1565.3 138.53 NaN 360.93 121.86 NaN
453.22 670.03 NaN 389.63 587.08 NaN 608.41 240.66 NaN 1566.8 132.25 NaN 355.92 122.81 NaN
...
:param filename_or_buffer: idTracker results (trajectories.txt or trajectories_nogaps.txt)
:return: DataFrame with frame id x y width height confidence columns
"""
df = pd.read_csv(filename_or_buffer, delim_whitespace=True)
df.index += 1
n_animals = len(df.columns) // 3
for i in range(1, n_animals + 1):
df[i] = i
df["frame"] = df.index
objs = []
for i in range(1, n_animals + 1):
objs.append(
df[["frame", i, "X" + str(i), "Y" + str(i)]].rename(
{"X" + str(i): "x", "Y" + str(i): "y", i: "id"}, axis=1
)
)
df_out = pd.concat(objs)
df_out.sort_values(["frame", "id"], inplace=True)
df_out[df_out.isna()] = -1  # fill missing positions in the output frame
df_out["width"] = -1
df_out["height"] = -1
df_out["confidence"] = -1
return df_out
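# Usage sketch (hedged): the file name below is illustrative, based on the docstring above,
# not a file shipped with this module.
# df = load_idtracker('trajectories_nogaps.txt')
# df.head()  # -> frame, id, x, y, width, height, confidence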
def load_idtrackerai(filename_or_buffer):
"""
Load idtracker.ai results
:param filename_or_buffer: idtracker.ai trajectories file (a .npy file containing a trajectories dict)
:return: DataFrame with frame id x y width height confidence columns
"""
traj_ndarray = np.load(filename_or_buffer, allow_pickle=True)
traj_dict = traj_ndarray.item()
n_frames, n_ids, _ = traj_dict["trajectories"].shape
frames = np.repeat(np.arange(1, n_frames + 1), n_ids).reshape(n_frames, n_ids, 1)
obj_ids = np.tile(np.arange(1, n_ids + 1), n_frames).reshape(n_frames, n_ids, 1)
df = pd.DataFrame(
np.concatenate((frames, obj_ids, traj_dict["trajectories"]), axis=2).reshape(
(n_frames * n_ids, 4)
),
columns=["frame", "id", "x", "y"],
)
df = df.astype({"frame": "int", "id": "int"})
df[df.isna()] = -1
df["width"] = -1
df["height"] = -1
df["confidence"] = -1
return df
def load_toxtrac(filename_or_buffer, topleft_xy=(0, 0)):
"""
Load ToxTrack results.
Example Tracking_0.txt:
0 0 1 194.513 576.447 1
1 0 1 192.738 580.313 1
2 0 1 190.818 584.126 1
3 0 1 188.84 588.213 1
4 0 1 186.78 592.463 1
Documentation of the file format is in
[ToxTrac: a fast and robust software for tracking organisms](https://arxiv.org/pdf/1706.02577.pdf) page 33.
:param filename_or_buffer: Toxtrac results (Tracking_0.txt)
:param topleft_xy: tuple, length 2; xy coordinates of the arena top left corner
:return: DataFrame with frame id x y width height confidence columns
"""
df = pd.read_csv(
filename_or_buffer,
delim_whitespace=True,
names=["frame", "arena", "id", "x", "y", "label"],
usecols=["frame", "id", "x", "y"],
)
df["frame"] += 1 # MATLAB indexing
df["x"] += topleft_xy[0]
df["y"] += topleft_xy[1]
df = df.assign(width=-1)
df = df.assign(height=-1)
df = df.assign(confidence=-1)
df.sort_values(["frame", "id"], inplace=True)
df[df.isna()] = -1
return df
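# Usage sketch (hedged): 'Tracking_0.txt' and the arena offset are illustrative values only.
# df = load_toxtrac('Tracking_0.txt', topleft_xy=(100, 50))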
def load_sleap_analysis_as_posemot(filename_or_buffer, num_objects=None):
"""
:param filename_or_buffer: SLEAP analysis HDF5 file (exported via File -> Export Analysis HDF5 in sleap-label)
:param num_objects: use only the first num_objects tracks; defaults to all tracks in the file
:return: PoseMot with NaN coordinates where an object is not present
"""
import h5py
f = h5py.File(filename_or_buffer, "r")
# occupancy_matrix = f['track_occupancy'][:]
try:
tracks_matrix = f["tracks"][:] # noqa: F841
except KeyError:
print(
f'File {filename_or_buffer} doesn\'t appear to be SLEAP "analysis" file.\n'
f"Export analysis from sleap-label using File -> Export Analysis HDF5.\n"
)
raise
if num_objects is None:
num_objects = f["tracks"].shape[0]
mot = PoseMot()
mot.init_blank(
range(f["tracks"].shape[3]), range(num_objects), f["tracks"].shape[2]
)
mot.ds["x"].values = np.moveaxis(f["tracks"][:num_objects, 0, :, :], 2, 0)
mot.ds["y"].values = np.moveaxis(f["tracks"][:num_objects, 1, :, :], 2, 0)
mot.marker_radius = 8
return mot
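# Usage sketch (hedged): the analysis file name is hypothetical; export one via
# File -> Export Analysis HDF5 in sleap-label.
# mot = load_sleap_analysis_as_posemot('session.analysis.h5', num_objects=2)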
def load_sleap_as_dataframe(filename):
try:
import sleap
except ImportError as exception:
exception.msg = """
load_sleap_as_dataframe() requires the sleap module. Either install it, or export an analysis file from the
sleap-label application and use load_sleap_analysis_as_posemot(), which has no extra dependencies.
"""
raise exception
labels = sleap.load_file(filename)
points = []
for frame in tqdm.tqdm(labels):
for instance in frame:
for node_name, point in zip(labels.skeleton.node_names, instance):
try:
score = point.score
except AttributeError:
score = -1
if isinstance(instance, sleap.instance.PredictedInstance):
instance_class = 'predicted'
elif isinstance(instance, sleap.instance.Instance):
instance_class = 'manual'
else:
assert False, 'unknown instance type: {}'.format(type(instance))
points.append((point.x, point.y, score, point.visible, node_name, instance.frame_idx,
instance.track.name, instance_class, instance.video.backend.filename))
df = pd.DataFrame(points, columns=['x', 'y', 'score', 'visible', 'bodypart', 'frame',
'track', 'source', 'video'])
df['keypoint'] = df.bodypart.apply(labels.skeleton.node_names.index)
return df
def load_sleap_as_posemot(filename):
df = load_sleap_as_dataframe(filename)
df['id'] = df.track.str.split('_', expand=True)[1].astype(int) # default SLEAP track naming "track_<num>"
df = df.rename(columns={'score': 'confidence'})
df = df.set_index(["frame", "id", "keypoint"])
# remove duplicated instance with preference to manually annotated
df_predicted = df.query('source == "predicted"')
df_manual = df.query('source == "manual"')
df_unique = df_predicted.copy()
df_unique.loc[df_manual.index] = df_manual
assert df_unique.index.is_unique
return PoseMot.from_df(df_unique.reset_index())
def save_mot(filename, df):
df.to_csv(filename, index=False) # header=False,
def load_mot(filepath_or_buffer):
"""
Load Multiple Object Tacking Challenge trajectories file.
:param filepath_or_buffer: mot filename_or_buffer or buffer
:return: DataFrame, columns frame and id start with 1 (MATLAB indexing)
"""
df = pd.read_csv(
filepath_or_buffer, index_col=["frame", "id"]
) # names=[u'frame', u'id', u'x', u'y', u'width', u'height', u'confidence']
return df[(df.x != -1) & (df.y != -1)]
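# Expected input shape for load_mot (hedged example rows, not real data), matching the columns written by save_mot:
# frame,id,x,y,width,height,confidence
# 1,1,101.5,202.0,-1,-1,-1
# 1,2,345.0,87.2,-1,-1,-1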
def mot_in_roi(df, roi):
"""
Limit MOT to a region of interest.
:param df: MOT trajectories, DataFrame
:param roi: utils.roi.ROI
:return: MOT trajectories, DataFrame
"""
idx_in_roi = (
(df.x >= roi.x())
& (df.y >= roi.y())
& (df.x < roi.x() + roi.width())
& (df.y < roi.y() + roi.height())
)
return df[idx_in_roi]
def eval_mot(df_gt, df_results, sqdistth=10000):
"""
Evaluate trajectories by comparing them to a ground truth.
:param df_gt: ground truth DataFrame, columns <frame>, <id>, <x>, <y>; <frame> and <id> are 1-based; see load_mot
:param df_results: result trajectories DataFrame, format same as df_gt
:param sqdistth: square of the distance threshold, only detections and ground truth objects closer than
the threshold can be matched
:return: (summary DataFrame, MOTAccumulator)
"""
nan_mask = (
(df_results.x == -1)
| (df_results.y == -1)
| df_results.x.isna()
| df_results.y.isna()
)
if len(df_results[nan_mask]) > 0:
warnings.warn("stripping nans from the evaluated trajectories")
df_results = df_results[~nan_mask]
import motmetrics as mm
from motmetrics.utils import compare_to_groundtruth
acc = compare_to_groundtruth(
df_gt, df_results, dist="euc", distfields=["x", "y"], distth=sqdistth
)
mh = mm.metrics.create()
# remove id_global_assignment metric, workaround for https://github.com/cheind/py-motmetrics/issues/19
metrics = mh.names[:]
metrics.remove("id_global_assignment")
return mh.compute(acc, metrics), acc # metrics=mm.metrics.motchallenge_metrics
def eval_and_save(ground_truth, mot_results, out_csv=None, results_keypoint=None):
"""
Evaluate results and save metrics.
:param ground_truth: ground truth filename_or_buffer (MOT format), buffer or Mot object
:param mot_results: results filename_or_buffer (MOT format), buffer or Mot
:param out_csv: output file with a summary (filename_or_buffer or buffer)
:param results_keypoint: keypoint used for evaluation of keypoint/pose data against centroid ground truth
"""
try:
df_gt = ground_truth.to_dataframe()
except AttributeError:
df_gt = load_mot(ground_truth)
try:
df_results = mot_results.to_dataframe()
except AttributeError:
df_results = load_any_mot(mot_results).to_dataframe()
if results_keypoint is not None:
df_results = df_results[df_results.keypoint == results_keypoint]
df_gt = df_gt.rename(columns={"frame": "FrameId", "id": "Id"}).set_index(["FrameId", "Id"])
df_results = df_results.rename(columns={"frame": "FrameId", "id": "Id"}).set_index(["FrameId", "Id"])
print("Evaluating...")
summary, acc = eval_mot(df_gt, df_results)
summary["motp_px"] = np.sqrt(
summary["motp"]
) # convert from square pixels to pixels
import motmetrics as mm
# mh = mm.metrics.create()
print(mm.io.render_summary(summary))
if out_csv is not None:
summary.to_csv(out_csv, index=False)
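# Usage sketch (hedged): file names are placeholders for a MOT-format ground truth and tracker output.
# eval_and_save('gt.csv', 'tracker_output.csv', out_csv='metrics.csv')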
def array_to_mot_dataframe(results):
"""
Create MOT challenge format DataFrame out of 3 dimensional array of trajectories.
:param results: ndarray, shape=(n_frames, n_animals, 2 or 4); coordinates are in yx order, nan when id not present
:return: DataFrame with frame, id, x, y, width, height and confidence columns
"""
assert results.ndim == 3
assert results.shape[2] == 2 or results.shape[2] == 4
objs = []
columns = ["x", "y"]
indices = [1, 0]
if results.shape[2] == 4:
columns.extend(["width", "height"])
indices.extend([3, 2])
for i in range(results.shape[1]):
df = pd.DataFrame(results[:, i, indices], columns=columns)
df["frame"] = list(range(1, results.shape[0] + 1))
df = df[~(df.x.isna() | df.y.isna())]
df["id"] = i + 1
df = df[["frame", "id"] + columns]
objs.append(df)
df = | pd.concat(objs) | pandas.concat |
# import app components
from app import app, data
from flask_cors import CORS
CORS(app) # enable CORS for all routes
# import libraries
from flask import request
import pandas as pd
import re
from datetime import datetime
from functools import reduce
# define functions
## process date args
def date_arg(arg):
try:
arg = datetime.strptime(arg, '%d-%m-%Y')
except (ValueError, TypeError):
try:
arg = datetime.strptime(arg, '%Y-%m-%d')
except (ValueError, TypeError):
arg = None
return arg
## process missing arg
def missing_arg(missing):
if missing == 'na':
missing_val = 'NA'
elif missing == 'empty':
missing_val = ''
elif missing == 'nan':
missing_val = 'NaN'
else:
missing_val = 'NULL'
return(missing_val)
## get date column
def get_date_col(df):
return list(filter(re.compile('^date_.*').search, df.columns.values))[0]
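# Behaviour of the helpers above (illustrative values):
# date_arg('13-01-2021') and date_arg('2021-01-13') both -> datetime(2021, 1, 13); unparseable input -> None
# missing_arg('na') -> 'NA', missing_arg('empty') -> '', missing_arg('nan') -> 'NaN', anything else -> 'NULL'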
# lists of datasets by location
data_canada = ['cases_timeseries_canada',
'mortality_timeseries_canada',
'recovered_timeseries_canada',
'testing_timeseries_canada',
'active_timeseries_canada',
'vaccine_administration_timeseries_canada',
'vaccine_distribution_timeseries_canada',
'vaccine_completion_timeseries_canada']
data_prov = ['cases_timeseries_prov',
'mortality_timeseries_prov',
'recovered_timeseries_prov',
'testing_timeseries_prov',
'active_timeseries_prov',
'vaccine_administration_timeseries_prov',
'vaccine_distribution_timeseries_prov',
'vaccine_completion_timeseries_prov']
data_hr = ['cases_timeseries_hr',
'mortality_timeseries_hr']
data_names = ['cases',
'mortality',
'recovered',
'testing',
'active',
'avaccine',
'dvaccine',
'cvaccine']
data_sknew = ['sk_new_cases_timeseries_hr_combined',
'sk_new_mortality_timeseries_hr_combined']
data_names_dates = {
'date_report': 'cases',
'date_death_report': 'mortality',
'date_recovered': 'recovered',
'date_testing': 'testing',
'date_active': 'active',
'date_vaccine_administered': 'avaccine',
'date_vaccine_distributed': 'dvaccine',
'date_vaccine_completed': 'cvaccine'
}
data_other = {
'prov': 'prov_map',
'hr': 'hr_map',
'age_cases': 'age_map_cases',
'age_mortality': 'age_map_mortality'
}
@app.route('/')
@app.route('/index')
def index():
# initialize response
response = {}
# subset dataframes
dfs = {k: pd.read_csv(data.ccodwg[k]) for k in data_canada}
# rename date columns
for df in dfs.values():
df.columns = df.columns.str.replace('^date_.*', 'date', regex = True)
# subset active dataframe to avoid duplicate columns
dfs['active_timeseries_canada'] = dfs['active_timeseries_canada'].drop(columns=['cumulative_cases',
'cumulative_recovered',
'cumulative_deaths'])
# merge dataframes
df = reduce(lambda left, right: pd.merge(left, right, on=['date', 'province'], how='outer'), dfs.values())
# convert date column and filter to most recent date
df['date'] = pd.to_datetime(df['date'], dayfirst=True)
df = df.loc[df['date'] == data.version['date']]
# format output
df['date'] = df['date'].dt.strftime('%d-%m-%Y')
df = df.fillna('NULL')
response['summary'] = df.to_dict(orient='records')
# add version to response
response['version'] = data.version['version']
# return response
return response
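# Example requests (hedged; host/port depend on how the app is run):
# GET /            -> latest Canada-wide summary
# GET /timeseries?stat=cases&loc=prov&after=2021-01-01&missing=nan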
@app.route('/timeseries')
def timeseries():
# initialize response
response = {}
# read arguments
stat = request.args.get('stat')
loc = request.args.get('loc')
date = request.args.get('date')
after = request.args.get('after')
before = request.args.get('before')
ymd = request.args.get('ymd')
missing = request.args.get('missing')
version = request.args.get('version')
# process date arguments
if date:
date = date_arg(date)
if after:
after = date_arg(after)
if before:
before = date_arg(before)
# process other arguments
missing_val = missing_arg(missing)
if not loc:
loc = 'prov'
# get dataframes
if loc == 'canada':
if stat == 'cases':
data_name = data_canada[0]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'mortality':
data_name = data_canada[1]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'recovered':
data_name = data_canada[2]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'testing':
data_name = data_canada[3]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'active':
data_name = data_canada[4]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'avaccine':
data_name = data_canada[5]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'dvaccine':
data_name = data_canada[6]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'cvaccine':
data_name = data_canada[7]
dfs = [pd.read_csv(data.ccodwg[data_name])]
else:
dfs = {k: pd.read_csv(data.ccodwg[k]) for k in data_canada}
dfs = list(dfs.values()) # convert to list
elif loc == 'prov' or loc in data.keys_prov.keys():
if stat == 'cases':
data_name = data_prov[0]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'mortality':
data_name = data_prov[1]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'recovered':
data_name = data_prov[2]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'testing':
data_name = data_prov[3]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'active':
data_name = data_prov[4]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'avaccine':
data_name = data_prov[5]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'dvaccine':
data_name = data_prov[6]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'cvaccine':
data_name = data_prov[7]
dfs = [ | pd.read_csv(data.ccodwg[data_name]) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
#import requests
#from bs4 import BeautifulSoup
#from pandas import DataFrame
#import re
#import datetime
import argparse
import os
import pandas as pd
#from internal_processing import get_job_details, get_name_and_loc, get_posted_and_applicants
#from internal_processing import get_job_title, get_job_id, get_job_description
from file_processing import get_files, rename_files_and_dirs, get_paths
#from helpers import job_detail_keys, get_job_details_dc, clean_dict, get_deutsch, get_adj_date
from db_processing import get_jobs_wrapper, get_compare_master, update_master
def main(directory, master_db, output_db=None, verbose=False):
"""
"""
open_dir = directory+'Open/'
# Get the files to process
files = get_files(open_dir)
# rename the files (if needed)
rename_files_and_dirs(files, open_dir, verbose)
pdf_master = pd.DataFrame()
if master_db:
if os.path.exists(master_db):
pdf_master = | pd.read_csv(master_db) | pandas.read_csv |
import os
import pandas as pd
import s3fs
def fetch_data(tables_to_download, data_path):
s3bucket = "twde-datalab/"
# Load all tables from raw data
if not os.path.exists(data_path + 'raw'):
os.makedirs(data_path + 'raw')
for t in tables_to_download:
key = "raw/{table}.csv".format(table=t)
if not os.path.exists(data_path + key):
print("Downloading data from {}".format(key))
s3 = s3fs.S3FileSystem(anon=True)
try:
s3.get(s3bucket + key, data_path + key)
except FileNotFoundError:
print("Could not find {0}. Was this generated locally?".
format(key))
def load_data(tables_to_load, data_path):
tables = {}
for t in tables_to_load:
key = "raw/{table}.csv".format(table=t)
print("Loading data to dataframe from {}".format(key))
tables[t] = pd.read_csv(data_path + key)
return tables
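# Usage sketch (hedged): 'train' as the base table is an assumption; the other table names come from the joins below.
# tables_needed = ['train', 'items', 'transactions', 'holidays_events']
# fetch_data(tables_needed, 'data/')
# tables = load_data(tables_needed, 'data/')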
def left_outer_join(left_table, right_table, on):
new_table = left_table.merge(right_table, how='left', on=on)
return new_table
def join_tables_to_train_data(tables, base_table):
filename = 'bigTable.csv'
print("Joining {}.csv and items.csv".format(base_table))
bigTable = left_outer_join(tables[base_table], tables['items'], 'item_nbr')
print("Joining transactions.csv to bigTable")
bigTable = left_outer_join(bigTable,
tables['transactions'],
['store_nbr', 'date'])
return bigTable, filename
def add_days_off(bigTable, tables):
holidays = tables['holidays_events']
holidays['date'] = | pd.to_datetime(holidays['date'], format="%Y-%m-%d") | pandas.to_datetime |