prompt (stringlengths 19–1.03M) | completion (stringlengths 4–2.12k) | api (stringlengths 8–90)
---|---|---
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
import pandas as pd
import numpy as np
from tqdm import tqdm
from torchvision.datasets.folder import default_loader
from sklearn.model_selection import train_test_split
from nima.train.utils import SCORE_NAMES, TAG_NAMES
def _remove_all_not_found_image(df: pd.DataFrame, path_to_images: str) -> pd.DataFrame:
clean_rows = []
for _, row in df.iterrows():
image_id = row['image_id']
try:
_ = default_loader(os.path.join(path_to_images, f"{image_id}.jpg"))
except (FileNotFoundError, OSError):
pass
else:
clean_rows.append(row)
df_clean = pd.DataFrame(clean_rows)
return df_clean
def remove_all_not_found_image(df: pd.DataFrame, path_to_images: str, num_workers: int = 64) -> pd.DataFrame:
futures = []
results = []
with ThreadPoolExecutor(max_workers=num_workers) as executor:
for df_batch in np.array_split(df, num_workers):
future = executor.submit(_remove_all_not_found_image, df=df_batch, path_to_images=path_to_images)
futures.append(future)
for future in tqdm(as_completed(futures)):
results.append(future.result())
new_df = pd.concat(results)
return new_df
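# A minimal usage sketch for the helper above, assuming a ratings CSV with an
# 'image_id' column; the file names, worker count, and split ratio below are
# illustrative assumptions, not part of the original snippet.
df = pd.read_csv("ratings.csv")
df_clean = remove_all_not_found_image(df, path_to_images="images/", num_workers=16)
df_train, df_val = train_test_split(df_clean, test_size=0.1, random_state=42)
print(f"train: {len(df_train)}, val: {len(df_val)}")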
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import copy
import unittest
import functools
import itertools
import types
import numpy as np
import numpy.testing as npt
import pandas as pd
import scipy.stats
from skbio import Sequence, DNA, RNA, Protein, TabularMSA
from skbio.sequence import GrammaredSequence
from skbio.util import classproperty
from skbio.util._decorator import overrides
from skbio.util._testing import ReallyEqualMixin
from skbio.metadata._testing import (MetadataMixinTests,
PositionalMetadataMixinTests)
from skbio.util import assert_data_frame_almost_equal
from skbio.util._testing import assert_index_equal
class TabularMSASubclass(TabularMSA):
"""Used for testing purposes."""
pass
class TestTabularMSAMetadata(unittest.TestCase, ReallyEqualMixin,
MetadataMixinTests):
def setUp(self):
self._metadata_constructor_ = functools.partial(TabularMSA, [])
class TestTabularMSAPositionalMetadata(unittest.TestCase, ReallyEqualMixin,
PositionalMetadataMixinTests):
def setUp(self):
def factory(axis_len, positional_metadata=None):
return TabularMSA([DNA('A' * axis_len)],
positional_metadata=positional_metadata)
self._positional_metadata_constructor_ = factory
class TestTabularMSA(unittest.TestCase, ReallyEqualMixin):
def test_from_dict_empty(self):
self.assertEqual(TabularMSA.from_dict({}), TabularMSA([], index=[]))
def test_from_dict_single_sequence(self):
self.assertEqual(TabularMSA.from_dict({'foo': DNA('ACGT')}),
TabularMSA([DNA('ACGT')], index=['foo']))
def test_from_dict_multiple_sequences(self):
msa = TabularMSA.from_dict(
{1: DNA('ACG'), 2: DNA('GGG'), 3: DNA('TAG')})
# Sort because order is arbitrary.
msa.sort()
self.assertEqual(
msa,
TabularMSA([DNA('ACG'), DNA('GGG'), DNA('TAG')], index=[1, 2, 3]))
def test_from_dict_invalid_input(self):
# Basic test to make sure error-checking in the TabularMSA constructor
# is being invoked.
with self.assertRaisesRegex(
ValueError, 'must match the number of positions'):
TabularMSA.from_dict({'a': DNA('ACG'), 'b': DNA('ACGT')})
def test_constructor_invalid_dtype(self):
with self.assertRaisesRegex(TypeError, 'GrammaredSequence.*Sequence'):
TabularMSA([Sequence('')])
with self.assertRaisesRegex(TypeError, 'GrammaredSequence.*int'):
TabularMSA([42, DNA('')])
def test_constructor_not_monomorphic(self):
with self.assertRaisesRegex(TypeError,
'matching type.*RNA.*DNA'):
TabularMSA([DNA(''), RNA('')])
with self.assertRaisesRegex(TypeError,
'matching type.*float.*Protein'):
TabularMSA([Protein(''), Protein(''), 42.0, Protein('')])
def test_constructor_unequal_length(self):
with self.assertRaisesRegex(
ValueError,
'must match the number of positions.*1 != 0'):
TabularMSA([Protein(''), Protein('P')])
with self.assertRaisesRegex(
ValueError,
'must match the number of positions.*1 != 3'):
TabularMSA([Protein('PAW'), Protein('ABC'), Protein('A')])
def test_constructor_non_iterable(self):
with self.assertRaises(TypeError):
TabularMSA(42)
def test_constructor_minter_and_index_both_provided(self):
with self.assertRaisesRegex(ValueError, 'both.*minter.*index'):
TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=str,
index=['a', 'b'])
def test_constructor_invalid_minter_callable(self):
with self.assertRaises(TypeError):
TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=float)
def test_constructor_missing_minter_metadata_key(self):
with self.assertRaises(KeyError):
TabularMSA([DNA('ACGT', metadata={'foo': 'bar'}), DNA('TGCA')],
minter='foo')
def test_constructor_unhashable_minter_metadata_key(self):
with self.assertRaises(TypeError):
TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=[])
def test_constructor_index_length_mismatch_iterable(self):
with self.assertRaisesRegex(ValueError,
'sequences.*2.*index length.*0'):
TabularMSA([DNA('ACGT'), DNA('TGCA')], index=iter([]))
def test_constructor_index_length_mismatch_index_object(self):
with self.assertRaisesRegex(ValueError,
'sequences.*2.*index length.*0'):
TabularMSA([DNA('ACGT'), DNA('TGCA')], index=pd.Index([]))
def test_constructor_invalid_index_scalar(self):
with self.assertRaises(TypeError):
TabularMSA([DNA('ACGT'), DNA('TGCA')], index=42)
def test_constructor_non_unique_labels(self):
msa = TabularMSA([DNA('ACGT'), DNA('ACGT')], index=[1, 1])
assert_index_equal(msa.index, pd.Int64Index([1, 1]))
def test_constructor_empty_no_index(self):
# sequence empty
msa = TabularMSA([])
self.assertIsNone(msa.dtype)
self.assertEqual(msa.shape, (0, 0))
assert_index_equal(msa.index, pd.RangeIndex(0))
with self.assertRaises(StopIteration):
next(iter(msa))
# position empty
seqs = [DNA(''), DNA('')]
msa = TabularMSA(seqs)
self.assertIs(msa.dtype, DNA)
self.assertEqual(msa.shape, (2, 0))
assert_index_equal(msa.index, pd.RangeIndex(2))
self.assertEqual(list(msa), seqs)
def test_constructor_empty_with_labels(self):
# sequence empty
msa = TabularMSA([], minter=lambda x: x)
assert_index_equal(msa.index, pd.Index([]))
msa = TabularMSA([], index=iter([]))
assert_index_equal(msa.index, pd.Index([]))
# position empty
msa = TabularMSA([DNA('', metadata={'id': 42}),
DNA('', metadata={'id': 43})], minter='id')
assert_index_equal(msa.index, pd.Index([42, 43]))
msa = TabularMSA([DNA(''), DNA('')], index=iter([42, 43]))
assert_index_equal(msa.index, pd.Index([42, 43]))
def test_constructor_non_empty_no_labels_provided(self):
# 1x3
seqs = [DNA('ACG')]
msa = TabularMSA(seqs)
self.assertIs(msa.dtype, DNA)
self.assertEqual(msa.shape, (1, 3))
assert_index_equal(msa.index, pd.RangeIndex(1))
self.assertEqual(list(msa), seqs)
# 3x1
seqs = [DNA('A'), DNA('C'), DNA('G')]
msa = TabularMSA(seqs)
self.assertIs(msa.dtype, DNA)
self.assertEqual(msa.shape, (3, 1))
assert_index_equal(msa.index, pd.RangeIndex(3))
self.assertEqual(list(msa), seqs)
def test_constructor_non_empty_with_labels_provided(self):
seqs = [DNA('ACG'), DNA('CGA'), DNA('GTT')]
msa = TabularMSA(seqs, minter=str)
self.assertIs(msa.dtype, DNA)
self.assertEqual(msa.shape, (3, 3))
assert_index_equal(msa.index, pd.Index(['ACG', 'CGA', 'GTT']))
self.assertEqual(list(msa), seqs)
msa = TabularMSA(seqs, index=iter([42, 43, 44]))
assert_index_equal(msa.index, pd.Index([42, 43, 44]))
def test_constructor_works_with_iterator(self):
seqs = [DNA('ACG'), DNA('CGA'), DNA('GTT')]
msa = TabularMSA(iter(seqs), minter=str)
self.assertIs(msa.dtype, DNA)
self.assertEqual(msa.shape, (3, 3))
assert_index_equal(msa.index, pd.Index(['ACG', 'CGA', 'GTT']))
self.assertEqual(list(msa), seqs)
def test_constructor_with_multiindex_index(self):
msa = TabularMSA([DNA('AA'), DNA('GG')],
index=[('foo', 42), ('bar', 43)])
self.assertIsInstance(msa.index, pd.MultiIndex)
assert_index_equal(msa.index, pd.Index([('foo', 42), ('bar', 43)]))
def test_constructor_with_multiindex_minter(self):
def multiindex_minter(seq):
if str(seq) == 'AC':
return ('foo', 42)
else:
return ('bar', 43)
msa = TabularMSA([DNA('AC'), DNA('GG')], minter=multiindex_minter)
self.assertIsInstance(msa.index, pd.MultiIndex)
assert_index_equal(msa.index, pd.Index([('foo', 42), ('bar', 43)]))
def test_copy_constructor_respects_default_index(self):
msa = TabularMSA([DNA('ACGT'), DNA('----'), DNA('AAAA')])
copy = TabularMSA(msa)
self.assertEqual(msa, copy)
self.assertIsNot(msa, copy)
assert_index_equal(msa.index, pd.RangeIndex(3))
assert_index_equal(copy.index, pd.RangeIndex(3))
def test_copy_constructor_without_metadata(self):
msa = TabularMSA([DNA('ACGT'), DNA('----')])
copy = TabularMSA(msa)
self.assertEqual(msa, copy)
self.assertIsNot(msa, copy)
assert_index_equal(copy.index, pd.RangeIndex(2))
def test_copy_constructor_with_metadata(self):
msa = TabularMSA([DNA('ACGT'),
DNA('----')],
metadata={'foo': 42},
positional_metadata={'bar': range(4)},
index=['idx1', 'idx2'])
copy = TabularMSA(msa)
self.assertEqual(msa, copy)
self.assertIsNot(msa, copy)
self.assertIsNot(msa.metadata, copy.metadata)
self.assertIsNot(msa.positional_metadata, copy.positional_metadata)
# pd.Index is immutable, no copy necessary.
self.assertIs(msa.index, copy.index)
def test_copy_constructor_state_override_with_minter(self):
msa = TabularMSA([DNA('ACGT'),
DNA('----')],
metadata={'foo': 42},
positional_metadata={'bar': range(4)},
index=['idx1', 'idx2'])
copy = TabularMSA(msa, metadata={'foo': 43},
positional_metadata={'bar': range(4, 8)},
minter=str)
self.assertNotEqual(msa, copy)
self.assertEqual(
copy,
TabularMSA([DNA('ACGT'),
DNA('----')],
metadata={'foo': 43},
positional_metadata={'bar': range(4, 8)},
minter=str))
def test_copy_constructor_state_override_with_index(self):
msa = TabularMSA([DNA('ACGT'),
DNA('----')],
metadata={'foo': 42},
positional_metadata={'bar': range(4)},
index=['idx1', 'idx2'])
copy = TabularMSA(msa, metadata={'foo': 43},
positional_metadata={'bar': range(4, 8)},
index=['a', 'b'])
self.assertNotEqual(msa, copy)
self.assertEqual(
copy,
TabularMSA([DNA('ACGT'),
DNA('----')],
metadata={'foo': 43},
positional_metadata={'bar': range(4, 8)},
index=['a', 'b']))
def test_copy_constructor_with_minter_and_index(self):
msa = TabularMSA([DNA('ACGT'), DNA('----')], index=['idx1', 'idx2'])
with self.assertRaisesRegex(ValueError, 'both.*minter.*index'):
TabularMSA(msa, index=['a', 'b'], minter=str)
def test_dtype(self):
self.assertIsNone(TabularMSA([]).dtype)
self.assertIs(TabularMSA([Protein('')]).dtype, Protein)
with self.assertRaises(AttributeError):
TabularMSA([]).dtype = DNA
with self.assertRaises(AttributeError):
del TabularMSA([]).dtype
def test_shape(self):
shape = TabularMSA([DNA('ACG'), DNA('GCA')]).shape
self.assertEqual(shape, (2, 3))
self.assertEqual(shape.sequence, shape[0])
self.assertEqual(shape.position, shape[1])
with self.assertRaises(TypeError):
shape[0] = 3
with self.assertRaises(AttributeError):
TabularMSA([]).shape = (3, 3)
with self.assertRaises(AttributeError):
del TabularMSA([]).shape
def test_index_getter_default_index(self):
msa = TabularMSA([DNA('AC'), DNA('AG'), DNA('AT')])
assert_index_equal(msa.index, pd.RangeIndex(3))
# immutable
with self.assertRaises(TypeError):
msa.index[1] = 2
# original state is maintained
assert_index_equal(msa.index, pd.RangeIndex(3))
def test_index_getter(self):
index = TabularMSA([DNA('AC'), DNA('AG'), DNA('AT')], minter=str).index
self.assertIsInstance(index, pd.Index)
assert_index_equal(index, pd.Index(['AC', 'AG', 'AT']))
# immutable
with self.assertRaises(TypeError):
index[1] = 'AA'
# original state is maintained
assert_index_equal(index, pd.Index(['AC', 'AG', 'AT']))
def test_index_mixed_type(self):
msa = TabularMSA([DNA('AC'), DNA('CA'), DNA('AA')],
index=['abc', 'd', 42])
assert_index_equal(msa.index, pd.Index(['abc', 'd', 42]))
def test_index_setter_empty(self):
msa = TabularMSA([])
msa.index = iter([])
assert_index_equal(msa.index, pd.Index([]))
def test_index_setter_non_empty(self):
msa = TabularMSA([DNA('AC'), DNA('AG'), DNA('AT')])
msa.index = range(3)
assert_index_equal(msa.index, pd.RangeIndex(3))
msa.index = range(3, 6)
assert_index_equal(msa.index, pd.RangeIndex(3, 6))
def test_index_setter_length_mismatch(self):
msa = TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=str)
index = pd.Index(['ACGT', 'TGCA'])
assert_index_equal(msa.index, index)
with self.assertRaisesRegex(ValueError, 'Length mismatch.*2.*3'):
msa.index = iter(['ab', 'cd', 'ef'])
# original state is maintained
assert_index_equal(msa.index, index)
def test_index_setter_non_unique_index(self):
msa = TabularMSA([RNA('UUU'), RNA('AAA')], minter=str)
msa.index = ['1', '1']
self.assertEqual(msa, TabularMSA([RNA('UUU'), RNA('AAA')],
index=['1', '1']))
def test_index_setter_tuples(self):
msa = TabularMSA([RNA('UUU'), RNA('AAA')])
msa.index = [('foo', 42), ('bar', 43)]
self.assertIsInstance(msa.index, pd.MultiIndex)
assert_index_equal(
msa.index,
pd.Index([('foo', 42), ('bar', 43)], tupleize_cols=True))
def test_index_setter_preserves_range_index(self):
msa = TabularMSA([RNA('UUU'), RNA('AAA')], minter=str)
msa.index = pd.RangeIndex(2)
self.assertEqual(msa, TabularMSA([RNA('UUU'), RNA('AAA')]))
assert_index_equal(msa.index, pd.RangeIndex(2))
def test_index_deleter(self):
msa = TabularMSA([RNA('UUU'), RNA('AAA')], minter=str)
assert_index_equal(msa.index, pd.Index(['UUU', 'AAA']))
del msa.index
assert_index_equal(msa.index, pd.RangeIndex(2))
# Delete again.
del msa.index
assert_index_equal(msa.index, pd.RangeIndex(2))
def test_bool(self):
self.assertFalse(TabularMSA([]))
self.assertFalse(TabularMSA([RNA('')]))
self.assertFalse(
TabularMSA([RNA('', metadata={'id': 1}),
RNA('', metadata={'id': 2})], minter='id'))
self.assertTrue(TabularMSA([RNA('U')]))
self.assertTrue(TabularMSA([RNA('--'), RNA('..')]))
self.assertTrue(TabularMSA([RNA('AUC'), RNA('GCA')]))
def test_len(self):
self.assertEqual(len(TabularMSA([])), 0)
self.assertEqual(len(TabularMSA([DNA('')])), 1)
self.assertEqual(len(TabularMSA([DNA('AT'), DNA('AG'), DNA('AT')])), 3)
def test_iter(self):
with self.assertRaises(StopIteration):
next(iter(TabularMSA([])))
seqs = [DNA(''), DNA('')]
self.assertEqual(list(iter(TabularMSA(seqs))), seqs)
seqs = [DNA('AAA'), DNA('GCT')]
self.assertEqual(list(iter(TabularMSA(seqs))), seqs)
def test_reversed(self):
with self.assertRaises(StopIteration):
next(reversed(TabularMSA([])))
seqs = [DNA(''), DNA('', metadata={'id': 42})]
self.assertEqual(list(reversed(TabularMSA(seqs))), seqs[::-1])
seqs = [DNA('AAA'), DNA('GCT')]
self.assertEqual(list(reversed(TabularMSA(seqs))), seqs[::-1])
def test_eq_and_ne(self):
# Each element contains the components necessary to construct a
# TabularMSA object: seqs and kwargs. None of these objects (once
# constructed) should compare equal to one another.
components = [
# empties
([], {}),
([RNA('')], {}),
([RNA('')], {'minter': str}),
# 1x1
([RNA('U')], {'minter': str}),
# 2x3
([RNA('AUG'), RNA('GUA')], {'minter': str}),
([RNA('AG'), RNA('GG')], {}),
# has labels
([RNA('AG'), RNA('GG')], {'minter': str}),
# different dtype
([DNA('AG'), DNA('GG')], {'minter': str}),
# different labels
([RNA('AG'), RNA('GG')], {'minter': lambda x: str(x) + '42'}),
# different sequence metadata
([RNA('AG', metadata={'id': 42}), RNA('GG')], {'minter': str}),
# different sequence data, same labels
([RNA('AG'), RNA('GA')],
{'minter': lambda x: 'AG' if 'AG' in x else 'GG'}),
# different MSA metadata
([RNA('AG'), RNA('GG')], {'metadata': {'foo': 42}}),
([RNA('AG'), RNA('GG')], {'metadata': {'foo': 43}}),
([RNA('AG'), RNA('GG')], {'metadata': {'foo': 42, 'bar': 43}}),
# different MSA positional metadata
([RNA('AG'), RNA('GG')],
{'positional_metadata': {'foo': [42, 43]}}),
([RNA('AG'), RNA('GG')],
{'positional_metadata': {'foo': [43, 44]}}),
([RNA('AG'), RNA('GG')],
{'positional_metadata': {'foo': [42, 43], 'bar': [43, 44]}}),
]
for seqs, kwargs in components:
obj = TabularMSA(seqs, **kwargs)
self.assertReallyEqual(obj, obj)
self.assertReallyEqual(obj, TabularMSA(seqs, **kwargs))
self.assertReallyEqual(obj, TabularMSASubclass(seqs, **kwargs))
for (seqs1, kwargs1), (seqs2, kwargs2) in \
itertools.combinations(components, 2):
obj1 = TabularMSA(seqs1, **kwargs1)
obj2 = TabularMSA(seqs2, **kwargs2)
self.assertReallyNotEqual(obj1, obj2)
self.assertReallyNotEqual(obj1,
TabularMSASubclass(seqs2, **kwargs2))
# completely different types
msa = TabularMSA([])
self.assertReallyNotEqual(msa, 42)
self.assertReallyNotEqual(msa, [])
self.assertReallyNotEqual(msa, {})
self.assertReallyNotEqual(msa, '')
def test_eq_constructed_from_different_iterables_compare_equal(self):
msa1 = TabularMSA([DNA('ACGT')])
msa2 = TabularMSA((DNA('ACGT'),))
self.assertReallyEqual(msa1, msa2)
def test_eq_ignores_minter_str_and_lambda(self):
# as long as the labels generated by the minters are the same, it
# doesn't matter whether the minters are equal.
msa1 = TabularMSA([DNA('ACGT', metadata={'id': 'a'})], minter='id')
msa2 = TabularMSA([DNA('ACGT', metadata={'id': 'a'})],
minter=lambda x: x.metadata['id'])
self.assertReallyEqual(msa1, msa2)
def test_eq_minter_and_index(self):
# as long as the labels generated by the minters are the same, it
# doesn't matter whether the minters are equal.
msa1 = TabularMSA([DNA('ACGT', metadata={'id': 'a'})], index=['a'])
msa2 = TabularMSA([DNA('ACGT', metadata={'id': 'a'})], minter='id')
self.assertReallyEqual(msa1, msa2)
def test_eq_default_index_and_equivalent_provided_index(self):
msa1 = TabularMSA([DNA('ACGT'), DNA('----'), DNA('....')])
msa2 = TabularMSA([DNA('ACGT'), DNA('----'), DNA('....')],
index=[0, 1, 2])
self.assertReallyEqual(msa1, msa2)
assert_index_equal(msa1.index, pd.RangeIndex(3))
assert_index_equal(msa2.index, pd.Int64Index([0, 1, 2]))
def test_reassign_index_empty(self):
# sequence empty
msa = TabularMSA([])
msa.reassign_index()
self.assertEqual(msa, TabularMSA([]))
assert_index_equal(msa.index, pd.RangeIndex(0))
msa.reassign_index(minter=str)
self.assertEqual(msa, TabularMSA([], minter=str))
assert_index_equal(msa.index, pd.Index([]))
# position empty
msa = TabularMSA([DNA('')])
msa.reassign_index()
self.assertEqual(msa, TabularMSA([DNA('')]))
assert_index_equal(msa.index, pd.RangeIndex(1))
msa.reassign_index(minter=str)
self.assertEqual(msa, TabularMSA([DNA('')], minter=str))
assert_index_equal(msa.index, pd.Index(['']))
def test_reassign_index_non_empty(self):
msa = TabularMSA([DNA('ACG', metadata={'id': 1}),
DNA('AAA', metadata={'id': 2})], minter=str)
assert_index_equal(msa.index, pd.Index(['ACG', 'AAA']))
msa.reassign_index(minter='id')
self.assertEqual(
msa,
TabularMSA([DNA('ACG', metadata={'id': 1}),
DNA('AAA', metadata={'id': 2})], minter='id'))
assert_index_equal(msa.index, pd.Index([1, 2]))
msa.reassign_index(mapping={1: 5})
self.assertEqual(
msa,
TabularMSA([DNA('ACG', metadata={'id': 1}),
DNA('AAA', metadata={'id': 2})], index=[5, 2]))
assert_index_equal(msa.index, pd.Index([5, 2]))
msa.reassign_index()
assert_index_equal(msa.index, pd.RangeIndex(2))
def test_reassign_index_minter_and_mapping_both_provided(self):
msa = TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=str)
with self.assertRaisesRegex(ValueError, 'both.*mapping.*minter.*'):
msa.reassign_index(minter=str, mapping={"ACGT": "fleventy"})
# original state is maintained
assert_index_equal(msa.index, pd.Index(['ACGT', 'TGCA']))
def test_reassign_index_mapping_invalid_type(self):
msa = TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=str)
with self.assertRaisesRegex(TypeError,
'mapping.*dict.*callable.*list'):
msa.reassign_index(mapping=['abc', 'def'])
# original state is maintained
assert_index_equal(msa.index, pd.Index(['ACGT', 'TGCA']))
def test_reassign_index_with_mapping_dict_empty(self):
seqs = [DNA("A"), DNA("C"), DNA("G")]
msa = TabularMSA(seqs, index=[0.5, 1.5, 2.5])
msa.reassign_index(mapping={})
self.assertEqual(msa, TabularMSA(seqs, index=[0.5, 1.5, 2.5]))
def test_reassign_index_with_mapping_dict_subset(self):
seqs = [DNA("A"), DNA("C"), DNA("G")]
mapping = {0.5: "a", 2.5: "c"}
msa = TabularMSA(seqs, index=[0.5, 1.5, 2.5])
msa.reassign_index(mapping=mapping)
self.assertEqual(msa, TabularMSA(seqs, index=['a', 1.5, 'c']))
def test_reassign_index_with_mapping_dict_superset(self):
seqs = [DNA("A"), DNA("C"), DNA("G")]
mapping = {0.5: "a", 1.5: "b", 2.5: "c", 3.5: "d"}
msa = TabularMSA(seqs, index=[0.5, 1.5, 2.5])
msa.reassign_index(mapping=mapping)
self.assertEqual(msa, TabularMSA(seqs, index=['a', 'b', 'c']))
def test_reassign_index_with_mapping_callable(self):
seqs = [DNA("A"), DNA("C"), DNA("G")]
msa = TabularMSA(seqs, index=[0, 1, 2])
msa.reassign_index(mapping=str)
self.assertEqual(msa, TabularMSA(seqs, index=['0', '1', '2']))
msa.reassign_index(mapping=lambda e: int(e) + 42)
self.assertEqual(msa, TabularMSA(seqs, index=[42, 43, 44]))
def test_reassign_index_non_unique_existing_index(self):
seqs = [DNA("A"), DNA("C"), DNA("G")]
mapping = {0.5: "a", 1.5: "b", 2.5: "c", 3.5: "d"}
msa = TabularMSA(seqs, index=[0.5, 0.5, 0.5])
msa.reassign_index(mapping=mapping)
self.assertEqual(msa, TabularMSA(seqs, index=['a', 'a', 'a']))
def test_reassign_index_non_unique_new_index(self):
seqs = [DNA("A"), DNA("C"), DNA("G")]
mapping = {0.5: "a", 1.5: "a", 2.5: "a"}
msa = TabularMSA(seqs, index=[0.5, 1.5, 2.5])
msa.reassign_index(mapping=mapping)
self.assertEqual(msa, TabularMSA(seqs, index=['a', 'a', 'a']))
def test_reassign_index_to_multiindex_with_minter(self):
msa = TabularMSA([DNA('AC'), DNA('.G')])
def multiindex_minter(seq):
if str(seq) == 'AC':
return ('foo', 42)
else:
return ('bar', 43)
msa.reassign_index(minter=multiindex_minter)
self.assertIsInstance(msa.index, pd.MultiIndex)
self.assertEqual(
msa,
TabularMSA([DNA('AC'), DNA('.G')],
index=[('foo', 42), ('bar', 43)]))
def test_reassign_index_to_multiindex_with_mapping(self):
msa = TabularMSA([DNA('AC'), DNA('.G')])
mapping = {0: ('foo', 42), 1: ('bar', 43)}
msa.reassign_index(mapping=mapping)
self.assertIsInstance(msa.index, pd.MultiIndex)
self.assertEqual(
msa,
TabularMSA([DNA('AC'), DNA('.G')],
index=[('foo', 42), ('bar', 43)]))
def test_sort_on_unorderable_msa_index(self):
msa = TabularMSA([DNA('AAA'), DNA('ACG'), DNA('---')],
index=[42, 41, 'foo'])
with self.assertRaises(TypeError):
msa.sort()
self.assertEqual(
msa,
TabularMSA([DNA('AAA'), DNA('ACG'), DNA('---')],
index=[42, 41, 'foo']))
def test_sort_empty_on_msa_index(self):
msa = TabularMSA([], index=[])
msa.sort()
self.assertEqual(msa, TabularMSA([], index=[]))
msa = TabularMSA([], index=[])
msa.sort(ascending=False)
self.assertEqual(msa, TabularMSA([], index=[]))
def test_sort_single_sequence_on_msa_index(self):
msa = TabularMSA([DNA('ACGT')], index=[42])
msa.sort()
self.assertEqual(msa, TabularMSA([DNA('ACGT')], index=[42]))
msa = TabularMSA([DNA('ACGT')], index=[42])
msa.sort(ascending=False)
self.assertEqual(msa, TabularMSA([DNA('ACGT')], index=[42]))
def test_sort_multiple_sequences_on_msa_index(self):
msa = TabularMSA([
DNA('TC'), DNA('GG'), DNA('CC')], index=['z', 'a', 'b'])
msa.sort(ascending=True)
self.assertEqual(
msa,
TabularMSA([
DNA('GG'), DNA('CC'), DNA('TC')], index=['a', 'b', 'z']))
msa = TabularMSA([
DNA('TC'), DNA('GG'), DNA('CC')], index=['z', 'a', 'b'])
msa.sort(ascending=False)
self.assertEqual(
msa,
TabularMSA([
DNA('TC'), DNA('CC'), DNA('GG')], index=['z', 'b', 'a']))
def test_sort_on_labels_with_some_repeats(self):
msa = TabularMSA([
DNA('TCCG', metadata={'id': 10}),
DNA('TAGG', metadata={'id': 10}),
DNA('GGGG', metadata={'id': 8}),
DNA('TGGG', metadata={'id': 10}),
DNA('ACGT', metadata={'id': 0}),
DNA('TAGA', metadata={'id': 10})], minter='id')
msa.sort()
self.assertEqual(
msa,
TabularMSA([
DNA('ACGT', metadata={'id': 0}),
DNA('GGGG', metadata={'id': 8}),
DNA('TCCG', metadata={'id': 10}),
DNA('TAGG', metadata={'id': 10}),
DNA('TGGG', metadata={'id': 10}),
DNA('TAGA', metadata={'id': 10})], minter='id'))
def test_sort_on_key_with_all_repeats(self):
msa = TabularMSA([
DNA('TTT', metadata={'id': 'a'}),
DNA('TTT', metadata={'id': 'b'}),
DNA('TTT', metadata={'id': 'c'})], minter=str)
msa.sort()
self.assertEqual(
msa,
TabularMSA([
DNA('TTT', metadata={'id': 'a'}),
DNA('TTT', metadata={'id': 'b'}),
DNA('TTT', metadata={'id': 'c'})], minter=str))
def test_sort_default_index(self):
msa = TabularMSA([DNA('TC'), DNA('GG'), DNA('CC')])
msa.sort()
self.assertEqual(
msa,
TabularMSA([DNA('TC'), DNA('GG'), DNA('CC')]))
def test_sort_default_index_descending(self):
msa = TabularMSA([DNA('TC'), DNA('GG'), DNA('CC')])
msa.sort(ascending=False)
self.assertEqual(
msa,
TabularMSA([DNA('CC'), DNA('GG'), DNA('TC')], index=[2, 1, 0]))
def test_sort_already_sorted(self):
msa = TabularMSA([DNA('TC'), DNA('GG'), DNA('CC')], index=[1, 2, 3])
msa.sort()
self.assertEqual(
msa,
TabularMSA([DNA('TC'), DNA('GG'), DNA('CC')], index=[1, 2, 3]))
msa = TabularMSA([DNA('TC'), DNA('GG'), DNA('CC')], index=[3, 2, 1])
msa.sort(ascending=False)
self.assertEqual(
msa,
TabularMSA([DNA('TC'), DNA('GG'), DNA('CC')], index=[3, 2, 1]))
def test_sort_reverse_sorted(self):
msa = TabularMSA([DNA('T'), DNA('G'), DNA('A')], index=[3, 2, 1])
msa.sort()
self.assertEqual(
msa,
TabularMSA([DNA('A'), DNA('G'), DNA('T')], index=[1, 2, 3]))
msa = TabularMSA([DNA('T'), DNA('G'), DNA('A')], index=[1, 2, 3])
msa.sort(ascending=False)
self.assertEqual(
msa,
TabularMSA([DNA('A'), DNA('G'), DNA('T')], index=[3, 2, 1]))
def test_sort_multiindex(self):
multiindex = [(2, 'a'), (1, 'c'), (3, 'b')]
sortedindex = [(1, 'c'), (2, 'a'), (3, 'b')]
msa = TabularMSA([DNA('A'), DNA('C'), DNA('G')], index=multiindex)
msa.sort()
self.assertEqual(msa, TabularMSA([DNA('C'), DNA('A'), DNA('G')],
index=sortedindex))
def test_sort_multiindex_with_level(self):
multiindex = [(2, 'a'), (1, 'c'), (3, 'b')]
first_sorted = [(1, 'c'), (2, 'a'), (3, 'b')]
second_sorted = [(2, 'a'), (3, 'b'), (1, 'c')]
msa = TabularMSA([DNA('A'), DNA('C'), DNA('G')], index=multiindex)
self.assertIsInstance(msa.index, pd.MultiIndex)
msa.sort(level=0)
self.assertEqual(msa, TabularMSA([DNA('C'), DNA('A'), DNA('G')],
index=first_sorted))
msa.sort(level=1)
self.assertEqual(msa, TabularMSA([DNA('A'), DNA('G'), DNA('C')],
index=second_sorted))
def test_to_dict_falsey_msa(self):
self.assertEqual(TabularMSA([]).to_dict(), {})
self.assertEqual(TabularMSA([RNA('')], index=['foo']).to_dict(),
{'foo': RNA('')})
def test_to_dict_non_empty(self):
seqs = [Protein('PAW', metadata={'id': 42}),
Protein('WAP', metadata={'id': -999})]
msa = TabularMSA(seqs, minter='id')
self.assertEqual(msa.to_dict(), {42: seqs[0], -999: seqs[1]})
def test_to_dict_default_index(self):
msa = TabularMSA([RNA('UUA'), RNA('-C-'), RNA('AAA')])
d = msa.to_dict()
self.assertEqual(d, {0: RNA('UUA'), 1: RNA('-C-'), 2: RNA('AAA')})
def test_to_dict_duplicate_labels(self):
msa = TabularMSA([DNA("A"), DNA("G")], index=[0, 0])
with self.assertRaises(ValueError) as cm:
msa.to_dict()
self.assertIn("unique", str(cm.exception))
def test_from_dict_to_dict_roundtrip(self):
d = {}
self.assertEqual(TabularMSA.from_dict(d).to_dict(), d)
# can roundtrip even with mixed key types
d1 = {'a': DNA('CAT'), 42: DNA('TAG')}
d2 = TabularMSA.from_dict(d1).to_dict()
self.assertEqual(d2, d1)
self.assertIs(d1['a'], d2['a'])
self.assertIs(d1[42], d2[42])
class TestContains(unittest.TestCase):
def test_no_sequences(self):
msa = TabularMSA([], index=[])
self.assertFalse('' in msa)
self.assertFalse('foo' in msa)
def test_with_str_labels(self):
msa = TabularMSA([RNA('AU'), RNA('A.')], index=['foo', 'bar'])
self.assertTrue('foo' in msa)
self.assertTrue('bar' in msa)
self.assertFalse('baz' in msa)
self.assertFalse(0 in msa)
def test_with_int_labels(self):
msa = TabularMSA([RNA('AU'), RNA('A.')], index=[42, -1])
self.assertTrue(42 in msa)
self.assertTrue(-1 in msa)
self.assertFalse(0 in msa)
self.assertFalse('foo' in msa)
class TestCopy(unittest.TestCase):
# Note: tests for metadata/positional_metadata are in mixin tests above.
def test_no_sequences(self):
msa = TabularMSA([])
msa_copy = copy.copy(msa)
self.assertEqual(msa, msa_copy)
self.assertIsNot(msa, msa_copy)
self.assertIsNot(msa._seqs, msa_copy._seqs)
def test_with_sequences(self):
msa = TabularMSA([DNA('ACGT', metadata={'foo': [1]}), DNA('TGCA')])
msa_copy = copy.copy(msa)
self.assertEqual(msa, msa_copy)
self.assertIsNot(msa, msa_copy)
self.assertIsNot(msa._seqs, msa_copy._seqs)
self.assertIsNot(msa[0], msa_copy[0])
self.assertIsNot(msa[1], msa_copy[1])
msa_copy.append(DNA('AAAA'), reset_index=True)
self.assertEqual(
msa,
TabularMSA([DNA('ACGT', metadata={'foo': [1]}), DNA('TGCA')]))
msa_copy._seqs[0].metadata['bar'] = 42
self.assertEqual(
msa,
TabularMSA([DNA('ACGT', metadata={'foo': [1]}), DNA('TGCA')]))
msa_copy._seqs[0].metadata['foo'].append(2)
self.assertEqual(
msa,
TabularMSA([DNA('ACGT', metadata={'foo': [1, 2]}), DNA('TGCA')]))
def test_with_index(self):
msa = TabularMSA([DNA('ACGT'), DNA('TGCA')], index=['foo', 'bar'])
msa_copy = copy.copy(msa)
self.assertEqual(msa, msa_copy)
self.assertIsNot(msa, msa_copy)
# pd.Index is immutable, no copy necessary.
self.assertIs(msa.index, msa_copy.index)
msa_copy.index = [1, 2]
assert_index_equal(msa_copy.index, pd.Index([1, 2]))
assert_index_equal(msa.index, pd.Index(['foo', 'bar']))
class TestDeepCopy(unittest.TestCase):
# Note: tests for metadata/positional_metadata are in mixin tests above.
def test_no_sequences(self):
msa = TabularMSA([])
msa_copy = copy.deepcopy(msa)
self.assertEqual(msa, msa_copy)
self.assertIsNot(msa, msa_copy)
self.assertIsNot(msa._seqs, msa_copy._seqs)
def test_with_sequences(self):
msa = TabularMSA([DNA('ACGT', metadata={'foo': [1]}), DNA('TGCA')])
msa_copy = copy.deepcopy(msa)
self.assertEqual(msa, msa_copy)
self.assertIsNot(msa, msa_copy)
self.assertIsNot(msa._seqs, msa_copy._seqs)
self.assertIsNot(msa[0], msa_copy[0])
self.assertIsNot(msa[1], msa_copy[1])
msa_copy.append(DNA('AAAA'), reset_index=True)
self.assertEqual(
msa,
TabularMSA([DNA('ACGT', metadata={'foo': [1]}), DNA('TGCA')]))
msa_copy._seqs[0].metadata['bar'] = 42
self.assertEqual(
msa,
TabularMSA([DNA('ACGT', metadata={'foo': [1]}), DNA('TGCA')]))
msa_copy._seqs[0].metadata['foo'].append(2)
self.assertEqual(
msa,
TabularMSA([DNA('ACGT', metadata={'foo': [1]}), DNA('TGCA')]))
def test_with_index(self):
msa = TabularMSA([DNA('ACGT'), DNA('TGCA')], index=['foo', 'bar'])
msa_copy = copy.deepcopy(msa)
self.assertEqual(msa, msa_copy)
self.assertIsNot(msa, msa_copy)
# pd.Index is immutable, no copy necessary.
self.assertIs(msa.index, msa_copy.index)
msa_copy.index = [1, 2]
assert_index_equal(msa_copy.index, pd.Index([1, 2]))
assert_index_equal(msa.index, pd.Index(['foo', 'bar']))
class SharedIndexTests:
def get(self, obj, indexable):
raise NotImplementedError()
def test_tuple_too_big(self):
with self.assertRaises(ValueError):
self.get(TabularMSA([]), (None, None, None))
def test_empty_msa_slice(self):
msa = TabularMSA([])
new = self.get(msa, slice(None, None))
self.assertIsNot(msa, new)
self.assertEqual(msa, new)
def test_msa_slice_all_first_axis(self):
msa = TabularMSA([RNA("AAA", metadata={1: 1}),
RNA("AAU", positional_metadata={0: [1, 2, 3]})],
metadata={0: 0}, positional_metadata={1: [3, 2, 1]})
new_slice = self.get(msa, slice(None))
new_ellipsis = self.get(msa, Ellipsis)
self.assertIsNot(msa, new_slice)
for s1, s2 in zip(msa, new_slice):
self.assertIsNot(s1, s2)
self.assertEqual(msa, new_slice)
self.assertIsNot(msa, new_ellipsis)
for s1, s2 in zip(msa, new_ellipsis):
self.assertIsNot(s1, s2)
self.assertEqual(msa, new_ellipsis)
def test_msa_slice_all_both_axes(self):
msa = TabularMSA([RNA("AAA", metadata={1: 1}),
RNA("AAU", positional_metadata={0: [1, 2, 3]})],
metadata={0: 0}, positional_metadata={1: [3, 2, 1]})
new_slice = self.get(msa, (slice(None), slice(None)))
new_ellipsis = self.get(msa, (Ellipsis, Ellipsis))
self.assertIsNot(msa, new_slice)
for s1, s2 in zip(msa, new_slice):
self.assertIsNot(s1, s2)
self.assertEqual(msa, new_slice)
self.assertIsNot(msa, new_ellipsis)
for s1, s2 in zip(msa, new_ellipsis):
self.assertIsNot(s1, s2)
self.assertEqual(msa, new_ellipsis)
def test_bool_index_first_axis(self):
a = DNA("AAA", metadata={1: 1})
b = DNA("NNN", positional_metadata={1: ['x', 'y', 'z']})
c = DNA("AAC")
msa = TabularMSA([a, b, c], metadata={0: 'x'},
positional_metadata={0: [1, 2, 3]},
index=[True, False, True])
new = self.get(msa, [True, True, False])
self.assertEqual(new, TabularMSA([a, b], metadata={0: 'x'},
positional_metadata={0: [1, 2, 3]},
index=[True, False]))
def test_bool_index_second_axis(self):
a = DNA("AAA", metadata={1: 1})
b = DNA("NNN", positional_metadata={1: ['x', 'y', 'z']})
c = DNA("AAC")
msa = TabularMSA([a, b, c], metadata={0: 'x'},
positional_metadata={0: [1, 2, 3]},
index=[True, False, True])
new = self.get(msa, (Ellipsis, [True, True, False]))
self.assertEqual(new, TabularMSA([a[0, 1], b[0, 1], c[0, 1]],
metadata={0: 'x'},
positional_metadata={0: [1, 2]},
index=[True, False, True]))
def test_bool_index_both_axes(self):
a = DNA("AAA", metadata={1: 1})
b = DNA("NNN", positional_metadata={1: ['x', 'y', 'z']})
c = DNA("AAC")
msa = TabularMSA([a, b, c], metadata={0: 'x'},
positional_metadata={0: [1, 2, 3]},
index=[True, False, True])
new = self.get(msa, ([False, True, True], [True, True, False]))
self.assertEqual(new, TabularMSA([b[0, 1], c[0, 1]],
metadata={0: 'x'},
positional_metadata={0: [1, 2]},
index=[False, True]))
def test_bool_index_too_big(self):
msa = TabularMSA([DNA("ABCD"), DNA("GHKM"), DNA("NRST")],
index=[False, True, False])
with self.assertRaises(IndexError):
self.get(msa, [False, False, False, False])
with self.assertRaises(IndexError):
self.get(msa, [True, True, True, True])
with self.assertRaises(IndexError):
self.get(msa, (Ellipsis, [True, False, True, False, True]))
with self.assertRaises(IndexError):
self.get(msa, ([True, False, True, False],
[True, False, True, False, False]))
def test_bool_index_too_small(self):
msa = TabularMSA([DNA("ABCD"), DNA("GHKM"), DNA("NRST")],
index=[False, True, False])
with self.assertRaises(IndexError):
self.get(msa, [False])
with self.assertRaises(IndexError):
self.get(msa, [True])
with self.assertRaises(IndexError):
self.get(msa, (Ellipsis, [True]))
with self.assertRaises(IndexError):
self.get(msa, ([True, False], [True, False, True, False]))
def test_bad_scalar(self):
msa = TabularMSA([DNA("ABCD"), DNA("GHKM"), DNA("NRST")])
with self.assertRaises((KeyError, TypeError)):
self.get(msa, "foo")
with self.assertRaises(IndexError):
self.get(msa, (Ellipsis, "foo"))
def test_bad_fancy_index(self):
msa = TabularMSA([DNA("ABCD"), DNA("GHKM"), DNA("NRST")])
with self.assertRaises((KeyError, TypeError, ValueError)):
self.get(msa, [0, "foo"])
with self.assertRaises(IndexError):
self.get(msa, (Ellipsis, [0, "foo"]))
def test_absurd_slice(self):
msa = TabularMSA([DNA("ABCD"), DNA("GHKM"), DNA("NRST")])
with self.assertRaises(TypeError):
self.get(msa, {set(1): 0})
class SharedPropertyIndexTests(SharedIndexTests):
def setUp(self):
self.combo_msa = TabularMSA([
DNA('ACGTA', metadata={0: 0},
positional_metadata={0: [1, 2, 3, 4, 5]}),
DNA('CGTAC', metadata={1: 1},
positional_metadata={1: [1, 2, 3, 4, 5]}),
DNA('GTACG', metadata={2: 2},
positional_metadata={2: [1, 2, 3, 4, 5]}),
DNA('TACGT', metadata={3: 3},
positional_metadata={3: [1, 2, 3, 4, 5]}),
DNA('ACGTT', metadata={4: 4},
positional_metadata={4: [1, 2, 3, 4, 5]})
], index=list('ABCDE'), metadata={'x': 'x'},
positional_metadata={'y': [5, 4, 3, 2, 1]})
"""First off, sorry to the next person who has to deal with this.
The next few tests will try and slice by a bunch of stuff, with
all combinations. Each element in the two lists is a tuple where
the first element is the thing to slice with, and the second is
the equivalent fancy index which describes the same range.
This lets us describe the results a little more declaratively
without setting up a thousand tests for each possible combination.
This does mean that iloc with a fancy index and a simple scalar must
work correctly.
"""
# This will be overridden for TestLoc because the first axis uses labels
self.combo_first_axis = [
([], []),
(slice(0, 0), []),
(Ellipsis, [0, 1, 2, 3, 4]),
(slice(None), [0, 1, 2, 3, 4]),
(slice(0, 10000), [0, 1, 2, 3, 4]),
(3, 3),
(-4, 1),
([0], [0]),
([2], [2]),
(slice(1, 3), [1, 2]),
(slice(3, 0, -1), [3, 2, 1]),
([-3, 2, 1], [2, 2, 1]),
([-4, -3, -2, -1], [1, 2, 3, 4]),
(np.array([-3, 2, 1]), [2, 2, 1]),
([True, True, False, False, True], [0, 1, 4]),
(np.array([True, True, False, True, False]), [0, 1, 3]),
(range(3), [0, 1, 2]),
([slice(0, 2), slice(3, 4), 4], [0, 1, 3, 4])
]
# Same in both TestLoc and TestILoc
self.combo_second_axis = self.combo_first_axis
def test_combo_single_axis_natural(self):
for idx, exp in self.combo_first_axis:
self.assertEqual(self.get(self.combo_msa, idx),
self.combo_msa.iloc[exp],
msg="%r did not match iloc[%r]" % (idx, exp))
def test_combo_first_axis_only(self):
for idx, exp in self.combo_first_axis:
self.assertEqual(self.get(self.combo_msa, idx, axis=0),
self.combo_msa.iloc[exp, ...],
msg="%r did not match iloc[%r, ...]" % (idx, exp))
def test_combo_second_axis_only(self):
for idx, exp in self.combo_second_axis:
self.assertEqual(self.get(self.combo_msa, idx, axis=1),
self.combo_msa.iloc[..., exp],
msg="%r did not match iloc[..., %r]" % (idx, exp))
def test_combo_both_axes(self):
for idx1, exp1 in self.combo_first_axis:
for idx2, exp2 in self.combo_second_axis:
self.assertEqual(self.get(self.combo_msa, (idx1, idx2)),
self.combo_msa.iloc[exp1, exp2],
msg=("%r did not match iloc[%r, %r]"
% ((idx1, idx2), exp1, exp2)))
class TestLoc(SharedPropertyIndexTests, unittest.TestCase):
def setUp(self):
SharedPropertyIndexTests.setUp(self)
self.combo_first_axis = [
([], []),
(slice('X', "Z"), []),
('A', 0),
('E', 4),
(['B'], [1]),
(np.asarray(['B']), [1]),
(slice('A', 'C', 2), [0, 2]),
(slice('C', 'A', -2), [2, 0]),
(slice('A', 'B'), [0, 1]),
(slice(None), [0, 1, 2, 3, 4]),
(slice('A', None), [0, 1, 2, 3, 4]),
(slice(None, 'C'), [0, 1, 2]),
(Ellipsis, [0, 1, 2, 3, 4]),
(self.combo_msa.index, [0, 1, 2, 3, 4]),
(['B', 'A', 'A', 'C'], [1, 0, 0, 2]),
(np.asarray(['B', 'A', 'A', 'C']), [1, 0, 0, 2]),
([True, False, True, True, False], [0, 2, 3]),
(np.asarray([True, False, True, True, False]), [0, 2, 3]),
]
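# Note: unlike positional slicing, label-based slices such as slice('A', 'C')
# include both endpoints, which is why slice('A', 'B') maps to positions [0, 1]
# above.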
def test_forced_axis_returns_copy(self):
msa = TabularMSA([Protein("EVANTHQMVS"), Protein("EVANTH*MVS")])
self.assertIsNot(msa.loc(axis=1), msa.loc)
def test_forced_axis_no_mutate(self):
msa = TabularMSA([Protein("EVANTHQMVS"), Protein("EVANTH*MVS")])
self.assertEqual(msa.loc(axis=1)[0], Sequence("EE"))
self.assertEqual(msa.loc[0], Protein("EVANTHQMVS"))
self.assertIsNone(msa.loc._axis)
def get(self, obj, indexable, axis=None):
if axis is None:
return obj.loc[indexable]
else:
return obj.loc(axis=axis)[indexable]
def test_complex_single_label(self):
a = DNA("ACG")
b = DNA("ACT")
c = DNA("ACA")
msa = TabularMSA([a, b, c], index=[('a', 0), ('a', 1), ('b', 0)])
self.assertIs(a, self.get(msa, (('a', 0),)))
self.assertIs(b, self.get(msa, (('a', 1),)))
self.assertIs(c, self.get(msa, (('b', 0),)))
def test_partial_label(self):
a = DNA("ACG")
b = DNA("ACT")
c = DNA("ACA")
msa = TabularMSA([a, b, c], index=[('a', 0), ('a', 1), ('b', 0)])
exp_a = TabularMSA([a, b], index=[0, 1])
exp_b = TabularMSA([c], index=[0])
self.assertEqual(self.get(msa, 'a'), exp_a)
self.assertEqual(self.get(msa, 'b'), exp_b)
def test_label_not_exists(self):
msa = TabularMSA([DNA("ACG")], index=['foo'])
with self.assertRaises(KeyError):
self.get(msa, 'bar')
def test_duplicate_index_nonscalar_label(self):
a = DNA("ACGA", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = DNA("A-GA", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
c = DNA("AAGA", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
d = DNA("ACCA", metadata={3: 3}, positional_metadata={3: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c, d], metadata={'x': 'y'},
positional_metadata={'z': [1, 2, 3, 4]},
index=[0, 0, 1, 2])
self.assertEqual(self.get(msa, 0),
TabularMSA([a, b], metadata={'x': 'y'},
positional_metadata={'z': [1, 2, 3, 4]},
index=[0, 0]))
def test_duplicate_index_scalar_label(self):
a = DNA("ACGA", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = DNA("A-GA", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
c = DNA("AAGA", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
d = DNA("ACCA", metadata={3: 3}, positional_metadata={3: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c, d], metadata={'x': 'y'},
positional_metadata={'z': [1, 2, 3, 4]},
index=[0, 0, 1, 2])
self.assertEqual(self.get(msa, 1), c)
def test_multiindex_complex(self):
a = DNA("ACG")
b = DNA("ACT")
c = DNA("ACA")
msa = TabularMSA([a, b, c], index=[('a', 0), ('a', 1), ('b', 0)])
exp = TabularMSA([a, c], index=[('a', 0), ('b', 0)])
self.assertEqual(self.get(msa, [('a', 0), ('b', 0)]), exp)
def test_fancy_index_missing_label(self):
msa = TabularMSA([DNA("ACG")], index=['foo'])
with self.assertRaises(KeyError):
self.get(msa, ['foo', 'bar'])
with self.assertRaises(KeyError):
self.get(msa, ['bar'])
def test_multiindex_fancy_indexing_incomplete_label(self):
a = RNA("UUAG", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = RNA("UAAG", metadata={1: 0}, positional_metadata={1: [1, 2, 3, 4]})
c = RNA("UAA-", metadata={2: 0}, positional_metadata={2: [1, 2, 3, 4]})
d = RNA("UA-G", metadata={3: 0}, positional_metadata={3: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c, d], metadata={'x': 'y'},
positional_metadata={'c': ['a', 'b', 'c', 'd']},
index=[('a', 'x', 0), ('a', 'x', 1), ('a', 'y', 2),
('b', 'x', 0)])
self.assertEqual(self.get(msa, (('a', 'x'), Ellipsis)),
TabularMSA([a, b], metadata={'x': 'y'},
positional_metadata={'c': ['a', 'b', 'c',
'd']},
index=[0, 1]))
def test_multiindex_complicated_axis(self):
a = RNA("UUAG", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = RNA("UAAG", metadata={1: 0}, positional_metadata={1: [1, 2, 3, 4]})
c = RNA("UAA-", metadata={2: 0}, positional_metadata={2: [1, 2, 3, 4]})
d = RNA("UA-G", metadata={3: 0}, positional_metadata={3: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c, d], metadata={'x': 'y'},
positional_metadata={'c': ['a', 'b', 'c', 'd']},
index=[('a', 'x', 0), ('a', 'x', 1), ('a', 'y', 2),
('b', 'x', 0)])
self.assertEqual(self.get(msa, (([False, True, False, True],
'x', 0), Ellipsis)),
TabularMSA([d], metadata={'x': 'y'},
positional_metadata={'c': ['a', 'b', 'c',
'd']},
index=[('b', 'x', 0)]))
def test_multiindex_complicated_axis_empty_selection(self):
a = RNA("UUAG", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = RNA("UAAG", metadata={1: 0}, positional_metadata={1: [1, 2, 3, 4]})
c = RNA("UAA-", metadata={2: 0}, positional_metadata={2: [1, 2, 3, 4]})
d = RNA("UA-G", metadata={3: 0}, positional_metadata={3: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c, d], metadata={'x': 'y'},
positional_metadata={'c': ['a', 'b', 'c', 'd']},
index=[('a', 'x', 0), ('a', 'x', 1), ('a', 'y', 2),
('b', 'x', 0)])
self.assertEqual(self.get(msa, (([False, True, False, True],
'x', 2), Ellipsis)),
TabularMSA([], metadata={'x': 'y'},
# TODO: Change for #1198
positional_metadata=None,
index=[]))
def test_bool_index_scalar_bool_label(self):
a = DNA("ACGA", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = DNA("A-GA", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
c = DNA("AAGA", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
d = DNA("ACCA", metadata={3: 3}, positional_metadata={3: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c, d], metadata={'x': 'y'},
positional_metadata={'z': [1, 2, 3, 4]},
index=[False, True, False, False])
self.assertEqual(self.get(msa, True), b)
def test_bool_index_nonscalar_bool_label(self):
a = DNA("ACGA", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = DNA("A-GA", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
c = DNA("AAGA", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
d = DNA("ACCA", metadata={3: 3}, positional_metadata={3: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c, d], metadata={'x': 'y'},
positional_metadata={'z': [1, 2, 3, 4]},
index=[False, True, False, True])
self.assertEqual(self.get(msa, True),
TabularMSA([b, d], metadata={'x': 'y'},
positional_metadata={'z': [1, 2, 3, 4]},
index=[True, True]))
def test_unhashable_index_first_axis(self):
s = slice(0, 1)
msa = TabularMSA([Protein(""), Protein(""), Protein("")],
index=[s, slice(1, 2), slice(2, 3)])
with self.assertRaisesRegex(TypeError, 'unhashable'):
self.get(msa, Ellipsis, axis=0)
with self.assertRaisesRegex(TypeError, 'unhashable'):
self.get(msa, s, axis=0)
with self.assertRaisesRegex(TypeError, 'unhashable'):
self.get(msa, 0, axis=0)
def test_unhashable_index_second_axis(self):
msa = TabularMSA([Protein("AA"), Protein("CC"), Protein("AA")],
index=[slice(0, 1), slice(1, 2), slice(2, 3)])
with self.assertRaisesRegex(TypeError, 'unhashable'):
self.get(msa, Ellipsis, axis=1)
with self.assertRaisesRegex(TypeError, 'unhashable'):
self.get(msa, [0, 1], axis=1)
with self.assertRaisesRegex(TypeError, 'unhashable'):
self.get(msa, 0, axis=1)
def test_unhashable_index_both_axes(self):
s = [0, 1]
msa = TabularMSA([RNA("AA"), RNA("CC"), RNA("AA")],
index=[s, [1, 2], [2, 3]])
with self.assertRaisesRegex(TypeError, 'unhashable.*list'):
# This implies copy cannot be derived from getitem
self.get(msa, (Ellipsis, Ellipsis))
with self.assertRaisesRegex(TypeError, 'unhashable.*list'):
self.get(msa, (s, 0))
with self.assertRaisesRegex(TypeError, 'unhashable.*list'):
self.get(msa, ('x', 10))
def test_categorical_index_scalar_label(self):
msa = TabularMSA([RNA("ACUG"), RNA("ACUA"), RNA("AAUG"), RNA("AC-G")],
index=pd.CategoricalIndex(['a', 'b', 'b', 'c']))
self.assertEqual(self.get(msa, 'a'), RNA("ACUG"))
def test_categorical_index_nonscalar_label(self):
msa = TabularMSA([RNA("ACUG"), RNA("ACUA"), RNA("AAUG"), RNA("AC-G")],
index=pd.CategoricalIndex(['a', 'b', 'b', 'c']))
self.assertEqual(self.get(msa, 'b'),
TabularMSA([RNA("ACUA"), RNA("AAUG")],
index=pd.CategoricalIndex(
['b', 'b'], categories=['a', 'b', 'c'])
))
def test_float_index_out_of_order_slice(self):
msa = TabularMSA([DNA("ACGG"), DNA("AAGC"), DNA("AAAA"), DNA("ACTC")],
index=[0.1, 2.4, 5.1, 2.6])
with self.assertRaises(KeyError):
self.get(msa, slice(0.1, 2.7))
msa.sort()
result = self.get(msa, slice(0.1, 2.7))
self.assertEqual(result, TabularMSA([DNA("ACGG"), DNA("AAGC"),
DNA("ACTC")],
index=[0.1, 2.4, 2.6]))
def test_nonscalar_fancy_index(self):
msa = TabularMSA([DNA('ACGT'), DNA('ACGT'), DNA('ACGT')],
index=[('a', 0, 1), ('a', 1, 1), ('b', 0, 1)])
with self.assertRaisesRegex(TypeError,
'tuple.*independent.*MultiIndex'):
self.get(msa, ['a', 'b'])
def test_missing_first_nonscalar_fancy_index(self):
msa = TabularMSA([DNA('ACGT'), DNA('ACGT'), DNA('ACGT')],
index=[('a', 0, 1), ('a', 1, 1), ('b', 0, 1)])
with self.assertRaises(KeyError):
self.get(msa, ['x', 'a', 'b'])
def test_tuple_fancy_index(self):
msa = TabularMSA([DNA('ACGT'), DNA('ACGT'), DNA('ACGT')],
index=[('a', 0, 1), ('a', 1, 1), ('b', 0, 1)])
with self.assertRaisesRegex(TypeError, 'tuple.*pd.MultiIndex.*label'):
self.get(msa, ((('a', 0, 1), ('b', 0, 1)), Ellipsis))
def test_non_multiindex_tuple(self):
msa = TabularMSA([DNA('ACGT'), DNA('ACGT'), DNA('ACGT')])
with self.assertRaisesRegex(TypeError, 'tuple.*first axis'):
self.get(msa, ((0, 1), Ellipsis))
def test_assertion_exists_for_future_failure_of_get_sequence_loc(self):
# Ideally we wouldn't need this test or the branch, but the most common
# failure for pandas would be returning a series instead of the value.
# We should make sure that the user gets an error should this ever
# happen again. Getting a series of DNA looks pretty weird...
msa = TabularMSA([DNA('ACGT'), DNA('ACGT'), DNA('ACGT')])
with self.assertRaises(AssertionError):
msa._get_sequence_loc_([1, 2])
class TestILoc(SharedPropertyIndexTests, unittest.TestCase):
def setUp(self):
SharedPropertyIndexTests.setUp(self)
self.combo_first_axis = self.combo_second_axis
def test_forced_axis_returns_copy(self):
msa = TabularMSA([Protein("EVANTHQMVS"), Protein("EVANTH*MVS")])
self.assertIsNot(msa.iloc(axis=1), msa.iloc)
def test_forced_axis_no_mutate(self):
msa = TabularMSA([Protein("EVANTHQMVS"), Protein("EVANTH*MVS")])
self.assertEqual(msa.iloc(axis=1)[0], Sequence("EE"))
self.assertEqual(msa.iloc[0], Protein("EVANTHQMVS"))
self.assertIsNone(msa.iloc._axis)
def get(self, obj, indexable, axis=None):
if axis is None:
return obj.iloc[indexable]
else:
return obj.iloc(axis=axis)[indexable]
def test_entire_fancy_first_axis(self):
msa = TabularMSA([
DNA("ACCA", metadata={'a': 'foo'},
positional_metadata={'a': [7, 6, 5, 4]}),
DNA("GGAA", metadata={'b': 'bar'},
positional_metadata={'b': [3, 4, 5, 6]})
], metadata={'c': 'baz'},
positional_metadata={'foo': [1, 2, 3, 4]})
new_np_simple = self.get(msa, np.arange(2))
new_list_simple = self.get(msa, [0, 1])
new_list_backwards = self.get(msa, [-2, -1])
self.assertIsNot(msa, new_np_simple)
self.assertEqual(msa, new_np_simple)
self.assertIsNot(msa, new_list_simple)
self.assertEqual(msa, new_list_simple)
self.assertIsNot(msa, new_list_backwards)
self.assertEqual(msa, new_list_backwards)
def test_fancy_entire_second_axis(self):
msa = TabularMSA([
DNA("ACCA", metadata={'a': 'foo'},
positional_metadata={'a': [7, 6, 5, 4]}),
DNA("GGAA", metadata={'b': 'bar'},
positional_metadata={'b': [3, 4, 5, 6]})
], metadata={'c': 'baz'},
positional_metadata={'foo': [1, 2, 3, 4]})
new_np_simple = self.get(msa, (Ellipsis, np.arange(4)))
new_list_simple = self.get(msa, (Ellipsis, [0, 1, 2, 3]))
new_list_backwards = self.get(msa, (Ellipsis, [-4, -3, -2, -1]))
self.assertIsNot(msa, new_np_simple)
self.assertEqual(msa, new_np_simple)
self.assertIsNot(msa, new_list_simple)
self.assertEqual(msa, new_list_simple)
self.assertIsNot(msa, new_list_backwards)
self.assertEqual(msa, new_list_backwards)
def test_fancy_entire_both_axes(self):
msa = TabularMSA([
DNA("ACCA", metadata={'a': 'foo'},
positional_metadata={'a': [7, 6, 5, 4]}),
DNA("GGAA", metadata={'b': 'bar'},
positional_metadata={'b': [3, 4, 5, 6]})
], metadata={'c': 'baz'},
positional_metadata={'foo': [1, 2, 3, 4]})
new_np_simple = self.get(msa, (np.arange(2), np.arange(4)))
new_list_simple = self.get(msa, ([0, 1], [0, 1, 2, 3]))
new_list_backwards = self.get(msa, ([-2, -1], [-4, -3, -2, -1]))
self.assertIsNot(msa, new_np_simple)
self.assertEqual(msa, new_np_simple)
self.assertIsNot(msa, new_list_simple)
self.assertEqual(msa, new_list_simple)
self.assertIsNot(msa, new_list_backwards)
self.assertEqual(msa, new_list_backwards)
def test_fancy_out_of_bound(self):
with self.assertRaises(IndexError):
self.get(TabularMSA([DNA('AC')]), [0, 1, 2])
with self.assertRaises(IndexError):
self.get(TabularMSA([DNA('AC')]), (Ellipsis, [0, 1, 2]))
def test_fancy_empty_both_axis(self):
msa = TabularMSA([DNA("ACGT", metadata={'x': 1}),
DNA("TGCA", metadata={'y': 2})], index=list("AB"))
new_np_simple = self.get(msa, (np.arange(0), np.arange(0)))
new_list_simple = self.get(msa, ([], []))
self.assertEqual(TabularMSA([]), new_np_simple)
self.assertEqual(TabularMSA([]), new_list_simple)
def test_fancy_standard_first_axis(self):
a = DNA("ACGT", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = DNA("ACGT", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
c = DNA("ACGT", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c], metadata={3: 3},
positional_metadata={3: [1, 2, 3, 4]})
self.assertEqual(self.get(msa, [0, 2]),
TabularMSA([a, c], metadata={3: 3},
positional_metadata={3: [1, 2, 3, 4]},
index=[0, 2]))
def test_fancy_standard_second_axis(self):
a = DNA("ACGT", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = DNA("ACGT", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
c = DNA("ACGT", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c], metadata={3: 3},
positional_metadata={3: [1, 2, 3, 4]})
self.assertEqual(self.get(msa, (Ellipsis, [0, 2])),
TabularMSA([a[0, 2], b[0, 2], c[0, 2]],
metadata={3: 3},
positional_metadata={3: [1, 3]},
index=[0, 1, 2]))
def test_fancy_standard_both_axes(self):
a = DNA("ACGT", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = DNA("ACGT", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
c = DNA("ACGT", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c], metadata={3: 3},
positional_metadata={3: [1, 2, 3, 4]})
self.assertEqual(self.get(msa, ([0, 2], [0, 2])),
TabularMSA([a[0, 2], c[0, 2]],
metadata={3: 3},
positional_metadata={3: [1, 3]},
index=[0, 2]))
def test_fancy_empty_first_axis(self):
a = DNA("ACGT", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = DNA("ACGT", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
c = DNA("ACGT", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c], metadata={3: 3},
positional_metadata={3: [1, 2, 3, 4]})
# TODO: Change for #1198
self.assertEqual(self.get(msa, []),
TabularMSA([], metadata={3: 3}))
def test_fancy_empty_second_axis(self):
a = DNA("ACGT", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = DNA("ACGT", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
c = DNA("ACGT", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c], metadata={3: 3},
positional_metadata={3: [1, 2, 3, 4]})
self.assertEqual(self.get(msa, (Ellipsis, [])),
TabularMSA([a[0:0], b[0:0], c[0:0]],
metadata={3: 3},
positional_metadata={3: np.array(
[], dtype=int)}))
def test_fancy_empty_both_axes(self):
a = DNA("ACGT", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = DNA("ACGT", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
c = DNA("ACGT", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c], metadata={3: 3},
positional_metadata={3: [1, 2, 3, 4]})
# TODO: Change for #1198
self.assertEqual(self.get(msa, ([], [])),
TabularMSA([], metadata={3: 3}))
def test_fancy_out_of_bounds_first_axis(self):
msa = TabularMSA([DNA("ACGT"), DNA("GCAT")])
with self.assertRaises(IndexError):
self.get(msa, [10])
with self.assertRaises(IndexError):
self.get(msa, [0, 1, 10])
def test_fancy_out_of_bounds_second_axis(self):
msa = TabularMSA([DNA("ACGT"), DNA("GCAT")])
with self.assertRaises(IndexError):
self.get(msa, (Ellipsis, [10]))
with self.assertRaises(IndexError):
self.get(msa, (Ellipsis, [1, 2, 4]))
def test_get_scalar_first_axis(self):
a = DNA("AA", metadata={'a': 'foo'}, positional_metadata={'x': [1, 2]})
b = DNA("GG", metadata={'b': 'bar'}, positional_metadata={'y': [3, 4]})
msa = TabularMSA([a, b])
new0 = self.get(msa, 0)
new1 = self.get(msa, 1)
self.assertEqual(new0, a)
self.assertEqual(new1, b)
def test_get_scalar_second_axis(self):
a = DNA("AA", metadata={'a': 'foo'}, positional_metadata={'x': [1, 2]})
b = DNA("GC", metadata={'b': 'bar'}, positional_metadata={'y': [3, 4]})
msa = TabularMSA([a, b], positional_metadata={'z': [5, 6]})
new0 = self.get(msa, (Ellipsis, 0))
new1 = self.get(msa, (Ellipsis, 1))
self.assertEqual(new0,
Sequence("AG", metadata={'z': 5},
positional_metadata={'x': [1, np.nan],
'y': [np.nan, 3]}))
self.assertEqual(new1,
Sequence("AC", metadata={'z': 6},
positional_metadata={'x': [2, np.nan],
'y': [np.nan, 4]}))
def test_scalar_sliced_first_axis(self):
a = DNA("ACGT", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = DNA("ACGT", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
c = DNA("ACGT", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c], metadata={3: 3},
positional_metadata={3: [1, 2, 3, 4]})
self.assertEqual(self.get(msa, (1, [1, 3])),
DNA("CT", metadata={1: 1},
positional_metadata={1: [2, 4]}))
def test_scalar_sliced_second_axis(self):
a = DNA("ACGT", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = DNA("ACGA", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
c = DNA("ACGT", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c], metadata={3: 3},
positional_metadata={3: [1, 2, 3, 4]})
self.assertEqual(self.get(msa, ([1, 2], 3)),
Sequence("AT", metadata={3: 4},
positional_metadata={1: [4, np.nan],
2: [np.nan, 4]}))
def test_get_scalar_out_of_bound_first_axis(self):
a = DNA("AA", metadata={'a': 'foo'}, positional_metadata={'x': [1, 2]})
b = DNA("GC", metadata={'b': 'bar'}, positional_metadata={'y': [3, 4]})
msa = TabularMSA([a, b], positional_metadata={'z': [5, 6]})
with self.assertRaises(IndexError):
self.get(msa, 3)
def test_get_scalar_out_of_bound_second_axis(self):
a = DNA("AA", metadata={'a': 'foo'}, positional_metadata={'x': [1, 2]})
b = DNA("GC", metadata={'b': 'bar'}, positional_metadata={'y': [3, 4]})
msa = TabularMSA([a, b], positional_metadata={'z': [5, 6]})
with self.assertRaises(IndexError):
self.get(msa, (Ellipsis, 3))
class TestGetItem(SharedIndexTests, unittest.TestCase):
def get(self, obj, indexable):
return obj[indexable]
def test_uses_iloc_not_loc(self):
a = DNA("ACGA")
b = DNA("ACGT")
msa = TabularMSA([a, b], index=[1, 0])
self.assertIs(msa[0], a)
self.assertIs(msa[1], b)
class TestConstructor(unittest.TestCase):
def setUp(self):
self.seqs = [DNA("ACGT"), DNA("GCTA")]
self.m = {'x': 'y', 0: 1}
self.pm = pd.DataFrame({'foo': [1, 2, 3, 4]})
self.index = pd.Index(['a', 'b'])
self.msa = TabularMSA(self.seqs, metadata=self.m,
positional_metadata=self.pm, index=self.index)
def test_no_override(self):
result = self.msa._constructor_()
self.assertEqual(self.msa, result)
for seq1, seq2 in zip(result, self.msa):
self.assertIsNot(seq1, seq2)
self.assertIsNot(result.metadata, self.msa.metadata)
self.assertIsNot(result.positional_metadata,
self.msa.positional_metadata)
def test_sequence_override_same_seqs(self):
result = self.msa._constructor_(sequences=self.seqs)
self.assertEqual(self.msa, result)
for seq1, seq2 in zip(result, self.msa):
self.assertIsNot(seq1, seq2)
self.assertIsNot(result.metadata, self.msa.metadata)
self.assertIsNot(result.positional_metadata,
self.msa.positional_metadata)
def test_sequence_override(self):
seqs = [RNA("ACGU"), RNA("GCUA")]
result = self.msa._constructor_(sequences=seqs)
self.assertNotEqual(result, self.msa)
self.assertEqual(list(result), seqs)
assert_index_equal(result.index, self.index)
self.assertEqual(result.metadata, self.m)
assert_data_frame_almost_equal(result.positional_metadata, self.pm)
def test_no_override_no_md(self):
msa = TabularMSA(self.seqs, index=self.index)
self.assertEqual(msa, msa._constructor_())
def test_metadata_override(self):
new_md = {'foo': {'x': 0}}
result = self.msa._constructor_(metadata=new_md)
self.assertNotEqual(result, self.msa)
self.assertEqual(list(result), self.seqs)
assert_index_equal(result.index, self.index)
self.assertEqual(result.metadata, new_md)
assert_data_frame_almost_equal(result.positional_metadata, self.pm)
def test_positional_metadata_override(self):
new_pm = pd.DataFrame({'x': [1, 2, 3, 4]})
result = self.msa._constructor_(positional_metadata=new_pm)
self.assertNotEqual(result, self.msa)
self.assertEqual(list(result), self.seqs)
assert_index_equal(result.index, self.index)
self.assertEqual(result.metadata, self.m)
assert_data_frame_almost_equal(result.positional_metadata, new_pm)
def test_index_override(self):
new_index = pd.Index([('a', 0), ('b', 1)])
result = self.msa._constructor_(index=new_index)
self.assertNotEqual(result, self.msa)
self.assertEqual(list(result), self.seqs)
assert_index_equal(result.index, new_index)
self.assertEqual(result.metadata, self.m)
assert_data_frame_almost_equal(result.positional_metadata, self.pm)
class TestAppend(unittest.TestCase):
# Error cases
def test_invalid_minter_index_reset_index_parameter_combos(self):
msa = TabularMSA([])
param_combos = (
{},
{'minter': str, 'index': 'foo', 'reset_index': True},
{'minter': str, 'index': 'foo'},
{'minter': str, 'reset_index': True},
{'index': 'foo', 'reset_index': True}
)
for params in param_combos:
with self.assertRaisesRegex(ValueError,
"one of.*minter.*index.*reset_index"):
msa.append(DNA('ACGT'), **params)
self.assertEqual(msa, TabularMSA([]))
def test_invalid_dtype(self):
msa = TabularMSA([])
with self.assertRaisesRegex(TypeError, 'GrammaredSequence.*Sequence'):
msa.append(Sequence(''), reset_index=True)
self.assertEqual(msa, TabularMSA([]))
def test_dtype_mismatch_rna(self):
msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
with self.assertRaisesRegex(TypeError, 'matching type.*RNA.*DNA'):
msa.append(RNA('UUUU'), reset_index=True)
self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
def test_dtype_mismatch_float(self):
msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
with self.assertRaisesRegex(TypeError, 'matching type.*float.*DNA'):
msa.append(42.0, reset_index=True)
self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
def test_length_mismatch(self):
msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
with self.assertRaisesRegex(
ValueError, 'must match the number of positions.*5 != 4'):
msa.append(DNA('ACGTA'), reset_index=True)
self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
def test_invalid_minter(self):
msa = TabularMSA([DNA('ACGT')], index=['foo'])
with self.assertRaises(KeyError):
msa.append(DNA('AAAA'), minter='id')
self.assertEqual(msa, TabularMSA([DNA('ACGT')], index=['foo']))
# Valid cases: `minter`
def test_minter_empty_msa(self):
msa = TabularMSA([])
msa.append(DNA('ACGT'), minter=str)
self.assertEqual(msa, TabularMSA([DNA('ACGT')], minter=str))
def test_minter_metadata_key(self):
msa = TabularMSA([DNA('', metadata={'id': 'a'}),
DNA('', metadata={'id': 'b'})],
minter='id')
msa.append(DNA('', metadata={'id': 'c'}), minter='id')
self.assertEqual(
msa,
TabularMSA([
DNA('', metadata={'id': 'a'}),
DNA('', metadata={'id': 'b'}),
DNA('', metadata={'id': 'c'})], minter='id'))
def test_minter_callable(self):
msa = TabularMSA([DNA('', metadata={'id': 'a'}),
DNA('', metadata={'id': 'b'})],
minter='id')
msa.append(DNA(''), minter=str)
self.assertEqual(
msa,
TabularMSA([
DNA('', metadata={'id': 'a'}),
DNA('', metadata={'id': 'b'}),
DNA('')], index=['a', 'b', '']))
def test_multiindex_minter_empty_msa(self):
def multiindex_minter(seq):
return ('foo', 42)
msa = TabularMSA([])
msa.append(DNA('AC'), minter=multiindex_minter)
self.assertIsInstance(msa.index, pd.MultiIndex)
assert_index_equal(msa.index, pd.Index([('foo', 42)]))
def test_multiindex_minter_non_empty_msa(self):
def multiindex_minter(seq):
return ('baz', 44)
msa = TabularMSA([RNA('UU'), RNA('CA')],
index=[('foo', 42), ('bar', 43)])
msa.append(RNA('AC'), minter=multiindex_minter)
self.assertIsInstance(msa.index, pd.MultiIndex)
assert_index_equal(msa.index,
pd.Index([('foo', 42), ('bar', 43), ('baz', 44)]))
# Valid cases: `index`
def test_index_empty_msa(self):
msa = TabularMSA([])
msa.append(DNA('ACGT'), index='a')
self.assertEqual(
msa,
TabularMSA([DNA('ACGT')], index=['a']))
def test_index_non_empty_msa(self):
msa = TabularMSA([DNA('AC'), DNA('GT')], index=['a', 'b'])
msa.append(DNA('--'), index='foo')
self.assertEqual(
msa,
TabularMSA([DNA('AC'), DNA('GT'), DNA('--')],
index=['a', 'b', 'foo']))
def test_multiindex_index_empty_msa(self):
msa = TabularMSA([])
msa.append(DNA('AA'), index=('foo', 42))
self.assertIsInstance(msa.index, pd.MultiIndex)
assert_index_equal(msa.index, pd.Index([('foo', 42)]))
def test_multiindex_index_non_empty_msa(self):
msa = TabularMSA([RNA('A'), RNA('C')],
index=[('foo', 42), ('bar', 43)])
msa.append(RNA('U'), index=('baz', 44))
self.assertIsInstance(msa.index, pd.MultiIndex)
assert_index_equal(msa.index,
pd.Index([('foo', 42), ('bar', 43), ('baz', 44)]))
# Valid cases: `reset_index`
def test_reset_index_empty_msa(self):
msa = TabularMSA([])
msa.append(DNA('ACGT'), reset_index=True)
self.assertEqual(msa, TabularMSA([DNA('ACGT')]))
assert_index_equal(msa.index, pd.RangeIndex(1))
def test_reset_index_default_index(self):
msa = TabularMSA([DNA('ACGT'), DNA('CCCC')])
msa.append(DNA('ACGT'), reset_index=True)
self.assertEqual(msa,
TabularMSA([DNA('ACGT'), DNA('CCCC'), DNA('ACGT')]))
assert_index_equal(msa.index, pd.RangeIndex(3))
def test_reset_index_non_default_index(self):
msa = TabularMSA([DNA('ACGT'), DNA('CCCC')], index=['foo', 'bar'])
msa.append(DNA('ACGT'), reset_index=True)
self.assertEqual(msa,
TabularMSA([DNA('ACGT'), DNA('CCCC'), DNA('ACGT')]))
assert_index_equal(msa.index, pd.RangeIndex(3))
def test_reset_index_bool_cast(self):
msa = TabularMSA([RNA('AC'), RNA('UU')], index=[42, 43])
msa.append(RNA('..'), reset_index='abc')
self.assertEqual(msa, TabularMSA([RNA('AC'), RNA('UU'), RNA('..')]))
assert_index_equal(msa.index, pd.RangeIndex(3))
# Valid cases (misc)
def test_index_type_change(self):
msa = TabularMSA([DNA('A'), DNA('.')])
msa.append(DNA('C'), index='foo')
self.assertEqual(
msa,
TabularMSA([DNA('A'), DNA('.'), DNA('C')], index=[0, 1, 'foo']))
def test_duplicate_index(self):
msa = TabularMSA([DNA('A'), DNA('.')], index=['foo', 'bar'])
msa.append(DNA('C'), index='foo')
self.assertEqual(
msa,
TabularMSA([DNA('A'), DNA('.'), DNA('C')],
index=['foo', 'bar', 'foo']))
def test_empty_msa_with_positional_metadata_no_new_positions(self):
msa = TabularMSA([], positional_metadata={'foo': []})
msa.append(DNA(''), reset_index=True)
self.assertEqual(
msa,
TabularMSA([DNA('')], positional_metadata={'foo': []}))
def test_empty_msa_with_positional_metadata_add_new_positions(self):
# bug in 0.4.2
msa = TabularMSA([], positional_metadata={'foo': []})
msa.append(DNA('AA'), reset_index=True)
self.assertEqual(
msa,
TabularMSA([DNA('AA')]))
class TestExtend(unittest.TestCase):
# Error cases
#
# Note: these tests check that the MSA isn't mutated when an error is
# raised. Where applicable, the "invalid" sequence is preceded by valid
# sequence(s) to test one possible (buggy) implementation of `extend`:
# looping over `sequences` and calling `append`. These tests ensure that
# valid sequences aren't appended to the MSA before the error is raised.
def test_invalid_minter_index_reset_index_parameter_combos(self):
msa = TabularMSA([])
param_combos = (
{},
{'minter': str, 'index': 'foo', 'reset_index': True},
{'minter': str, 'index': 'foo'},
{'minter': str, 'reset_index': True},
{'index': 'foo', 'reset_index': True}
)
for params in param_combos:
with self.assertRaisesRegex(ValueError,
"one of.*minter.*index.*reset_index"):
msa.extend([DNA('ACGT')], **params)
self.assertEqual(msa, TabularMSA([]))
def test_from_tabular_msa_index_param_still_required(self):
msa = TabularMSA([DNA('AC'), DNA('TG')])
with self.assertRaisesRegex(ValueError,
"one of.*minter.*index.*reset_index"):
msa.extend(TabularMSA([DNA('GG'), DNA('CC')]))
self.assertEqual(msa, TabularMSA([DNA('AC'), DNA('TG')]))
def test_invalid_dtype(self):
msa = TabularMSA([])
with self.assertRaisesRegex(TypeError, 'GrammaredSequence.*Sequence'):
msa.extend([Sequence('')], reset_index=True)
self.assertEqual(msa, TabularMSA([]))
def test_dtype_mismatch_rna(self):
msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
with self.assertRaisesRegex(TypeError, 'matching type.*RNA.*DNA'):
msa.extend([DNA('----'), RNA('UUUU')], reset_index=True)
self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
def test_dtype_mismatch_float(self):
msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
with self.assertRaisesRegex(TypeError, 'matching type.*float.*DNA'):
msa.extend([DNA('GGGG'), 42.0], reset_index=True)
self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
def test_length_mismatch(self):
msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
with self.assertRaisesRegex(
ValueError, 'must match the number of positions.*5 != 4'):
msa.extend([DNA('TTTT'), DNA('ACGTA')], reset_index=True)
self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
def test_invalid_minter(self):
msa = TabularMSA([DNA('ACGT')], index=['foo'])
with self.assertRaises(KeyError):
msa.extend([DNA('AAAA', metadata={'id': 'foo'}),
DNA('----')], minter='id')
self.assertEqual(msa, TabularMSA([DNA('ACGT')], index=['foo']))
def test_invalid_index(self):
msa = TabularMSA([DNA('ACGT')], index=['foo'])
with self.assertRaises(TypeError):
msa.extend([DNA('----')], index=42)
self.assertEqual(msa, TabularMSA([DNA('ACGT')], index=['foo']))
def test_sequences_index_length_mismatch(self):
msa = TabularMSA([])
with self.assertRaisesRegex(ValueError,
'sequences.*2.*index length.*3'):
msa.extend([DNA('TTTT'), DNA('ACGT')], index=['a', 'b', 'c'])
self.assertEqual(msa, TabularMSA([]))
# Valid cases: `minter`
def test_minter_empty_msa(self):
msa = TabularMSA([])
msa.extend([RNA('UU'), RNA('--')], minter=str)
self.assertEqual(msa, TabularMSA([RNA('UU'), RNA('--')], minter=str))
def test_minter_metadata_key(self):
msa = TabularMSA([DNA('', metadata={'id': 'a'}),
DNA('', metadata={'id': 'b'})],
minter='id')
msa.extend([DNA('', metadata={'id': 'c'}),
DNA('', metadata={'id': 'd'})], minter='id')
self.assertEqual(
msa,
TabularMSA([
DNA('', metadata={'id': 'a'}),
DNA('', metadata={'id': 'b'}),
DNA('', metadata={'id': 'c'}),
DNA('', metadata={'id': 'd'})], minter='id'))
def test_minter_callable(self):
msa = TabularMSA([DNA('A', metadata={'id': 'a'}),
DNA('C', metadata={'id': 'b'})],
minter='id')
msa.extend([DNA('G'), DNA('T')], minter=str)
self.assertEqual(
msa,
TabularMSA([
DNA('A', metadata={'id': 'a'}),
DNA('C', metadata={'id': 'b'}),
DNA('G'),
DNA('T')], index=['a', 'b', 'G', 'T']))
def test_multiindex_minter_empty_msa(self):
def multiindex_minter(seq):
if str(seq) == 'AC':
return ('foo', 42)
else:
return ('bar', 43)
msa = TabularMSA([])
msa.extend([DNA('AC'), DNA('GG')], minter=multiindex_minter)
self.assertIsInstance(msa.index, pd.MultiIndex)
assert_index_equal(msa.index, pd.Index([('foo', 42), ('bar', 43)]))
def test_multiindex_minter_non_empty_msa(self):
def multiindex_minter(seq):
if str(seq) == 'C':
return ('baz', 44)
else:
return ('baz', 45)
msa = TabularMSA([DNA('A'), DNA('G')],
index=[('foo', 42), ('bar', 43)])
msa.extend([DNA('C'), DNA('T')], minter=multiindex_minter)
self.assertIsInstance(msa.index, pd.MultiIndex)
assert_index_equal(
msa.index,
pd.Index([('foo', 42), ('bar', 43), ('baz', 44), ('baz', 45)]))
# Valid cases: `index`
def test_index_empty_msa(self):
msa = TabularMSA([])
msa.extend([RNA('UAC'), RNA('AAU')], index=['foo', 'bar'])
self.assertEqual(msa, TabularMSA([RNA('UAC'), RNA('AAU')],
index=['foo', 'bar']))
def test_index_non_empty_msa(self):
msa = TabularMSA([DNA('AC'), DNA('GT')], index=['a', 'b'])
msa.extend([DNA('--'), DNA('..')], index=['foo', 'bar'])
self.assertEqual(
msa,
TabularMSA([DNA('AC'), DNA('GT'), DNA('--'), DNA('..')],
index=['a', 'b', 'foo', 'bar']))
def test_multiindex_index_empty_msa(self):
msa = TabularMSA([])
msa.extend([DNA('AA'), DNA('GG')], index=[('foo', 42), ('bar', 43)])
self.assertIsInstance(msa.index, pd.MultiIndex)
assert_index_equal(msa.index, pd.Index([('foo', 42), ('bar', 43)]))
def test_multiindex_index_non_empty_msa(self):
msa = TabularMSA([DNA('.'), DNA('-')],
index=[('foo', 42), ('bar', 43)])
msa.extend([DNA('A'), DNA('G')], index=[('baz', 44), ('baz', 45)])
self.assertIsInstance(msa.index, pd.MultiIndex)
assert_index_equal(
msa.index,
pd.Index([('foo', 42), ('bar', 43), ('baz', 44), ('baz', 45)]))
def test_index_object_empty_msa(self):
msa = TabularMSA([])
msa.extend([DNA('AA'), DNA('GG')], index=pd.RangeIndex(2))
self.assertEqual(msa, TabularMSA([DNA('AA'), DNA('GG')]))
assert_index_equal(msa.index, pd.RangeIndex(2))
def test_index_object_non_empty_msa(self):
msa = TabularMSA([DNA('CT'), DNA('GG')])
        msa.extend([DNA('AA'), DNA('GG')], index=pd.RangeIndex(2))
# %%
import networkx as nx
import pandas as pd
from pandas.api.types import CategoricalDtype
import numpy as np
from networkx import Graph
import os
import time
from astropy.stats import RipleysKEstimator
import logging
logging.basicConfig(level=logging.INFO)
from ..utils.general import make_iterable
from .utils import get_node_interactions, get_interaction_score, permute_labels
from collections import Counter
# %%
def _infiltration_local_deprecated(G: Graph,
interaction1=('tumor', 'immune'),
interaction2=('immune', 'immune')):
    ids = np.unique(list(interaction1) + list(interaction2))
nodes_inter1 = [node for node in G.nodes if G.nodes[node]['attr'] in ids]
nodes_inter1 = [node for node in G.nodes if G.nodes[node]['attr'] in interaction1]
nodes_inter2 = [node for node in G.nodes if G.nodes[node]['attr'] in interaction2]
for node in ids:
neigh = G[node]
counts = Counter([G.nodes[i]['attr'] for i in neigh])
def _infiltration_local(G: Graph,
interaction1=('tumor', 'immune'),
interaction2=('immune', 'immune')):
    ids = np.unique(list(interaction1) + list(interaction2))
nodes = [node for node in G.nodes if G.nodes[node]['attr'] in ids]
for node in nodes:
neigh = G[node]
subG = G.subgraph()
pass
def _infiltration(node_interactions: pd.DataFrame, interaction1=('tumor', 'immune'),
interaction2=('immune', 'immune')) -> float:
"""
Compute infiltration score between two species.
Args:
node_interactions: Dataframe with columns `source_label` and `target_label` that specifies interactions.
        interaction1: labels of the numerator interaction
        interaction2: labels of the denominator interaction
Notes:
The infiltration score is computed as #interactions1 / #interactions2.
Returns:
Interaction score
"""
nint = node_interactions # verbose
(a1, a2), (b1, b2) = interaction1, interaction2
num = nint[(nint.source_label == a1) & (nint.target_label == a2)].shape[0]
denom = nint[(nint.source_label == b1) & (nint.target_label == b2)].shape[0]
return num / denom if denom > 0 else np.nan # TODO: np.inf or np.nan
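# A minimal sketch of the score on hypothetical data (column names follow
# `get_node_interactions`; the values are made up): two tumor->immune edges and one
# immune->immune edge give an infiltration score of 2 / 1 == 2.0.
#
#   nint = pd.DataFrame({'source_label': ['tumor', 'tumor', 'immune'],
#                        'target_label': ['immune', 'immune', 'immune']})
#   _infiltration(nint)  # -> 2.0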
class Interactions:
"""
Estimator to quantify interaction strength between different species in the sample.
"""
VALID_MODES = ['classic', 'histoCAT', 'proportion']
VALID_PREDICTION_TYPES = ['pvalue', 'observation', 'diff']
def __init__(self, so, spl: str, attr: str = 'meta_id', mode: str = 'classic', n_permutations: int = 500,
random_seed=None, alpha: float = .01, graph_key: str = 'knn'):
"""Estimator to quantify interaction strength between different species in the sample.
Args:
so: SpatialOmics
spl: Sample for which to compute the interaction strength
attr: Categorical feature in SpatialOmics.obs to use for the grouping
mode: One of {classic, histoCAT, proportion}, see notes
            n_permutations: Number of permutations used to compute p-values and the interaction strength score (mode diff)
random_seed: Random seed for permutations
alpha: Threshold for significance
            graph_key: Specifies the graph representation in so.G[spl] to use.
Notes:
            classic and histoCAT are Python implementations of the corresponding methods published by the Bodenmiller lab at UZH.
            The proportion method is similar to the classic method but normalises the score by the number of edges and is thus bounded in [0, 1].
"""
self.so = so
self.spl: str = spl
self.graph_key = graph_key
self.g: Graph = so.G[spl][graph_key]
self.attr: str = attr
self.data: pd.Series = so.obs[spl][attr]
self.mode: str = mode
self.n_perm: int = int(n_permutations)
self.random_seed = random_seed if random_seed else so.random_seed
        self.rng = np.random.default_rng(self.random_seed)
self.alpha: float = alpha
self.fitted: bool = False
# set dtype categories of data to attributes that are in the data
self.data = self.data.astype(CategoricalDtype(categories=self.data.unique(), ordered=False))
# path where h0 models would be
self.path = os.path.expanduser(f'~/.cache/spatialHeterogeneity/h0-models/')
self.h0_file = f'{spl}_{attr}_{graph_key}_{mode}.pkl'
self.h0 = None
def fit(self, prediction_type: str = 'observation', try_load: bool = True) -> None:
"""Compute the interactions scores for the sample.
Args:
prediction_type: One of {observation, pvalue, diff}, see Notes
try_load: load pre-computed permutation results if available
Returns:
Notes:
`observation`: computes the observed interaction strength in the sample
            `pvalue`: computes the P-value of a two-sided permutation test for the interaction strength based on the random permutations
`diff`: computes the difference between observed and average interaction strength (across permutations)
"""
if prediction_type not in self.VALID_PREDICTION_TYPES:
raise ValueError(
f'invalid `prediction_type` {prediction_type}. Available modes are {self.VALID_PREDICTION_TYPES}')
self.prediction_type = prediction_type
# extract observed interactions
if self.mode == 'classic':
relative_freq, observed = False, False
elif self.mode == 'histoCAT':
relative_freq, observed = False, True
elif self.mode == 'proportion':
relative_freq, observed = True, False
else:
raise ValueError(f'invalid mode {self.mode}. Available modes are {self.VALID_MODES}')
node_interactions = get_node_interactions(self.g, self.data)
obs_interaction = get_interaction_score(node_interactions, relative_freq=relative_freq, observed=observed)
self.obs_interaction = obs_interaction.set_index(['source_label', 'target_label'])
if not prediction_type == 'observation':
if try_load:
if os.path.isdir(self.path) and self.h0_file in os.listdir(self.path):
logging.info(
f'loading h0 for {self.spl}, graph type {self.graph_key} and mode {self.mode}')
self.h0 = pd.read_pickle(os.path.join(self.path, self.h0_file))
# if try_load was not successful
if self.h0 is None:
logging.info(
f'generate h0 for {self.spl}, graph type {self.graph_key} and mode {self.mode} and attribute {self.attr}')
self.generate_h0(relative_freq=relative_freq, observed=observed, save=True)
self.fitted = True
def predict(self) -> pd.DataFrame:
"""Predict interactions strengths of observations.
Returns: A dataframe with the interaction results.
"""
if self.prediction_type == 'observation':
return self.obs_interaction
elif self.prediction_type == 'pvalue':
# TODO: Check p-value computation
data_perm = pd.concat((self.obs_interaction, self.h0), axis=1)
data_perm.fillna(0, inplace=True)
data_pval = pd.DataFrame(index=data_perm.index)
            # see h0_models_analysis.py for alternative p-value computation
data_pval['score'] = self.obs_interaction.score
data_pval['perm_mean'] = data_perm.apply(lambda x: np.mean(x[1:]), axis=1, raw=True)
data_pval['perm_std'] = data_perm.apply(lambda x: np.std(x[1:]), axis=1, raw=True)
data_pval['perm_median'] = data_perm.apply(lambda x: np.median(x[1:]), axis=1, raw=True)
data_pval['p_gt'] = data_perm.apply(lambda x: np.sum(x[1:] >= x[0]) / self.n_perm, axis=1, raw=True)
data_pval['p_lt'] = data_perm.apply(lambda x: np.sum(x[1:] <= x[0]) / self.n_perm, axis=1, raw=True)
data_pval['perm_n'] = data_perm.apply(lambda x: self.n_perm, axis=1, raw=True)
data_pval['p'] = data_pval.apply(lambda x: x.p_gt if x.p_gt <= x.p_lt else x.p_lt, axis=1)
data_pval['sig'] = data_pval.apply(lambda x: x.p < self.alpha, axis=1)
data_pval['attraction'] = data_pval.apply(lambda x: x.p_gt <= x.p_lt, axis=1)
data_pval['sigval'] = data_pval.apply(lambda x: np.sign((x.attraction - .5) * x.sig), axis=1)
return data_pval
elif self.prediction_type == 'diff':
data_perm = pd.concat((self.obs_interaction, self.h0), axis=1)
data_perm.fillna(0, inplace=True)
data_pval = pd.DataFrame(index=data_perm.index)
            # see h0_models_analysis.py for alternative p-value computation
data_pval['score'] = self.obs_interaction.score
data_pval['perm_mean'] = data_perm.apply(lambda x: np.mean(x[1:]), axis=1, raw=True)
data_pval['perm_std'] = data_perm.apply(lambda x: np.std(x[1:]), axis=1, raw=True)
data_pval['perm_median'] = data_perm.apply(lambda x: np.median(x[1:]), axis=1, raw=True)
data_pval['diff'] = (data_pval['score'] - data_pval['perm_mean'])
return data_pval
else:
raise ValueError(
f'invalid `prediction_type` {self.prediction_type}. Available modes are {self.VALID_PREDICTION_TYPES}')
def generate_h0(self, relative_freq, observed, save=True):
connectivity = get_node_interactions(self.g).reset_index(drop=True)
res_perm, durations = [], []
for i in range(self.n_perm):
tic = time.time()
data = permute_labels(self.data, self.rng)
source_label = data.loc[connectivity.source].values.ravel()
target_label = data.loc[connectivity.target].values.ravel()
# create pd.Series and node_interaction pd.DataFrame
            source_label = pd.Series(source_label, name='source_label', dtype=self.data.dtype)
            target_label = pd.Series(target_label, name='target_label', dtype=self.data.dtype)
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
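# Usage sketch (values are made up): a snapshot as of 2015-01-09 in which sid 0's
# estimate, known since 2015-01-05, is 100.0 while sid 10 has no knowledge yet.
#
#   create_expected_df_for_factor_compute(
#       pd.Timestamp("2015-01-05"), [0, 10],
#       [(0, 100.0, pd.Timestamp("2015-01-05")),
#        (10, np.NaN, pd.Timestamp("2015-01-05"))],
#       pd.Timestamp("2015-01-09"),
#   )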
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
    test_load_one_day()
        Tests that the loader returns the expected latest values for
        multiple estimate columns when a single day is requested.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the previous-quarter loader returns the expected values for
    multiple estimate columns when a single day is requested.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the next-quarter loader returns the expected values for
    multiple estimate columns when a single day is requested.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that calculate which estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-15"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-02-05"),
pd.Timestamp("2015-02-05"),
],
"estimate": [110.0, 111.0] + [310.0, 311.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10,
}
)
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-07"),
cls.window_test_start_date,
pd.Timestamp("2015-01-17"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
],
"estimate": [120.0, 121.0] + [220.0, 221.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20,
}
)
concatted = pd.concat(
[sid_0_timeline, sid_10_timeline, sid_20_timeline]
).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [
sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i + 1])
] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids(),
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(
self, start_date, num_announcements_out
):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date)
- self.trading_days.get_loc(self.window_test_start_date)
+ 1
)
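        # e.g. (illustrative): for start_date 2015-01-09, window_len spans every
        # session from window_test_start_date (2015-01-05) through 2015-01-09 inclusive.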
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = (
timelines[num_announcements_out]
.loc[today]
.reindex(trading_days[: today_idx + 1])
.values
)
timeline_start_idx = len(today_timeline) - window_len
assert_almost_equal(estimate, today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp("2015-02-10", tz="utc"),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-21"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111, pd.Timestamp("2015-01-22")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 221, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-02-09")
]
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-09"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-20"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-01-22")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 310, pd.Timestamp("2015-01-09")),
(10, 311, pd.Timestamp("2015-01-15")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-11")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-16")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-01-20"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-02-10")
]
)
return {1: oneq_next, 2: twoq_next}
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
    Pipeline with an estimates loader over differently-sized windows and with
    split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp("2015-01-14")
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-09"),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp("2015-01-20"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
],
"estimate": [130.0, 131.0, 230.0, 231.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30,
}
)
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
        # split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-15")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [140.0, 240.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40,
}
)
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-12")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [150.0, 250.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50,
}
)
return pd.concat(
[
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
]
)
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
        # split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame(
{
SID_FIELD_NAME: 0,
"ratio": (-1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100),
"effective_date": (
pd.Timestamp("2014-01-01"), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp("2015-01-07"),
# Split before Q1 event
pd.Timestamp("2015-01-09"),
# Split before Q1 event
pd.Timestamp("2015-01-13"),
# Split before Q1 event
pd.Timestamp("2015-01-15"),
# Split before Q1 event
pd.Timestamp("2015-01-18"),
# Split after Q1 event and before Q2 event
pd.Timestamp("2015-01-30"),
# Filter out - this is after our date index
pd.Timestamp("2016-01-01"),
),
}
)
sid_10_splits = pd.DataFrame(
{
SID_FIELD_NAME: 10,
"ratio": (0.2, 0.3),
"effective_date": (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp("2015-01-07"),
# Apply a single split before Q1 event.
pd.Timestamp("2015-01-20"),
),
}
)
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame(
{
SID_FIELD_NAME: 20,
"ratio": (
0.4,
0.5,
0.6,
0.7,
0.8,
0.9,
),
"effective_date": (
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
pd.Timestamp("2015-01-30"),
),
}
)
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame(
{
SID_FIELD_NAME: 30,
"ratio": (8, 9, 10, 11, 12),
"effective_date": (
# Split before the event and before the
# split-asof-date.
pd.Timestamp("2015-01-07"),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp("2015-01-09"),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
),
}
)
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame(
{
SID_FIELD_NAME: 40,
"ratio": (13, 14),
"effective_date": (
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-22"),
),
}
)
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame(
{
SID_FIELD_NAME: 50,
"ratio": (15, 16),
"effective_date": (
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
),
}
)
return pd.concat(
[
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
]
)
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-12")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0 * 1 / 16, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-13"),
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-14"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131 * 11, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-20", "2015-01-21")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-01-29")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-30", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-01-20")),
(10, 311 * 0.3, pd.Timestamp("2015-02-05")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311 * 0.3, pd.Timestamp("2015-02-05")),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-02-10")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 240.0 * 13 * 14, pd.Timestamp("2015-02-10")),
(50, 250.0, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131 * 11 * 12, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-20", "2015-02-09")
]
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-02-10")),
(30, 131 * 11 * 12, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-02-10")),
(50, 150.0, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 1 / 4, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120 * 5 / 3, cls.window_test_start_date),
(20, 121 * 5 / 3, pd.Timestamp("2015-01-07")),
(30, 130 * 1 / 10, cls.window_test_start_date),
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")),
(40, 140, pd.Timestamp("2015-01-09")),
(50, 150.0 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-09"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 1 / 4, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120 * 5 / 3, cls.window_test_start_date),
(20, 121 * 5 / 3, pd.Timestamp("2015-01-07")),
(30, 230 * 1 / 10, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp("2015-01-10")),
(50, 250.0 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-12"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp("2015-01-10")),
(50, 250.0 * 1 / 16, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-13"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp("2015-01-10")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-14"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 5, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120 * 0.7, cls.window_test_start_date),
(20, 121 * 0.7, pd.Timestamp("2015-01-07")),
(30, 230 * 11, cls.window_test_start_date),
(40, 240, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 5 * 6, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110 * 0.3, pd.Timestamp("2015-01-09")),
(10, 111 * 0.3, pd.Timestamp("2015-01-12")),
(20, 120 * 0.7 * 0.8, cls.window_test_start_date),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-07")),
(30, 230 * 11 * 12, cls.window_test_start_date),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 240 * 13, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6, pd.Timestamp("2015-01-12")),
(10, 110 * 0.3, pd.Timestamp("2015-01-09")),
(10, 111 * 0.3, pd.Timestamp("2015-01-12")),
(20, 220 * 0.7 * 0.8, cls.window_test_start_date),
(20, 221 * 0.8, pd.Timestamp("2015-01-17")),
(40, 240 * 13, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-21"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6, pd.Timestamp("2015-01-12")),
(10, 110 * 0.3, pd.Timestamp("2015-01-09")),
(10, 111 * 0.3, pd.Timestamp("2015-01-12")),
(20, 220 * 0.7 * 0.8, cls.window_test_start_date),
(20, 221 * 0.8, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-22"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6, pd.Timestamp("2015-01-12")),
(10, 310 * 0.3, pd.Timestamp("2015-01-09")),
(10, 311 * 0.3, pd.Timestamp("2015-01-15")),
(20, 220 * 0.7 * 0.8, cls.window_test_start_date),
(20, 221 * 0.8, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-01-29")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6 * 7, pd.Timestamp("2015-01-12")),
(10, 310 * 0.3, pd.Timestamp("2015-01-09")),
(10, 311 * 0.3, pd.Timestamp("2015-01-15")),
(20, 220 * 0.7 * 0.8 * 0.9, cls.window_test_start_date),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
end_date,
)
for end_date in pd.date_range("2015-01-30", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6 * 7, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 0.7 * 0.8 * 0.9, cls.window_test_start_date),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6 * 7, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 0.7 * 0.8 * 0.9, cls.window_test_start_date),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 5 / 3, cls.window_test_start_date),
(30, 230 * 1 / 10, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
(50, np.NaN, cls.window_test_start_date),
],
pd.Timestamp("2015-01-09"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 1 / 4, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 5 / 3, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
pd.Timestamp("2015-01-12"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-13", "2015-01-14")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 0.7, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 0.7 * 0.8, cls.window_test_start_date),
(20, 221 * 0.8, pd.Timestamp("2015-01-17")),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
pd.Timestamp("2015-01-20"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-02-10")
]
)
return {1: oneq_next, 2: twoq_next}
class WithSplitAdjustedMultipleEstimateColumns(WithEstimates):
"""
ZiplineTestCase mixin for having multiple estimate columns that are
split-adjusted to make sure that adjustments are applied correctly.
Attributes
----------
test_start_date : pd.Timestamp
The start date of the test.
test_end_date : pd.Timestamp
        The end date of the test.
split_adjusted_asof : pd.Timestamp
The split-adjusted-asof-date of the data used in the test, to be used
to create all loaders of test classes that subclass this mixin.
Methods
-------
make_expected_timelines_1q_out -> dict[pd.Timestamp -> dict[str ->
np.array]]
The expected array of results for each date of the date range for
each column. Only for 1 quarter out.
make_expected_timelines_2q_out -> dict[pd.Timestamp -> dict[str ->
np.array]]
The expected array of results for each date of the date range. For 2
quarters out, so only for the column that is requested to be loaded
with 2 quarters out.
Tests
-----
test_adjustments_with_multiple_adjusted_columns
Tests that if you have multiple columns, we still split-adjust
correctly.
test_multiple_datasets_different_num_announcements
Tests that if you have multiple datasets that ask for a different
number of quarters out, and each asks for a different estimates column,
we still split-adjust correctly.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
test_start_date = pd.Timestamp("2015-01-06", tz="utc")
test_end_date = pd.Timestamp("2015-01-12", tz="utc")
split_adjusted_asof = pd.Timestamp("2015-01-08")
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
sid_0_events = pd.DataFrame(
{
# We only want a stale KD here so that adjustments
# will be applied.
TS_FIELD_NAME: [pd.Timestamp("2015-01-05"), pd.Timestamp("2015-01-05")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
],
"estimate1": [1100.0, 1200.0],
"estimate2": [2100.0, 2200.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# This is just an extra sid to make sure that we apply adjustments
# correctly for multiple columns when we have multiple sids.
sid_1_events = pd.DataFrame(
{
# We only want a stale KD here so that adjustments
# will be applied.
TS_FIELD_NAME: [pd.Timestamp("2015-01-05"), pd.Timestamp("2015-01-05")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-08"),
pd.Timestamp("2015-01-11"),
],
"estimate1": [1110.0, 1210.0],
"estimate2": [2110.0, 2210.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 1,
}
)
return pd.concat([sid_0_events, sid_1_events])
@classmethod
def make_splits_data(cls):
sid_0_splits = pd.DataFrame(
{
SID_FIELD_NAME: 0,
"ratio": (0.3, 3.0),
"effective_date": (
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-09"),
),
}
)
sid_1_splits = pd.DataFrame(
{
SID_FIELD_NAME: 1,
"ratio": (0.4, 4.0),
"effective_date": (
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-09"),
),
}
)
return pd.concat([sid_0_splits, sid_1_splits])
@classmethod
def make_expected_timelines_1q_out(cls):
return {}
@classmethod
def make_expected_timelines_2q_out(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithSplitAdjustedMultipleEstimateColumns, cls).init_class_fixtures()
cls.timelines_1q_out = cls.make_expected_timelines_1q_out()
cls.timelines_2q_out = cls.make_expected_timelines_2q_out()
def test_adjustments_with_multiple_adjusted_columns(self):
dataset = MultipleColumnsQuartersEstimates(1)
timelines = self.timelines_1q_out
window_len = 3
class SomeFactor(CustomFactor):
inputs = [dataset.estimate1, dataset.estimate2]
window_length = window_len
def compute(self, today, assets, out, estimate1, estimate2):
assert_almost_equal(estimate1, timelines[today]["estimate1"])
assert_almost_equal(estimate2, timelines[today]["estimate2"])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=self.test_start_date,
# last event date we have
end_date=self.test_end_date,
)
def test_multiple_datasets_different_num_announcements(self):
dataset1 = MultipleColumnsQuartersEstimates(1)
dataset2 = MultipleColumnsQuartersEstimates(2)
timelines_1q_out = self.timelines_1q_out
timelines_2q_out = self.timelines_2q_out
window_len = 3
class SomeFactor1(CustomFactor):
inputs = [dataset1.estimate1]
window_length = window_len
def compute(self, today, assets, out, estimate1):
assert_almost_equal(estimate1, timelines_1q_out[today]["estimate1"])
class SomeFactor2(CustomFactor):
inputs = [dataset2.estimate2]
window_length = window_len
def compute(self, today, assets, out, estimate2):
assert_almost_equal(estimate2, timelines_2q_out[today]["estimate2"])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est1": SomeFactor1(), "est2": SomeFactor2()}),
start_date=self.test_start_date,
# last event date we have
end_date=self.test_end_date,
)
class PreviousWithSplitAdjustedMultipleEstimateColumns(
WithSplitAdjustedMultipleEstimateColumns, ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate1", "estimate2"],
split_adjusted_asof=cls.split_adjusted_asof,
)
@classmethod
def make_expected_timelines_1q_out(cls):
return {
pd.Timestamp("2015-01-06", tz="utc"): {
"estimate1": np.array([[np.NaN, np.NaN]] * 3),
"estimate2": np.array([[np.NaN, np.NaN]] * 3),
},
pd.Timestamp("2015-01-07", tz="utc"): {
"estimate1": np.array([[np.NaN, np.NaN]] * 3),
"estimate2": np.array([[np.NaN, np.NaN]] * 3),
},
pd.Timestamp("2015-01-08", tz="utc"): {
"estimate1": np.array([[np.NaN, np.NaN]] * 2 + [[np.NaN, 1110.0]]),
"estimate2": np.array([[np.NaN, np.NaN]] * 2 + [[np.NaN, 2110.0]]),
},
pd.Timestamp("2015-01-09", tz="utc"): {
"estimate1": np.array(
[[np.NaN, np.NaN]]
+ [[np.NaN, 1110.0 * 4]]
+ [[1100 * 3.0, 1110.0 * 4]]
),
"estimate2": np.array(
[[np.NaN, np.NaN]]
+ [[np.NaN, 2110.0 * 4]]
+ [[2100 * 3.0, 2110.0 * 4]]
),
},
pd.Timestamp("2015-01-12", tz="utc"): {
"estimate1": np.array(
[[np.NaN, np.NaN]] * 2 + [[1200 * 3.0, 1210.0 * 4]]
),
"estimate2": np.array(
[[np.NaN, np.NaN]] * 2 + [[2200 * 3.0, 2210.0 * 4]]
),
},
}
@classmethod
def make_expected_timelines_2q_out(cls):
return {
pd.Timestamp("2015-01-06", tz="utc"): {
"estimate2": np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp("2015-01-07", tz="utc"): {
"estimate2": np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp("2015-01-08", tz="utc"): {
"estimate2": np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp("2015-01-09", tz="utc"): {
"estimate2": np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp("2015-01-12", tz="utc"): {
"estimate2": np.array(
[[np.NaN, np.NaN]] * 2 + [[2100 * 3.0, 2110.0 * 4]]
)
},
}
class NextWithSplitAdjustedMultipleEstimateColumns(
WithSplitAdjustedMultipleEstimateColumns, ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate1", "estimate2"],
split_adjusted_asof=cls.split_adjusted_asof,
)
@classmethod
def make_expected_timelines_1q_out(cls):
return {
pd.Timestamp("2015-01-06", tz="utc"): {
"estimate1": np.array(
[[np.NaN, np.NaN]] + [[1100.0 * 1 / 0.3, 1110.0 * 1 / 0.4]] * 2
),
"estimate2": np.array(
[[np.NaN, np.NaN]] + [[2100.0 * 1 / 0.3, 2110.0 * 1 / 0.4]] * 2
),
},
| pd.Timestamp("2015-01-07", tz="utc") | pandas.Timestamp |
import os
import re
import datetime
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import timedelta,date
from collections import Counter
from glob import glob
import visualization._plot_method as senti_ploter
today= str(date.today())
def calculate_top_words(result_path, topn):
    """ Get the top words from the sentiment results.
"""
stopword = | pd.read_csv("dictionary\\twitter_stopwords.txt",index_col=0) | pandas.read_csv |
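# --- Illustrative sketch (not part of the dataset row above) ---
# The completion above loads a stopword list with pandas.read_csv, using the
# first column as the index. A minimal example; the file path here is hypothetical.
import pandas as pd
stopword_df = pd.read_csv("stopwords.csv", index_col=0)  # column 0 becomes the index
print(stopword_df.head())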
""" parquet compat """
from __future__ import annotations
from distutils.version import LooseVersion
import io
import os
from typing import Any, AnyStr, Dict, List, Optional, Tuple
from warnings import catch_warnings
from pandas._typing import FilePathOrBuffer, StorageOptions
from pandas.compat._optional import import_optional_dependency
from pandas.errors import AbstractMethodError
from pandas.util._decorators import doc
from pandas import DataFrame, MultiIndex, get_option
from pandas.core import generic
from pandas.io.common import (
IOHandles,
get_handle,
is_fsspec_url,
is_url,
stringify_path,
)
def get_engine(engine: str) -> BaseImpl:
""" return our implementation """
if engine == "auto":
engine = get_option("io.parquet.engine")
if engine == "auto":
# try engines in this order
engine_classes = [PyArrowImpl, FastParquetImpl]
error_msgs = ""
for engine_class in engine_classes:
try:
return engine_class()
except ImportError as err:
error_msgs += "\n - " + str(err)
raise ImportError(
"Unable to find a usable engine; "
"tried using: 'pyarrow', 'fastparquet'.\n"
"A suitable version of "
"pyarrow or fastparquet is required for parquet "
"support.\n"
"Trying to import the above resulted in these errors:"
f"{error_msgs}"
)
if engine == "pyarrow":
return PyArrowImpl()
elif engine == "fastparquet":
return FastParquetImpl()
raise ValueError("engine must be one of 'pyarrow', 'fastparquet'")
def _get_path_or_handle(
path: FilePathOrBuffer,
fs: Any,
storage_options: StorageOptions = None,
mode: str = "rb",
is_dir: bool = False,
) -> Tuple[FilePathOrBuffer, Optional[IOHandles], Any]:
"""File handling for PyArrow."""
path_or_handle = stringify_path(path)
if is_fsspec_url(path_or_handle) and fs is None:
fsspec = | import_optional_dependency("fsspec") | pandas.compat._optional.import_optional_dependency |
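# --- Illustrative sketch (not part of the dataset row above) ---
# import_optional_dependency is a pandas-internal helper that imports a module
# and raises a helpful ImportError if it is missing. A rough public-API
# approximation of the same idea using importlib (behaviour simplified):
import importlib

def import_optional(name: str):
    try:
        return importlib.import_module(name)
    except ImportError as err:
        raise ImportError(f"Missing optional dependency '{name}'.") from err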
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import streamlit as st
from sklearn.ensemble import RandomForestRegressor
class Predicoes:
def __init__(self, options):
self.month_names_missing = ['July', 'August', 'September', 'October', 'November', 'December']
self.month_names = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September',
'October', 'November', 'December']
self.y = options
self.range_dates = [f'{mes + 1:0>2}/{2017 + ano}' for ano in range(5) for mes in range(12)]
self.start_date, self.end_date = st.sidebar.select_slider('Intervalo de datas', options=self.range_dates,
value=('01/2020', '03/2021'))
self.qtds = st.sidebar.selectbox('Quantidade', options=self.y, index=1)
def filter_dates(self, df):
start_year = int(self.start_date[3:])
start_month = int(self.start_date[:2])
end_year = int(self.end_date[3:])
end_month = int(self.end_date[:2])
years = list()
for y in range(end_year - start_year + 1):
years.append(f'{start_year + y}')
months = list()
qtd_months = (len(years) * 12) - (start_month - 1) - (12 - end_month)
for m in range(qtd_months):
months.append(f'{self.month_names[(start_month + m - 1) % 12]}')
return self.__calc_intervalo_meses_ano(df, months, years)
def __calc_intervalo_meses_ano(self, df, months, years):
months_ = months
list_months = ['Hi!'] * len(years)
cont = 0
i = 0
for y in years:
months_ = months_[cont:]
cont = 0
for m in months_:
cont += 1
if m == 'December':
break
list_months[i] = months_[0:cont]
i += 1
i = 0
df_ = df[(df['Ano'].isin([years[i]])) & (df['Mês'].isin(list_months[i]))]
if len(years) > 1:
for y in years[1:]:
i += 1
df_1 = df[(df['Ano'].isin([y])) & (df['Mês'].isin(list_months[i]))]
frames = [df_, df_1]
df_ = pd.concat(frames)
return df_
def sort(self, df):
sorter_index = dict(zip(self.month_names, range(len(self.month_names))))
df['mes_rank'] = df['Mês'].map(sorter_index)
df = df.sort_values(['Ano', 'mes_rank'])
return df.drop(columns=['mes_rank'])
def create_plot(self, df, by, title):
fig = go.Figure()
for faixa in df[by].unique():
data = df[df[by] == faixa]
meses = [f'{t[0][:3]} {t[1]}' for t in zip(data['Mês'].values, data['Ano'].values)]
fig.add_trace(go.Scatter(x=meses, y=data[self.qtds].values, mode='lines+markers', name=faixa))
fig.update_layout(title_text=title)
return fig
def predict(self, df, df_new, X_train, X_test):
Y_train = df[self.y]
regressor = RandomForestRegressor()
regressor.fit(X_train, Y_train)
previsoes = regressor.predict(X_test)
previsoes = | pd.DataFrame(previsoes) | pandas.DataFrame |
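# --- Illustrative sketch (not part of the dataset row above) ---
# The completion above wraps a NumPy array of model predictions in a
# pandas.DataFrame. A stand-alone example with made-up values:
import numpy as np
import pandas as pd
predictions = np.array([10.5, 11.2, 9.8])
pred_df = pd.DataFrame(predictions, columns=["prediction"])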
from __future__ import division
import numpy as np
import pandas as pd
cfs_to_taf = 2.29568411 * 10**-5 * 86400 / 1000
taf_to_cfs = 1000 / 86400 * 43560
def water_day(d):
return d - 274 if d >= 274 else d + 91
def max_release(S):
# rule from http://www.usbr.gov/mp/cvp//cvp-cas/docs/Draft_Findings/130814_tech_memo_flood_control_purpose_hydrology_methods_results.pdf
storage = [90, 100, 400, 600, 975]
# make the last one 130 for future runs
release = cfs_to_taf * np.array([0, 35000, 40000, 115000, 130000])
return np.interp(S, storage, release)
def tocs(d):
# d must be water-year date
# TAF of flood capacity in upstream reservoirs. simplified version.
# approximate values of the curve here:
# http://www.hec.usace.army.mil/publications/ResearchDocuments/RD-48.pdf
tp = [0, 50, 151, 200, 243, 366]
sp = [975, 400, 400, 750, 975, 975]
return np.interp(d, tp, sp)
def volume_to_height(S): # from HOBBES data
sp = [0, 48, 93, 142, 192, 240, 288, 386, 678, 977]
ep = [210, 305, 332, 351, 365, 376, 385, 401, 437, 466]
return np.interp(S, sp, ep)
class Folsom():
def __init__(self, datafile, sd, ed,
fit_historical=False, use_tocs=False,
cc=False, scenario=None, multiobj=False):
self.df = pd.read_csv(datafile, index_col=0, parse_dates=True)[sd:ed]
self.K = 975 # capacity, TAF
self.turbine_elev = 134 # feet
self.turbine_max_release = 8600 # cfs
self.max_safe_release = 130000 # cfs
self.dowy = np.array([water_day(d) for d in self.df.index.dayofyear])
self.D = np.loadtxt('folsom/data/demand.txt')[self.dowy]
self.T = len(self.df.index)
self.fit_historical = fit_historical
self.use_tocs = use_tocs
self.cc = cc
self.multiobj = multiobj
if self.cc:
self.annQs = pd.read_csv(
'folsom/data/folsom-cc-annQ-MA30.csv', index_col=0, parse_dates=True)
self.lp3s = pd.read_csv(
'folsom/data/folsom-cc-lp3-kcfs.csv', index_col=0, parse_dates=True)
self.wycs = pd.read_csv(
'folsom/data/folsom-cc-wycentroid.csv', index_col=0, parse_dates=True)
self.years = self.df.index.year
if scenario:
self.set_scenario(scenario)
else:
self.Q = self.df.inflow.values
def set_scenario(self, s):
self.scenario = s
self.annQ = self.annQs[s].values
self.lp3 = self.lp3s[s].values
self.wyc = self.wycs[s].values
self.Q = self.df[s].values
def f(self, P, mode='optimization'):
T = self.T
S, R, target, shortage_cost, flood_cost = [
np.zeros(T) for _ in range(5)]
K = self.K
D = self.D
Q = self.Q
dowy = self.dowy
R[0] = D[0]
policies = [None]
if not self.cc:
S[0] = self.df.storage.values[0]
else:
S[0] = 500
for t in range(1, T):
if not self.cc:
policy, rules = P.evaluate([S[t - 1], self.dowy[t], Q[t]])
else:
y = self.years[t] - 2000
policy, rules = P.evaluate([S[t - 1], Q[t], dowy[t],
self.annQ[y], self.lp3[y], self.wyc[y]])
if policy == 'Release_Demand':
target[t] = D[t]
elif policy == 'Hedge_90':
target[t] = 0.9 * D[t]
elif policy == 'Hedge_80':
target[t] = 0.8 * D[t]
elif policy == 'Hedge_70':
target[t] = 0.7 * D[t]
elif policy == 'Hedge_60':
target[t] = 0.6 * D[t]
elif policy == 'Hedge_50':
target[t] = 0.5 * D[t]
if self.use_tocs:
target[t] = max(
0.2 * (Q[t] + S[t - 1] - tocs(dowy[t])), target[t])
elif policy == 'Flood_Control':
# target[t] = max_release(S[t-1])
target[t] = max(0.2 * (Q[t] + S[t - 1] - 0.0), 0.0) # default
# for item in rules:
# if item[0] == 'Storage' and not item[2]:
# target[t] = max(0.2*(Q[t] + S[t-1] - item[1]), 0.0)
if mode == 'simulation':
policies.append(policy)
# max/min release
# k = 0.2
R[t] = min(target[t], S[t - 1] + Q[t])
R[t] = min(R[t], max_release(S[t - 1]))
# R[t] = np.clip(R[t], (1-k)*R[t], (1+k)*R[t]) # inertia --
R[t] += max(S[t - 1] + Q[t] - R[t] - K, 0) # spill
S[t] = S[t - 1] + Q[t] - R[t]
# squared deficit. Also penalize any total release over 100 TAF/day
# should be able to vectorize this.
shortage_cost[t] = max(D[t] - R[t], 0)**2 / \
T # + max(R[t]-100, 0)**2
if R[t] > cfs_to_taf * self.max_safe_release:
# flood penalty, high enough to be a constraint
flood_cost[t] += 10**3 * \
(R[t] - cfs_to_taf * self.max_safe_release)
# end of period penalty
# EOP = 0
# if not self.cc and S[-1] < self.df.storage.values[0]:
# EOP = 10**5
if mode == 'simulation' or self.multiobj:
df = self.df.copy()
df['Ss'] = pd.Series(S, index=df.index)
df['Rs'] = | pd.Series(R, index=df.index) | pandas.Series |
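# --- Illustrative sketch (not part of the dataset row above) ---
# The completion above turns a NumPy result array into a pandas.Series aligned
# to an existing DataFrame's index. A minimal example with toy data:
import numpy as np
import pandas as pd
toy_df = pd.DataFrame({"inflow": [1.0, 2.0, 3.0]},
                      index=pd.date_range("2020-01-01", periods=3))
releases = np.array([0.5, 1.5, 2.5])
toy_df["Rs"] = pd.Series(releases, index=toy_df.index)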
import numpy as np
import pandas as pd
def mtr(val, brackets, rates):
"""Calculates the marginal tax rate applied to a value depending on a
tax schedule.
:param val: Value to assess tax on, e.g. wealth or income (list or Series).
:param brackets: Left side of each bracket (list or Series).
:param rates: Rate corresponding to each bracket.
:returns: Series of the size of val representing the marginal tax rate.
"""
df_tax = pd.DataFrame({"brackets": brackets, "rates": rates})
df_tax["base_tax"] = (
df_tax.brackets.sub(df_tax.brackets.shift(fill_value=0))
.mul(df_tax.rates.shift(fill_value=0))
.cumsum()
)
rows = df_tax.brackets.searchsorted(val, side="right") - 1
income_bracket_df = df_tax.loc[rows].reset_index(drop=True)
return income_bracket_df.rates
def tax_from_mtrs(
val,
brackets,
rates,
avoidance_rate=0,
avoidance_elasticity=0,
avoidance_elasticity_flat=0,
):
"""Calculates tax liability based on a marginal tax rate schedule.
:param val: Value to assess tax on, e.g. wealth or income (list or Series).
:param brackets: Left side of each bracket (list or Series).
:param rates: Rate corresponding to each bracket.
:param avoidance_rate: Constant avoidance/evasion rate in percentage terms.
Defaults to zero.
:param avoidance_elasticity: Avoidance/evasion elasticity.
Response of log taxable value with respect
to tax rate.
Defaults to zero. Should be positive.
:param avoidance_elasticity_flat: Response of taxable value with respect
to tax rate.
Use avoidance_elasticity in most cases.
Defaults to zero. Should be positive.
:returns: Series of tax liabilities with the same size as val.
"""
assert (
avoidance_rate == 0
or avoidance_elasticity == 0
or avoidance_elasticity_flat == 0
), "Cannot supply multiple avoidance parameters."
assert (
avoidance_elasticity >= 0
), "Provide nonnegative avoidance_elasticity."
df_tax = pd.DataFrame({"brackets": brackets, "rates": rates})
df_tax["base_tax"] = (
df_tax.brackets.sub(df_tax.brackets.shift(fill_value=0))
.mul(df_tax.rates.shift(fill_value=0))
.cumsum()
)
if avoidance_rate == 0: # Only need MTRs if elasticity is supplied.
mtrs = mtr(val, brackets, rates)
if avoidance_elasticity > 0:
avoidance_rate = 1 - np.exp(-avoidance_elasticity * mtrs)
if avoidance_elasticity_flat > 0:
avoidance_rate = avoidance_elasticity_flat * mtrs
taxable = | pd.Series(val) | pandas.Series |
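# --- Illustrative sketch (not part of the dataset row above) ---
# The completion above coerces `val` (a list or Series) into a pandas.Series so
# the avoidance adjustment can use vectorised arithmetic. For example:
import pandas as pd
values = [100_000, 250_000, 1_000_000]              # hypothetical wealth values
taxable_example = pd.Series(values) * (1 - 0.10)    # flat 10% avoidance, for illustration only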
import time
from datetime import date
import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
class UserProfile:
path_user_profile_table = '../data/user_profile_table.csv'
data = None
shape = None
def __init__(self):
self.data = pd.read_csv(self.path_user_profile_table)
print("the shape of {} is {}".format(
self.path_user_profile_table, self.data.shape))
def GetMoreUserFeatureFromBalance(self, Balance):
# this function get more feature of user in Balance table
# the Balance is the '../data/user_balance_table.csv'
self.data['city'] = self.data['city'].astype('str')
self.data = pd.get_dummies(self.data)
self.data = self.data.set_index('user_id')
dataid = Balance.data['user_id']
        # you can use this method to get more features
self.data['mean_total_purchase_amt'] = Balance.data[[
'user_id', 'total_purchase_amt']].groupby(['user_id']).mean()
self.data['std_total_purchase_amt'] = Balance.data[[
'user_id', 'total_purchase_amt']].groupby(['user_id']).std()
self.data['mean_total_redeem_amt'] = Balance.data[[
'user_id', 'total_redeem_amt']].groupby(['user_id']).mean()
self.data['std_total_redeem_amt'] = Balance.data[[
'user_id', 'total_redeem_amt']].groupby(['user_id']).std()
Balance.data['tBalance_minus_yBalance'] = Balance.data['tBalance'] - Balance.data['yBalance'] - Balance.data['share_amt']
Balance.data['tBalance_minus_yBalance'] = Balance.data[Balance.data['tBalance_minus_yBalance'] != 0]
self.data['num_of_effective_operation'] = Balance.data['tBalance_minus_yBalance'].value_counts()
self.data['tBalance_first_use'] = Balance.data[[
'user_id', 'report_date']].groupby(['user_id']).min()
self.data['tBalance_first_use'] = (pd.to_datetime(
'01/09/2014', dayfirst=True)-self.data['tBalance_first_use']).dt.days
self.data = self.data.fillna(0)
self.data['tBalance_first_use'] = self.data['num_of_effective_operation'] / self.data['tBalance_first_use']
self.data = self.data.apply(lambda x: (x - np.min(x)) / (np.max(x) - np.min(x)))
return self.data.astype('float')
class UserBalance:
path_user_balance_table = '../data/user_balance_table.csv'
day_purchase = None
day_redeem = None
data = None
def __init__(self):
self.data = | pd.read_csv(self.path_user_balance_table, parse_dates=[1]) | pandas.read_csv |
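# --- Illustrative sketch (not part of the dataset row above) ---
# The completion above uses read_csv's parse_dates argument to parse the second
# column (position 1) as datetimes while loading. The file name here is hypothetical.
import pandas as pd
balance_df = pd.read_csv("user_balance.csv", parse_dates=[1])  # column 1 -> datetime64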
import datetime as dt
from functools import partial
from io import BytesIO, StringIO
from fastapi import HTTPException
import numpy as np
import pandas as pd
import pyarrow as pa
from pyarrow import feather
import pytest
from solarperformanceinsight_api import utils, models
httpfail = partial(
pytest.param, marks=pytest.mark.xfail(strict=True, raises=HTTPException)
)
@pytest.mark.parametrize(
"inp,typ,exp",
(
(
"time,datas\n2020-01-01T00:00Z,8.9",
StringIO,
pd.DataFrame({"time": [pd.Timestamp("2020-01-01T00:00Z")], "datas": [8.9]}),
),
(
b"time,datas\n2020-01-01T00:00Z,8.9",
BytesIO,
pd.DataFrame({"time": [ | pd.Timestamp("2020-01-01T00:00Z") | pandas.Timestamp |
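# --- Illustrative sketch (not part of the dataset row above) ---
# The completion above builds a Timestamp from an ISO-8601 string with a "Z"
# (UTC) suffix, which yields a timezone-aware value:
import pandas as pd
ts = pd.Timestamp("2020-01-01T00:00Z")
assert ts.tz is not None  # timezone-aware (UTC)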
from peakaboo.peak_classify import data_grouping
from peakaboo.peak_classify import cluster_classifier
import numpy as np
import pandas as pd
def test_data_grouping():
index_df = np.zeros((2, 2))
height_df = pd.DataFrame([1, 2, 3])
fwhm_df = pd.DataFrame([4, 5, 6])
threshold = 1
try:
data_grouping(index_df, height_df, fwhm_df, threshold)
except AttributeError:
pass
else:
print('Incorrect data type passed', 'Check peak_finding_master output')
index_df = pd.DataFrame()
height_df = pd.DataFrame([1, 2, 3])
fwhm_df = pd.DataFrame([4, 5, 6])
threshold = 1
t = data_grouping(index_df, height_df, fwhm_df, threshold)
assert len(t) == 0, "Index data frame is empty"
index_df = pd.DataFrame([1, 2, 3])
height_df = pd.DataFrame()
fwhm_df = pd.DataFrame([4, 5, 6])
threshold = 1
try:
data_grouping(index_df, height_df, fwhm_df, threshold)
except KeyError:
pass
else:
print('Height data frame empty', 'Check peak_finding_master output')
index_df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
height_df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
fwhm_df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
threshold = 10
t = data_grouping(index_df, height_df, fwhm_df, threshold)
assert len(t) == 0, "Threshold is too high"
def test_cluster_classifier():
index_df = pd.DataFrame([[1, 1, 5], [1, 2, 10], [1, 2, 6]])
corrected_output = | pd.DataFrame() | pandas.DataFrame |
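# --- Illustrative sketch (not part of the dataset row above) ---
# The completion above creates an empty pandas.DataFrame, used in the test as
# the expected "no clusters" output. An empty frame has length zero:
import pandas as pd
empty = pd.DataFrame()
assert empty.empty and len(empty) == 0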
"""
Module of extension functions to be applied to pandas objects (e.g., DataFrame or Series)
Author: <NAME>
Date: 2020-02-03
License: see LICENSE file
"""
import functools
import pandas as pd
import numpy as np
import tqdm
def merge_all(*dfs, suffix_names, suffix_cols, **kwargs):
def _append_suffix(df, suffix_name):
new_cols = []
for col in df.columns:
if col in suffix_cols and suffix_name != '':
col = col + '_' + suffix_name
new_cols.append(col)
df.columns = new_cols
return df
def _combine(left, right):
left_df = _append_suffix(left[0], left[1])
right_df = _append_suffix(right[0], right[1])
merged = left_df.merge(
right_df, **kwargs)
return (merged, '')
sequence = zip(dfs, suffix_names)
if len(suffix_names) == 1:
merged = _append_suffix(dfs[0], suffix_names[0])
else:
tuple_results = functools.reduce(_combine, sequence)
merged = tuple_results[0]
cols_with_suffixes = list(filter(lambda name: name.split('_')
[-1] in suffix_names, merged.columns))
return merged, cols_with_suffixes
def filter_column(df, col, values_to_filter_out=[]):
# remove values
is_valid_values = ~df[col].isin(values_to_filter_out).values
filtered_df = df.loc[is_valid_values, :]
return filtered_df
def parallel_apply(df, func, **kwargs):
from pathos import pools
import os
import numpy as np
cores = os.cpu_count()
data_split = np.array_split(df, cores)
pool = pools.ProcessPool(cores - 4)
apply_func = functools.partial(func, **kwargs)
data = pd.concat(pool.map(apply_func, data_split))
pool.close()
pool.join()
return data
def fast_series_map(s, func, **kwargs):
def _map(value):
result[result == value] = func(value, **kwargs)
result = s.copy()
values = s.unique().tolist()
[_map(value) for value in values]
return result
def segment_by_time(df, seg_st=None, seg_et=None, st_col=0, et_col=None):
et_col = et_col or st_col
seg_st = seg_st or df.iloc[0, st_col]
seg_et = seg_et or df.iloc[-1, et_col]
if st_col == et_col:
mask = (df.iloc[:, st_col] >= seg_st) & (
df.iloc[:, et_col] < seg_et)
return df.loc[mask, :].copy(deep=True)
else:
mask = (df.iloc[:, st_col] <= seg_et) & (
df.iloc[:, et_col] >= seg_st)
subset_df = df[mask].copy(deep=True)
st_col = df.columns[st_col]
et_col = df.columns[et_col]
subset_df.loc[subset_df.loc[:, st_col] <
seg_st, st_col] = seg_st
subset_df.loc[subset_df.loc[:, et_col] >
seg_et, et_col] = seg_et
return subset_df
def get_common_timespan(*dfs, st=None, et=None, st_col=0, et_col=None):
et_col = et_col or st_col
if st is None:
sts = [df.iloc[0, st_col] for df in dfs]
st = pd.Timestamp(np.min(sts))
else:
st = pd.Timestamp(st)
if et is None:
ets = [df.iloc[-1, et_col] for df in dfs]
et = pd.Timestamp(np.max(ets))
else:
et = pd.Timestamp(et)
return st, et
def split_into_windows(*dfs, step_size, st=None, et=None, st_col=0, et_col=None):
st, et = get_common_timespan(
*dfs, st=st, et=et, st_col=st_col, et_col=et_col)
step_size = step_size * 1000
window_start_markers = pd.date_range(
start=st, end=et, freq=f'{step_size}ms', closed='left')
return window_start_markers
def fixed_window_slider(*dfs, slider_fn, window_size, step_size=None, st=None, et=None, st_col=0, et_col=None, show_progress=True, **slider_fn_kwargs):
step_size = step_size or window_size
window_start_markers = split_into_windows(
*dfs, step_size=step_size, st=st, et=et, st_col=st_col, et_col=et_col)
feature_sets = []
if show_progress:
bar = tqdm.tqdm(total=len(window_start_markers))
result_dfs = []
for window_st in window_start_markers:
window_et = window_st + | pd.Timedelta(window_size, unit='s') | pandas.Timedelta |
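# --- Illustrative sketch (not part of the dataset row above) ---
# The completion above adds a pandas.Timedelta of window_size seconds to a
# window start timestamp to get the window end. Toy example with a made-up size:
import pandas as pd
window_st = pd.Timestamp("2021-01-01 00:00:00")
window_et = window_st + pd.Timedelta(12.8, unit='s')   # 12.8-second window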
import os
import glob
import psycopg2
import psycopg2.extras
import pandas as pd
from sql_queries import *
def process_song_file(cur, filepath):
"""
    Reads a raw song data file and inserts its records into the
    songs and artists tables
:param cur: Postgres cursor
:param filepath: A path to a file to process
:return: void
"""
# open song file
df = pd.read_json(filepath, lines=True)
# ideally should be added in batch, iterative approach is just for simplicity
# not sure if it's a good option to store data in array, just testing batch insert
songs = []
artists = []
for index, row in df.iterrows():
songs.append((row.song_id, row.title, row.artist_id, row.year, row.duration))
artists.append((row.artist_id, row.artist_name, row.artist_location,
row.artist_latitude, row.artist_longitude))
psycopg2.extras.execute_batch(cur, song_table_insert, songs)
psycopg2.extras.execute_batch(cur, artist_table_insert, artists)
def process_log_file(cur, filepath):
"""
    Reads a raw log data file, filters for NextSong events, and saves the
    data into the corresponding Postgres tables
:param cur: Postgres cursor
:param filepath: A path to a file to process
:return: void
"""
# open log file
df = pd.read_json(filepath, lines=True)
# filter by NextSong action
df = df[df.page == 'NextSong']
# convert timestamp column to datetime
df['ts'] = | pd.to_datetime(df['ts'], unit='ms') | pandas.to_datetime |
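# --- Illustrative sketch (not part of the dataset row above) ---
# The completion above converts a column of Unix epoch values in milliseconds
# into datetimes with pandas.to_datetime(..., unit='ms'). For example:
import pandas as pd
ts_ms = pd.Series([1541105830796, 1541106106796])   # made-up epoch-millisecond values
datetimes = pd.to_datetime(ts_ms, unit='ms')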
# -*- coding: utf8 -*-
import pytest
from unittest.mock import Mock
from pandas import DataFrame
import pandas as pd
from scipy import sparse
from sklearn.datasets import load_iris
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import (
StandardScaler, OneHotEncoder, LabelBinarizer)
from sklearn.impute import SimpleImputer as Imputer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.base import BaseEstimator, TransformerMixin
import sklearn.decomposition
import numpy as np
from numpy.testing import assert_array_equal
import pickle
from sklearn.compose import make_column_selector
from sklearn_pandas import DataFrameMapper
from sklearn_pandas.dataframe_mapper import _handle_feature, _build_transformer
from sklearn_pandas.pipeline import TransformerPipeline
class MockXTransformer(object):
"""
Mock transformer that accepts no y argument.
"""
def fit(self, X):
return self
def transform(self, X):
return X
class MockTClassifier(object):
"""
Mock transformer/classifier.
"""
def fit(self, X, y=None):
return self
def transform(self, X):
return X
def predict(self, X):
return True
class DateEncoder():
def fit(self, X, y=None):
return self
def transform(self, X):
dt = X.dt
return pd.concat([dt.year, dt.month, dt.day], axis=1)
class ToSparseTransformer(BaseEstimator, TransformerMixin):
"""
Transforms numpy matrix to sparse format.
"""
def fit(self, X):
return self
def transform(self, X):
return sparse.csr_matrix(X)
class CustomTransformer(BaseEstimator, TransformerMixin):
"""
Example of transformer in which the number of classes
is not equals to the number of output columns.
"""
def fit(self, X, y=None):
self.min = X.min()
self.classes_ = np.unique(X)
return self
def transform(self, X):
classes = np.unique(X)
if len(np.setdiff1d(classes, self.classes_)) > 0:
raise ValueError('Unknown values found.')
return X - self.min
@pytest.fixture
def simple_dataframe():
return pd.DataFrame({'a': [1, 2, 3]})
@pytest.fixture
def complex_dataframe():
return pd.DataFrame({'target': ['a', 'a', 'b', 'b', 'c', 'c'],
'feat1': [1, 2, 3, 4, 5, 6],
'feat2': [1, 2, 3, 2, 3, 4]})
@pytest.fixture
def multiindex_dataframe():
"""Example MultiIndex DataFrame, taken from pandas documentation
"""
iterables = [['bar', 'baz', 'foo', 'qux'], ['one', 'two']]
index = pd.MultiIndex.from_product(iterables, names=['first', 'second'])
df = pd.DataFrame(np.random.randn(10, 8), columns=index)
return df
@pytest.fixture
def multiindex_dataframe_incomplete(multiindex_dataframe):
"""Example MultiIndex DataFrame with missing entries
"""
df = multiindex_dataframe
mask_array = np.zeros(df.size)
mask_array[:20] = 1
np.random.shuffle(mask_array)
mask = mask_array.reshape(df.shape).astype(bool)
df.mask(mask, inplace=True)
return df
def test_transformed_names_simple(simple_dataframe):
"""
Get transformed names of features in `transformed_names` attribute
for simple transformation
"""
df = simple_dataframe
mapper = DataFrameMapper([('a', None)])
mapper.fit_transform(df)
assert mapper.transformed_names_ == ['a']
def test_transformed_names_binarizer(complex_dataframe):
"""
Get transformed names of features in `transformed_names` attribute
for a transformation that multiplies the number of columns
"""
df = complex_dataframe
mapper = DataFrameMapper([('target', LabelBinarizer())])
mapper.fit_transform(df)
assert mapper.transformed_names_ == ['target_a', 'target_b', 'target_c']
def test_logging(caplog, complex_dataframe):
"""
Get transformed names of features in `transformed_names` attribute
for a transformation that multiplies the number of columns
"""
import logging
logger = logging.getLogger('sklearn_pandas')
logger.setLevel(logging.INFO)
df = complex_dataframe
mapper = DataFrameMapper([('target', LabelBinarizer())])
mapper.fit_transform(df)
assert '[FIT_TRANSFORM] target:' in caplog.text
def test_transformed_names_binarizer_unicode():
df = pd.DataFrame({'target': [u'ñ', u'á', u'é']})
mapper = DataFrameMapper([('target', LabelBinarizer())])
mapper.fit_transform(df)
expected_names = {u'target_ñ', u'target_á', u'target_é'}
assert set(mapper.transformed_names_) == expected_names
def test_transformed_names_transformers_list(complex_dataframe):
"""
When using a list of transformers, use them in inverse order to get the
transformed names
"""
df = complex_dataframe
mapper = DataFrameMapper([
('target', [LabelBinarizer(), MockXTransformer()])
])
mapper.fit_transform(df)
assert mapper.transformed_names_ == ['target_a', 'target_b', 'target_c']
def test_transformed_names_simple_alias(simple_dataframe):
"""
If we specify an alias for a single output column, it is used for the
output
"""
df = simple_dataframe
mapper = DataFrameMapper([('a', None, {'alias': 'new_name'})])
mapper.fit_transform(df)
assert mapper.transformed_names_ == ['new_name']
def test_transformed_names_complex_alias(complex_dataframe):
"""
If we specify an alias for a multiple output column, it is used for the
output
"""
df = complex_dataframe
mapper = DataFrameMapper([('target', LabelBinarizer(), {'alias': 'new'})])
mapper.fit_transform(df)
assert mapper.transformed_names_ == ['new_a', 'new_b', 'new_c']
def test_exception_column_context_transform(simple_dataframe):
"""
If an exception is raised when transforming a column,
the exception includes the name of the column being transformed
"""
class FailingTransformer(object):
def fit(self, X):
pass
def transform(self, X):
raise Exception('Some exception')
df = simple_dataframe
mapper = DataFrameMapper([('a', FailingTransformer())])
mapper.fit(df)
with pytest.raises(Exception, match='a: Some exception'):
mapper.transform(df)
def test_exception_column_context_fit(simple_dataframe):
"""
If an exception is raised when fit a column,
the exception includes the name of the column being fitted
"""
class FailingFitter(object):
def fit(self, X):
raise Exception('Some exception')
df = simple_dataframe
mapper = DataFrameMapper([('a', FailingFitter())])
with pytest.raises(Exception, match='a: Some exception'):
mapper.fit(df)
def test_simple_df(simple_dataframe):
"""
Get a dataframe from a simple mapped dataframe
"""
df = simple_dataframe
mapper = DataFrameMapper([('a', None)], df_out=True)
transformed = mapper.fit_transform(df)
assert type(transformed) == pd.DataFrame
assert len(transformed["a"]) == len(simple_dataframe["a"])
def test_complex_df(complex_dataframe):
"""
Get a dataframe from a complex mapped dataframe
"""
df = complex_dataframe
mapper = DataFrameMapper(
[('target', None), ('feat1', None), ('feat2', None)],
df_out=True)
transformed = mapper.fit_transform(df)
assert len(transformed) == len(complex_dataframe)
for c in df.columns:
assert len(transformed[c]) == len(df[c])
def test_numeric_column_names(complex_dataframe):
"""
Get a dataframe from a complex mapped dataframe with numeric column names
"""
df = complex_dataframe
df.columns = [0, 1, 2]
mapper = DataFrameMapper(
[(0, None), (1, None), (2, None)], df_out=True)
transformed = mapper.fit_transform(df)
assert len(transformed) == len(complex_dataframe)
for c in df.columns:
assert len(transformed[c]) == len(df[c])
def test_multiindex_df(multiindex_dataframe_incomplete):
"""
Get a dataframe from a multiindex dataframe with missing data
"""
df = multiindex_dataframe_incomplete
mapper = DataFrameMapper([([c], Imputer()) for c in df.columns],
df_out=True)
transformed = mapper.fit_transform(df)
assert len(transformed) == len(multiindex_dataframe_incomplete)
for c in df.columns:
assert len(transformed[str(c)]) == len(df[c])
def test_binarizer_df():
"""
Check level names from LabelBinarizer
"""
df = pd.DataFrame({'target': ['a', 'a', 'b', 'b', 'c', 'a']})
mapper = DataFrameMapper([('target', LabelBinarizer())], df_out=True)
transformed = mapper.fit_transform(df)
cols = transformed.columns
assert len(cols) == 3
assert cols[0] == 'target_a'
assert cols[1] == 'target_b'
assert cols[2] == 'target_c'
def test_binarizer_int_df():
"""
Check level names from LabelBinarizer for a numeric array.
"""
df = pd.DataFrame({'target': [5, 5, 6, 6, 7, 5]})
mapper = DataFrameMapper([('target', LabelBinarizer())], df_out=True)
transformed = mapper.fit_transform(df)
cols = transformed.columns
assert len(cols) == 3
assert cols[0] == 'target_5'
assert cols[1] == 'target_6'
assert cols[2] == 'target_7'
def test_binarizer2_df():
"""
Check level names from LabelBinarizer with just one output column
"""
df = pd.DataFrame({'target': ['a', 'a', 'b', 'b', 'a']})
mapper = DataFrameMapper([('target', LabelBinarizer())], df_out=True)
transformed = mapper.fit_transform(df)
cols = transformed.columns
assert len(cols) == 1
assert cols[0] == 'target'
def test_onehot_df():
"""
Check level ids from one-hot
"""
df = pd.DataFrame({'target': [0, 0, 1, 1, 2, 3, 0]})
mapper = DataFrameMapper([(['target'], OneHotEncoder())], df_out=True)
transformed = mapper.fit_transform(df)
cols = transformed.columns
assert len(cols) == 4
assert cols[0] == 'target_x0_0'
assert cols[3] == 'target_x0_3'
def test_customtransform_df():
"""
Check level ids from a transformer for which
the number of classes is not equal to the number of output columns.
"""
df = pd.DataFrame({'target': [6, 5, 7, 5, 4, 8, 8]})
mapper = DataFrameMapper([(['target'], CustomTransformer())], df_out=True)
transformed = mapper.fit_transform(df)
cols = transformed.columns
assert len(mapper.features[0][1].classes_) == 5
assert len(cols) == 1
assert cols[0] == 'target'
def test_preserve_df_index():
"""
The index is preserved when df_out=True
"""
df = pd.DataFrame({'target': [1, 2, 3]},
index=['a', 'b', 'c'])
mapper = DataFrameMapper([('target', None)],
df_out=True)
transformed = mapper.fit_transform(df)
assert_array_equal(transformed.index, df.index)
def test_preserve_df_index_rows_dropped():
"""
If df_out=True but the original df index length doesn't
match the number of final rows, use a numeric index
"""
class DropLastRowTransformer(object):
def fit(self, X):
return self
def transform(self, X):
return X[:-1]
df = pd.DataFrame({'target': [1, 2, 3]},
index=['a', 'b', 'c'])
mapper = DataFrameMapper([('target', DropLastRowTransformer())],
df_out=True)
transformed = mapper.fit_transform(df)
assert_array_equal(transformed.index, np.array([0, 1]))
def test_pca(complex_dataframe):
"""
Check multi in and out with PCA
"""
df = complex_dataframe
mapper = DataFrameMapper(
[(['feat1', 'feat2'], sklearn.decomposition.PCA(2))],
df_out=True)
transformed = mapper.fit_transform(df)
cols = transformed.columns
assert len(cols) == 2
assert cols[0] == 'feat1_feat2_0'
assert cols[1] == 'feat1_feat2_1'
def test_fit_transform(simple_dataframe):
"""
Check that custom fit_transform methods of the transformers are invoked.
"""
df = simple_dataframe
mock_transformer = Mock()
# return something of measurable length but does nothing
mock_transformer.fit_transform.return_value = np.array([1, 2, 3])
mapper = DataFrameMapper([("a", mock_transformer)])
mapper.fit_transform(df)
assert mock_transformer.fit_transform.called
def test_fit_transform_equiv_mock(simple_dataframe):
"""
Check for equivalent results for code paths fit_transform
versus fit and transform in DataFrameMapper using the mock
transformer which does not implement a custom fit_transform.
"""
df = simple_dataframe
mapper = DataFrameMapper([('a', MockXTransformer())])
transformed_combined = mapper.fit_transform(df)
transformed_separate = mapper.fit(df).transform(df)
assert np.all(transformed_combined == transformed_separate)
def test_fit_transform_equiv_pca(complex_dataframe):
"""
Check for equivalent results for the code paths fit_transform versus
fit followed by transform in DataFrameMapper, using PCA, which
implements a custom fit_transform. The equivalence of both code paths
inside the transformer itself is taken for granted, since it is covered
by the sklearn tests in
scikit-learn/sklearn/decomposition/tests/test_pca.py
"""
df = complex_dataframe
mapper = DataFrameMapper(
[(['feat1', 'feat2'], sklearn.decomposition.PCA(2))],
df_out=True)
transformed_combined = mapper.fit_transform(df)
transformed_separate = mapper.fit(df).transform(df)
assert np.allclose(transformed_combined, transformed_separate)
def test_input_df_true_first_transformer(simple_dataframe, monkeypatch):
"""
If input_df is True, the first transformer is passed
a pd.Series instead of an np.array
"""
df = simple_dataframe
monkeypatch.setattr(MockXTransformer, 'fit', Mock())
monkeypatch.setattr(MockXTransformer, 'transform',
Mock(return_value=np.array([1, 2, 3])))
mapper = DataFrameMapper([
('a', MockXTransformer())
], input_df=True)
out = mapper.fit_transform(df)
args, _ = MockXTransformer().fit.call_args
assert isinstance(args[0], pd.Series)
args, _ = MockXTransformer().transform.call_args
assert isinstance(args[0], pd.Series)
assert_array_equal(out, np.array([1, 2, 3]).reshape(-1, 1))
def test_input_df_true_next_transformers(simple_dataframe, monkeypatch):
"""
If input_df is True, the subsequent transformers get passed pandas
objects instead of numpy arrays (given the previous transformers
output pandas objects as well)
"""
df = simple_dataframe
monkeypatch.setattr(MockTClassifier, 'fit', Mock())
monkeypatch.setattr(MockTClassifier, 'transform',
Mock(return_value=pd.Series([1, 2, 3])))
mapper = DataFrameMapper([
('a', [MockXTransformer(), MockTClassifier()])
], input_df=True)
mapper.fit(df)
out = mapper.transform(df)
args, _ = MockTClassifier().fit.call_args
assert isinstance(args[0], pd.Series)
assert_array_equal(out, np.array([1, 2, 3]).reshape(-1, 1))
def test_input_df_true_multiple_cols(complex_dataframe):
"""
When input_df is True, applying transformers to multiple columns
works as expected
"""
df = complex_dataframe
mapper = DataFrameMapper([
('target', MockXTransformer()),
('feat1', MockXTransformer()),
], input_df=True)
out = mapper.fit_transform(df)
assert_array_equal(out[:, 0], df['target'].values)
assert_array_equal(out[:, 1], df['feat1'].values)
def test_input_df_date_encoder():
"""
When input_df is True we can apply a transformer that only works
with pandas dataframes like a DateEncoder
"""
df = pd.DataFrame(
{'dates': pd.date_range('2015-10-30', '2015-11-02')})
mapper = DataFrameMapper([
('dates', DateEncoder())
], input_df=True)
out = mapper.fit_transform(df)
expected = np.array([
[2015, 10, 30],
[2015, 10, 31],
[2015, 11, 1],
[2015, 11, 2]
])
assert_array_equal(out, expected)
def test_local_input_df_date_encoder():
"""
When input_df is True we can apply a transformer that only works
with pandas dataframes like a DateEncoder
"""
df = pd.DataFrame(
{'dates': pd.date_range('2015-10-30', '2015-11-02')})
mapper = DataFrameMapper([
('dates', DateEncoder(), {'input_df': True})
], input_df=False)
out = mapper.fit_transform(df)
expected = np.array([
[2015, 10, 30],
[2015, 10, 31],
[2015, 11, 1],
[2015, 11, 2]
])
assert_array_equal(out, expected)
def test_nonexistent_columns_explicit_fail(simple_dataframe):
"""
If a nonexistent column is selected, KeyError is raised.
"""
mapper = DataFrameMapper(None)
with pytest.raises(KeyError):
mapper._get_col_subset(simple_dataframe, ['nonexistent_feature'])
def test_get_col_subset_single_column_array(simple_dataframe):
"""
Selecting a single column should return a 1-dimensional numpy array.
"""
mapper = DataFrameMapper(None)
array = mapper._get_col_subset(simple_dataframe, "a")
assert type(array) == np.ndarray
assert array.shape == (len(simple_dataframe["a"]),)
def test_get_col_subset_single_column_list(simple_dataframe):
"""
Selecting a list of columns (even if the list contains a single element)
should return a 2-dimensional numpy array.
"""
mapper = DataFrameMapper(None)
array = mapper._get_col_subset(simple_dataframe, ["a"])
assert type(array) == np.ndarray
assert array.shape == (len(simple_dataframe["a"]), 1)
def test_cols_string_array(simple_dataframe):
"""
If a string is specified as the columns, the transformer
is called with a 1-d array as input.
"""
df = simple_dataframe
mock_transformer = Mock()
mapper = DataFrameMapper([("a", mock_transformer)])
mapper.fit(df)
args, kwargs = mock_transformer.fit.call_args
assert args[0].shape == (3,)
def test_cols_list_column_vector(simple_dataframe):
"""
If a one-element list is specified as the columns, the transformer
is called with a column vector as input.
"""
df = simple_dataframe
mock_transformer = Mock()
mapper = DataFrameMapper([(["a"], mock_transformer)])
mapper.fit(df)
args, kwargs = mock_transformer.fit.call_args
assert args[0].shape == (3, 1)
def test_handle_feature_2dim():
"""
2-dimensional arrays are returned unchanged.
"""
array = np.array([[1, 2], [3, 4]])
assert_array_equal(_handle_feature(array), array)
def test_handle_feature_1dim():
"""
1-dimensional arrays are converted to 2-dimensional column vectors.
"""
array = np.array([1, 2])
assert_array_equal(_handle_feature(array), np.array([[1], [2]]))
def test_build_transformers():
"""
When a list of transformers is passed, return a pipeline with
each element of the iterable as a step of the pipeline.
"""
transformers = [MockTClassifier(), MockTClassifier()]
pipeline = _build_transformer(transformers)
assert isinstance(pipeline, Pipeline)
for ix, transformer in enumerate(transformers):
assert pipeline.steps[ix][1] == transformer
def test_selected_columns():
"""
selected_columns returns a set of the columns appearing in the features
of the mapper.
"""
mapper = DataFrameMapper([
('a', None),
(['a', 'b'], None)
])
assert mapper._selected_columns == {'a', 'b'}
def test_unselected_columns():
"""
unselected_columns returns a list of the columns not appearing in the
features of the mapper but present in the given dataframe.
"""
df = pd.DataFrame({'a': [1], 'b': [2], 'c': [3]})
mapper = DataFrameMapper([
('a', None),
(['a', 'b'], None)
])
assert 'c' in mapper._unselected_columns(df)
def test_drop_and_default_false():
"""
If default=False, non explicitly selected columns and drop columns
are discarded.
"""
df = pd.DataFrame({'a': [1], 'b': [2], 'c': [3]})
mapper = DataFrameMapper([
('a', None)
], drop_cols=['c'], default=False)
transformed = mapper.fit_transform(df)
assert transformed.shape == (1, 1)
assert mapper.transformed_names_ == ['a']
def test_drop_and_default_none():
"""
If default=None, drop columns are discarded and
remaining non explicitly selected columns are passed through untransformed
"""
df = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 5, 7]})
mapper = DataFrameMapper([
('a', None)
], drop_cols=['c'], default=None)
transformed = mapper.fit_transform(df)
assert transformed.shape == (3, 2)
assert mapper.transformed_names_ == ['a', 'b']
def test_conflicting_drop():
"""
Drop column name shouldn't get confused with transformed columns.
"""
df = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 5, 7]})
mapper = DataFrameMapper([
('a', None)
], drop_cols=['a'], default=False)
transformed = mapper.fit_transform(df)
assert transformed.shape == (3, 1)
assert mapper.transformed_names_ == ['a']
def test_default_false():
"""
If default=False, non explicitly selected columns are discarded.
"""
df = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 5, 7]})
mapper = DataFrameMapper([
('b', None)
], default=False)
transformed = mapper.fit_transform(df)
assert transformed.shape == (3, 1)
def test_default_none():
"""
If default=None, non explicitly selected columns are passed through
untransformed.
"""
df = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 5, 7]})
mapper = DataFrameMapper([
(['a'], OneHotEncoder())
], default=None)
transformed = mapper.fit_transform(df)
assert (transformed[:, 3] == np.array([3, 5, 7]).T).all()
def test_default_none_names():
"""
If default=None, column names are returned unmodified.
"""
df = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 5, 7]})
mapper = DataFrameMapper([], default=None)
mapper.fit_transform(df)
assert mapper.transformed_names_ == ['a', 'b']
def test_default_transformer():
"""
If default=Transformer, the default transformer is applied to the
non explicitly selected columns.
"""
df = pd.DataFrame({'a': [1, np.nan, 3], })
mapper = DataFrameMapper([], default=Imputer())
transformed = mapper.fit_transform(df)
assert (transformed[:, 0] == np.array([1., 2., 3.])).all()
def test_list_transformers_single_arg(simple_dataframe):
"""
Multiple transformers can be specified in a list even if some of them
only accept one X argument instead of two (X, y).
"""
mapper = DataFrameMapper([
('a', [MockXTransformer()])
])
# doesn't fail
mapper.fit_transform(simple_dataframe)
def test_list_transformers():
"""
Specifying a list of transformers applies them sequentially to the
selected column.
"""
dataframe = pd.DataFrame({"a": [1, np.nan, 3], "b": [1, 5, 7]},
dtype=np.float64)
mapper = DataFrameMapper([
(["a"], [Imputer(), StandardScaler()]),
(["b"], StandardScaler()),
])
dmatrix = mapper.fit_transform(dataframe)
assert pd.isnull(dmatrix).sum() == 0 # no null values
# all features have mean 0 and std deviation 1 (standardized)
assert (abs(dmatrix.mean(axis=0) - 0) <= 1e-6).all()
assert (abs(dmatrix.std(axis=0) - 1) <= 1e-6).all()
def test_list_transformers_old_unpickle(simple_dataframe):
mapper = DataFrameMapper(None)
# simulate the mapper was created with < 1.0.0 code
mapper.features = [('a', [MockXTransformer()])]
mapper_pickled = pickle.dumps(mapper)
loaded_mapper = pickle.loads(mapper_pickled)
transformer = loaded_mapper.features[0][1]
assert isinstance(transformer, TransformerPipeline)
assert isinstance(transformer.steps[0][1], MockXTransformer)
def test_sparse_features(simple_dataframe):
"""
If any of the extracted features is sparse and "sparse" argument
is true, the hstacked result is also sparse.
"""
df = simple_dataframe
mapper = DataFrameMapper([
("a", ToSparseTransformer())
], sparse=True)
dmatrix = mapper.fit_transform(df)
assert type(dmatrix) == sparse.csr.csr_matrix
def test_sparse_off(simple_dataframe):
"""
If the resulting features are sparse but the "sparse" argument
of the mapper is False, return a non-sparse matrix.
"""
df = simple_dataframe
mapper = DataFrameMapper([
("a", ToSparseTransformer())
], sparse=False)
dmatrix = mapper.fit_transform(df)
assert type(dmatrix) != sparse.csr.csr_matrix
def test_fit_with_optional_y_arg(complex_dataframe):
"""
Transformers with an optional y argument in the fit method
are handled correctly
"""
df = complex_dataframe
mapper = DataFrameMapper([(['feat1', 'feat2'], MockTClassifier())])
# doesn't fail
mapper.fit(df[['feat1', 'feat2']], df['target'])
def test_fit_with_required_y_arg(complex_dataframe):
"""
Transformers with a required y argument in the fit method
are handled and perform correctly
"""
df = complex_dataframe
mapper = DataFrameMapper([(['feat1', 'feat2'], SelectKBest(chi2, k=1))])
# fit, doesn't fail
ft_arr = mapper.fit(df[['feat1', 'feat2']], df['target'])
# fit_transform
ft_arr = mapper.fit_transform(df[['feat1', 'feat2']], df['target'])
assert_array_equal(ft_arr, df[['feat1']].values)
# transform
t_arr = mapper.transform(df[['feat1', 'feat2']])
assert_array_equal(t_arr, df[['feat1']].values)
# Integration tests with real dataframes
@pytest.fixture
def iris_dataframe():
iris = load_iris()
return DataFrame(
data={
iris.feature_names[0]: iris.data[:, 0],
iris.feature_names[1]: iris.data[:, 1],
iris.feature_names[2]: iris.data[:, 2],
iris.feature_names[3]: iris.data[:, 3],
"species": np.array([iris.target_names[e] for e in iris.target])
}
)
@pytest.fixture
def cars_dataframe():
return | pd.read_csv("tests/test_data/cars.csv.gz", compression='gzip') | pandas.read_csv |
from random import choice
from tkinter import *
import pandas
BACKGROUND_COLOR = "#B1DDC6"
current_card = {}
to_learn = {}
# -------------------------------- CSV TO DICT --------------------------------- #
try:
data = | pandas.read_csv("data/words.csv") | pandas.read_csv |
import numpy as np
import pandas as pd
import os.path
from sklearn.model_selection import train_test_split
# Create GROUND TRUTH dataset
def ground_truth():
dir = os.getcwd() # Gets the current working directory
df = pd.read_csv(dir + '\\dataset\\train\\imbalanced_tweets.csv')
X_train, X_test, y_train, y_test = train_test_split(df['tweet'], df['label'], test_size=0.10, random_state = 42)
# Clear and combine datasets
train = pd.DataFrame(list(zip(y_train, X_train)), columns=['label', 'tweet'])
test = pd.DataFrame(list(zip(y_test, X_test)), columns=['label', 'tweet'])
train = train.sample(frac=1).reset_index(drop=True)
test = test.sample(frac=1).reset_index(drop=True)
count_0, count_1 = train['label'].value_counts()
print(count_1, count_0)
count_0, count_1 = test['label'].value_counts()
print(count_1, count_0)
train.head(20)
test.head(20)
train.to_csv(dir + '\\dataset\\train\\training_imbalanced_temp.csv')
test.to_csv(dir + '\\dataset\\train\\ground_truth.csv')
print("END SCRIPT")
# CREATE BALANCED DATASET
def balance_dataset():
dir = os.getcwd() # Gets the current working directory
train_file_A = dir + '\\dataset\\train\\training_imbalanced_temp.csv'
train_A = pd.read_csv(train_file_A)
# Drop the first column of reading file
train_A.drop(['numb'], axis=1, inplace=True)
label_0 = train_A.loc[train_A['label'] == 0]
label_1 = train_A.loc[train_A['label'] == 1]
print("label 0: ", label_0)
print("label 1: ", label_1)
getIndex= list()
while len(getIndex) < len(label_1):
for i in range(label_0.shape[0]):
if np.random.uniform(0, 1) < 0.54 and i not in getIndex:
getIndex.append(i)
print(len(getIndex), len(label_1))
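# (a roughly equivalent but simpler way to undersample would be
#  getData = label_0.sample(n=len(label_1)); the index loop above is kept for
#  its original random-selection behaviour)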
getData = label_0.iloc[getIndex]
print(getData)
# Clear and combine datasets
df_final = pd.concat([label_1, getData])
df_final = df_final.sample(frac=1).reset_index(drop=True)
df_final.head(20)
count_0, count_1 = df_final['label'].value_counts()
print(count_1, count_0)
df_final.to_csv(dir + '\\dataset\\train\\imbalanced_training.csv')
print("END SCRIPT")
# Get as many non-depressive tweets as depressive tweets from SCRAPING
def Combine_Scraped_and_Positive_tweets():
dir = os.getcwd() # Gets the current working directory
# Positive tweets
train_file_A = dir + '\\dataset\\train\\general_tweets.csv'
train_A = pd.read_csv(train_file_A)
# Drop the first column of reading file
train_A.drop(['numb'], axis=1, inplace=True)
# Scraped tweets
train_file_B = dir + '\\dataset\\train\\depress\\ALL_tweets_final.csv'
label_1 = pd.read_csv(train_file_B)
# Drop the first column of reading file
label_1.drop(['Unnamed: 0'], axis=1, inplace=True)
label_1.drop(['id'], axis=1, inplace=True)
label_1.drop(['conversation_id'], axis=1, inplace=True)
label_1.drop(['date'], axis=1, inplace=True)
label_1.drop(['username'], axis=1, inplace=True)
label_1.drop(['hashtags'], axis=1, inplace=True)
label_1.drop(['tweet_original'], axis=1, inplace=True)
label_0 = train_A.loc[train_A['label'] == 0]
print("label 0: ", label_0)
print("label 1: ", label_1)
getIndex= list()
while len(getIndex) < len(label_1):
for i in range(label_0.shape[0]):
if np.random.uniform(0, 1) < 0.32 and i not in getIndex:
getIndex.append(i)
print(len(getIndex), len(label_1))
getData = label_0.iloc[getIndex]
print(getData)
# Clear and combine datasets
df_final = pd.concat([label_1, getData])
df_final = df_final.sample(frac=1).reset_index(drop=True)
df_final.head(20)
print(df_final['label'].value_counts())
df_final.to_csv(dir + '\\dataset\\train\\POSITIVE_DEPRESSED_SCRAPED.csv')
print("END SCRIPT")
# COMBINE DATASETS
def combine_datasets():
dir = os.getcwd() # Gets the current working directory
#train_file_A = dir + '\\dataset\\train\\depression_tweets.txt'
train_file_A = dir + '\\dataset\\train\\TEMP_ALL_SPLIT_tweets_final.csv'
train_A = | pd.read_csv(train_file_A) | pandas.read_csv |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from pathlib import Path
from typing import Any, Dict, List
from pandas import DataFrame, concat
from lib.cast import age_group, safe_int_cast
from lib.data_source import DataSource
from lib.io import read_file
from lib.constants import SRC
from lib.concurrent import thread_map
from lib.time import datetime_isoformat
from lib.utils import table_rename
_dashboard_column_adapter = {
"key": "key",
"date": "date",
"casConfirmes": "total_confirmed",
"deces": "total_deceased",
"testsPositifs": "new_confirmed",
"testsRealises": "new_tested",
"gueris": "new_recovered",
"hospitalises": "current_hospitalized",
"reanimation": "current_intensive_care",
}
_gouv_column_adapter = {
"date": "date",
"dep": "subregion2_code",
"reg": "subregion1_code",
"hosp": "current_hospitalized",
"incid_hosp": "new_hospitalized",
"rea": "current_intensive_care",
"incid_rea": "new_intensive_care",
"dc_tot": "total_deceased",
"conf": "total_confirmed",
"conf_j1": "new_confirmed",
}
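# Illustrative sketch (not part of the pipeline) of how the adapters above are
# applied, assuming lib.utils.table_rename renames columns per the adapter and
# drops unmapped ones when drop=True, as the calls in the helpers below suggest:
#   raw = read_file(url_tpl.format(code))   # code for a region/department
#   clean = table_rename(raw, _dashboard_column_adapter, drop=True)
# leaving only the snake_case columns used throughout the pipeline.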
def _get_region(
url_tpl: str, column_adapter: Dict[str, str], iso_map: Dict[str, str], subregion1_code: str
):
code = iso_map[subregion1_code]
data = read_file(url_tpl.format(code))
data["key"] = f"FR_{subregion1_code}"
return table_rename(data, column_adapter, drop=True)
def _get_department(url_tpl: str, column_adapter: Dict[str, str], record: Dict[str, str]):
subregion1_code = record["subregion1_code"]
subregion2_code = record["subregion2_code"]
code = f"DEP-{subregion2_code}"
data = read_file(url_tpl.format(code))
data["key"] = f"FR_{subregion1_code}_{subregion2_code}"
return table_rename(data, column_adapter, drop=True)
def _get_country(url_tpl: str, column_adapter: Dict[str, str]):
data = read_file(url_tpl.format("FRA"))
data["key"] = "FR"
return table_rename(data, column_adapter, drop=True)
class FranceDashboardDataSource(DataSource):
def fetch(
self,
output_folder: Path,
cache: Dict[str, str],
fetch_opts: List[Dict[str, Any]],
skip_existing: bool = False,
) -> Dict[Any, str]:
# URL is just a template, so pass-through the URL to parse manually
return {idx: source["url"] for idx, source in enumerate(fetch_opts)}
def parse(self, sources: Dict[Any, str], aux: Dict[str, DataFrame], **parse_opts) -> DataFrame:
url_tpl = sources[0]
metadata = aux["metadata"]
metadata = metadata[metadata["country_code"] == "FR"]
fr_isos = read_file(SRC / "data" / "fr_iso_codes.csv")
fr_iso_map = {iso: code for iso, code in zip(fr_isos["iso_code"], fr_isos["region_code"])}
fr_codes = metadata[["subregion1_code", "subregion2_code"]].dropna()
regions_iter = fr_codes["subregion1_code"].unique()
deps_iter = [record for _, record in fr_codes.iterrows()]
# For country level, there is no need to estimate confirmed from tests
column_adapter_country = dict(_dashboard_column_adapter)
column_adapter_country.pop("testsPositifs")
# Get country level data
country = _get_country(url_tpl, column_adapter_country)
# Country level data has totals instead of diffs, so we compute the diffs by hand
country.sort_values("date", inplace=True)
country["new_confirmed"] = country["total_confirmed"].diff()
country.drop(columns=["total_confirmed"], inplace=True)
# For region level, we can only estimate confirmed from tests
column_adapter_region = dict(_dashboard_column_adapter)
column_adapter_region.pop("casConfirmes")
# Get region level data
get_region_func = partial(_get_region, url_tpl, column_adapter_region, fr_iso_map)
regions = concat(list(thread_map(get_region_func, regions_iter)))
# Get department level data
get_department_func = partial(_get_department, url_tpl, column_adapter_region)
departments = concat(list(thread_map(get_department_func, deps_iter)))
data = | concat([country, regions, departments]) | pandas.concat |
import pandas as pd
import numpy as np
class EncodingBase(object):
def get_GAM_df(self, x_values_lookup=None, **kwargs):
# Map x_values_lookup into the encoded feature space used internally
if x_values_lookup is not None:
x_values_lookup = self.convert_x_values_lookup(x_values_lookup)
# Get the original DF
df = super().get_GAM_df(x_values_lookup, **kwargs)
# Used in bagging: the reverting is handled there, so skip it here.
if hasattr(self, 'not_revert') and self.not_revert:
return df
# change it back to non-onehot encoding df
return self.revert_dataframe(df)
def convert_x_values_lookup(self, x_values_lookup=None):
raise NotImplementedError()
def revert_dataframe(self, df):
raise NotImplementedError()
class LabelEncodingFitMixin(EncodingBase):
def convert_x_values_lookup(self, x_values_lookup=None):
need_label_encoding = hasattr(self, 'cat_columns') and len(self.cat_columns) > 0 and x_values_lookup is not None
if not need_label_encoding:
return x_values_lookup
x_values_lookup = x_values_lookup.copy()
self.cat_x_values_lookup = {c: x_values_lookup[c] for c in self.cat_columns}
for c in self.cat_columns:
val = self.cat_to_num_dict[c][x_values_lookup[c]].values
x_values_lookup[c] = val[~np.isnan(val)]
return x_values_lookup
def revert_dataframe(self, df):
need_label_encoding = hasattr(self, 'cat_columns') and len(self.cat_columns) > 0
if not need_label_encoding:
return df
df_lookup = df.set_index('feat_name')
for c in self.cat_columns:
df_lookup.at[c, 'x'] = self.num_to_cat_dict[c][df_lookup.loc[c, 'x']].values
if not hasattr(self, 'cat_x_values_lookup'):
continue
row = df_lookup.loc[c]
orig_x = self.cat_x_values_lookup[c]
if len(row.x) == len(orig_x) and np.all(np.array(row.x) == np.array(orig_x)):
continue
cat_x = list(row.x) + list(orig_x)
cat_y = list(row.y) + [0.] * len(orig_x)
final_x, index = np.unique(cat_x, return_index=True)
final_y = np.array(cat_y)[index]
df_lookup.at[c, 'x'] = final_x
df_lookup.at[c, 'y'] = final_y
if 'y_std' in df_lookup:
cat_y_std = list(row.y_std) + [0.] * len(orig_x)
df_lookup.at[c, 'y_std'] = np.array(cat_y_std)[index]
df = df_lookup.reset_index()
return df
def fit(self, X, y, **kwargs):
if isinstance(X, pd.DataFrame): # in bagging, the coming X is from numpy. Don't transform
self.my_fit(X, y)
X = self.my_transform(X)
return super().fit(X, y, **kwargs)
def my_fit(self, X, y):
self.cat_columns = X.columns[X.dtypes == object].values.tolist()
self.num_to_cat_dict, self.cat_to_num_dict = {}, {}
for c in self.cat_columns:
tmp = X[c].astype('category').cat
self.num_to_cat_dict[c] = pd.Series(tmp.categories)
self.cat_to_num_dict[c] = pd.Series(range(len(tmp.categories)), index=tmp.categories.values)
return X
def my_transform(self, X):
X = X.copy()
for c in self.cat_columns:
val = self.cat_to_num_dict[c][X[c].values]
val = val.fillna(0) # unseen categories are arbitrarily assigned to class 0
X.loc[:, c] = val.values
return X
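# Illustrative example (hypothetical data): for a categorical column c with
# values ['low', 'high', 'low'], the pandas category dtype sorts its categories, so
#   cat_to_num_dict[c] maps {'high': 0, 'low': 1}
#   num_to_cat_dict[c] maps {0: 'high', 1: 'low'}
# and my_transform() replaces the strings with those integer codes before fitting.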
class LabelEncodingClassifierMixin(LabelEncodingFitMixin):
def predict_proba(self, X):
# in bagging, the coming X is from numpy. Don't transform
if isinstance(X, pd.DataFrame) and hasattr(self, 'cat_columns') and len(self.cat_columns) > 0:
X = self.my_transform(X)
return super().predict_proba(X)
class LabelEncodingRegressorMixin(LabelEncodingFitMixin):
def predict(self, X):
# in bagging, the coming X is from numpy. Don't transform
if isinstance(X, pd.DataFrame) and hasattr(self, 'cat_columns') and len(self.cat_columns) > 0:
X = self.my_transform(X)
return super().predict(X)
class OnehotEncodingFitMixin(EncodingBase):
def convert_x_values_lookup(self, x_values_lookup=None):
need_label_encoding = hasattr(self, 'cat_columns') and len(self.cat_columns) > 0 and x_values_lookup is not None
if not need_label_encoding:
return x_values_lookup
x_values_lookup = x_values_lookup.copy()
# record it
self.cat_x_values_lookup = {c: x_values_lookup[c] for c in self.cat_columns}
for c in self.cat_columns:
del x_values_lookup[c]
for feat_name in self.feature_names:
if feat_name not in x_values_lookup:
x_values_lookup[feat_name] = np.array(list(self.X_values_counts[feat_name].keys()))
return x_values_lookup
def revert_dataframe(self, df):
''' Convert the one-hot-encoded GAM dataframe back to one keyed by the original categorical features '''
need_label_encoding = hasattr(self, 'cat_columns') and len(self.cat_columns) > 0
if not need_label_encoding:
return df
overall_logic_kept = None
onehot_features = []
for c in self.cat_columns:
logic = df.feat_name.apply(lambda x: x.startswith(c + '_'))
overall_logic_kept = logic if overall_logic_kept is None else (logic | overall_logic_kept)
filtered = df[logic].copy()
filtered['new_y_val'] = filtered.y.apply(lambda x: (x[1] - x[0]) if len(x) == 2 else 0.)
# Record it into the X_values_counts
if c not in self.X_values_counts:
values = [self.X_values_counts[f][1] if 1 in self.X_values_counts[f] else 0 for f in filtered.feat_name]
keys = filtered.feat_name.apply(lambda x: x[(len(c)+1):])
self.X_values_counts[c] = dict(zip(keys, values))
filtered['proportion'] = list(self.X_values_counts[c].values())
offset = np.average(filtered.new_y_val.values, weights=filtered.proportion.values)
filtered.new_y_val -= offset
importance = np.average(np.abs(filtered.new_y_val.values), weights=filtered.proportion.values)
# Use indep Gaussian to estimate y_std
if 'y_std' in filtered:
new_y_std = filtered.y_std.apply(lambda x: np.sqrt(x[0] ** 2 + x[1] ** 2) if len(x) == 2 else 0.)
onehot_features.append(dict(
feat_name=c,
feat_idx=None,
x=filtered.feat_name.apply(lambda x: x[(len(c)+1):]).values.tolist(),
y=filtered.new_y_val.values.tolist(),
importance=importance,
**({'y_std': new_y_std.values.tolist()} if 'y_std' in filtered else {})
))
onehot_df = pd.DataFrame(onehot_features)
# Handle the case where the incoming x_values_lookup has more values than the model has seen
if hasattr(self, 'cat_x_values_lookup'):
for idx, c in enumerate(self.cat_columns):
row = onehot_df.iloc[idx]
orig_x = self.cat_x_values_lookup[c]
if len(row.x) == len(orig_x) and np.all(np.array(row.x) == np.array(orig_x)):
continue
cat_x = list(row.x) + list(orig_x)
cat_y = list(row.y) + [0.] * len(orig_x)
final_x, index = np.unique(cat_x, return_index=True)
final_y = np.array(cat_y)[index]
onehot_df.at[idx, 'x'] = final_x
onehot_df.at[idx, 'y'] = final_y
if 'y_std' in onehot_df:
cat_y_std = list(row.y_std) + [0.] * len(orig_x)
onehot_df.at[idx, 'y_std'] = np.array(cat_y_std)[index]
newdf = pd.concat([df[~overall_logic_kept], onehot_df], axis=0)
newdf.feat_idx = [-1] + list(range(newdf.shape[0] - 1))
newdf = newdf.reset_index(drop=True)
return newdf
def fit(self, X, y, **kwargs):
if isinstance(X, pd.DataFrame): # in bagging, the coming X is from numpy. Don't transform
self.cat_columns = X.columns[X.dtypes == object].values.tolist()
X = pd.get_dummies(X)
return super().fit(X, y, **kwargs)
def transform_X_to_fit_model_feats(self, X):
X = | pd.get_dummies(X) | pandas.get_dummies |
#!/bin/python
import pandas as pd
import glob
import argparse
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
# Synopsis:
# Generates summary statistics on Cromwell monitoring logs collected using download_monitoring_logs.py.
# Cost estimates assume all machines are preemptible and have a fixed bootup time. Resource
# usage and costs are reported for requesting optimal resources (equal to the max observed) uniformly across all shards ("static")
# and for sizing each shard individually ("dynamic").
#
# Usage:
# python analyze_monitoring_logs.py /path/to/logs /path/to/output_base
#
# Parameters:
# /path/to/logs : Path containing monitoring script logs ending in ".monitoring.log"
# /path/to/output_base : Base output path, to which extensions will be appended for each output file
#
# Author: <NAME> (<EMAIL>)
TIME_FORMAT = "%a %b %d %H:%M:%S %Z %Y"
ALL_HEADER = '#job\ttask\thr\tmem_total\tmem_gb_max\tmem_pct_max\tdisk_total\tdisk_gb_max\tdisk_pct_max\tmem_gb_hr\tdisk_gb_hr\tmax_mem_gb_hr\tmax_disk_gb_hr\tcost_mem\tcost_mem_dyn\tcost_disk\tcost_disk_dyn\n'
GROUP_HEADER = '#task\thr\tmem_avg\tmem_gb_max\tmem_pct_max\tdisk_avg\tdisk_gb_max\tdisk_pct_max\tmem_gb_hr\tdisk_gb_hr\tmax_mem_gb_hr\tmax_disk_gb_hr\tcost_mem\tcost_mem_static\tcost_mem_dyn\tcost_disk\tcost_disk_static\tcost_disk_dyn\n'
COST_PER_GB_MEM_HR = 0.000892
COST_CPU_HR = 0.006655
COST_PER_GB_DISK_HR = 0.00005555555
MIN_CPU = 1
MIN_MEM_GB = 0.9
MIN_DISK_GB = 1
BOOT_DISK_GB = 10
DEFAULT_OVERHEAD_MIN = 5.
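# Worked example of the cost model used in read_data() and calc_group() below
# (illustrative numbers only): a shard running 2.0 h on a 4-CPU / 16 GB RAM /
# 100 GB disk VM that peaks at 1.5 CPUs, 3.2 GB RAM and 20 GB disk costs about
#   requested: 2.0 * (4*COST_CPU_HR + 16*COST_PER_GB_MEM_HR
#                     + (100 + BOOT_DISK_GB)*COST_PER_GB_DISK_HR)
#   "dynamic": 2.0 * (1.5*COST_CPU_HR + 3.2*COST_PER_GB_MEM_HR
#                     + (20 + BOOT_DISK_GB)*COST_PER_GB_DISK_HR)
# with each optimal request floored at MIN_CPU / MIN_MEM_GB / MIN_DISK_GB.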
def write_data(data, file_path, header):
with open(file_path, 'w') as f:
f.write(header)
for key in data.index:
f.write(key + '\t' + '\t'.join([str(x)
for x in data.loc[key]]) + '\n')
def read_data(dir, overhead_min=0):
data = {}
for filepath in glob.glob(dir + '/*.monitoring.log'):
with open(filepath, 'r') as f:
mem_gb_data_f = []
disk_gb_data_f = []
mem_pct_data_f = []
disk_pct_data_f = []
cpu_pct_data_f = []
total_mem = 0
total_disk = 0
total_cpu = 0
start_time = None
end_time = None
for line in f:
tokens = line.strip().split(' ')
if start_time is None and line.startswith('['):
start_time = datetime.strptime(
line.strip()[1:-1], TIME_FORMAT)
if line.startswith('['):
end_time = datetime.strptime(
line.strip()[1:-1], TIME_FORMAT)
if line.startswith('Total Memory:'):
total_mem = float(tokens[2])
elif line.startswith('#CPU:'):
total_cpu = float(tokens[1])
elif line.startswith('Total Disk space:'):
total_disk = float(tokens[3])
elif line.startswith('* Memory usage:'):
mem_gb = float(tokens[3])
mem_pct = float(tokens[5][:-1]) / 100.0
mem_gb_data_f.append(mem_gb)
mem_pct_data_f.append(mem_pct)
elif line.startswith('* Disk usage:'):
disk_gb = float(tokens[3])
disk_pct = float(tokens[5][:-1]) / 100.0
disk_gb_data_f.append(disk_gb)
disk_pct_data_f.append(disk_pct)
elif line.startswith('* CPU usage:'):
if len(tokens) == 4:
cpu_pct = float(tokens[3].replace("%", "")) / 100.0
else:
cpu_pct = 1
cpu_pct_data_f.append(cpu_pct)
if len(mem_gb_data_f) > 0 and len(disk_gb_data_f) > 0:
filename = filepath.split('/')[-1]
entry = filename.replace(".monitoring.log", "")
task = entry.split('.')[0]
max_mem_gb = max(mem_gb_data_f)
max_mem_pct = max(mem_pct_data_f)
max_disk_gb = max(disk_gb_data_f)
max_disk_pct = max(disk_pct_data_f)
max_cpu_pct = max(cpu_pct_data_f)
max_cpu = max_cpu_pct * total_cpu
delta_time = end_time - start_time
delta_hour = (delta_time.total_seconds() /
3600.) + (overhead_min / 60.0)
cpu_hour = total_cpu * delta_hour
mem_hour = total_mem * delta_hour
disk_hour = total_disk * delta_hour
max_cpu_hour = max_cpu_pct * total_cpu * delta_hour
max_mem_hour = max_mem_gb * delta_hour
max_disk_hour = max_disk_gb * delta_hour
cost_mem = COST_PER_GB_MEM_HR * mem_hour
cost_mem_opt = COST_PER_GB_MEM_HR * \
max(max_mem_gb, MIN_MEM_GB) * delta_hour
cost_disk = COST_PER_GB_DISK_HR * \
(total_disk + BOOT_DISK_GB) * delta_hour
cost_disk_opt = COST_PER_GB_DISK_HR * \
(max(max_disk_gb, MIN_DISK_GB) + BOOT_DISK_GB) * delta_hour
cost_cpu = COST_CPU_HR * total_cpu * delta_hour
cost_cpu_opt = COST_CPU_HR * \
max(max_cpu, MIN_CPU) * delta_hour
data[entry] = {
"task": task,
"delta_hour": delta_hour,
"total_cpu": total_cpu,
"total_mem": total_mem,
"total_disk": total_disk,
"max_cpu": max_cpu,
"max_cpu_pct": max_cpu_pct,
"max_mem_gb": max_mem_gb,
"max_mem_pct": max_mem_pct,
"max_disk_gb": max_disk_gb,
"max_disk_pct": max_disk_pct,
"cpu_hour": cpu_hour,
"mem_hour": mem_hour,
"disk_hour": disk_hour,
"max_cpu_hour": max_cpu_hour,
"max_mem_hour": max_mem_hour,
"max_disk_hour": max_disk_hour,
"cost_cpu": cost_cpu,
"cost_cpu_opt": cost_cpu_opt,
"cost_mem": cost_mem,
"cost_mem_opt": cost_mem_opt,
"cost_disk": cost_disk,
"cost_disk_opt": cost_disk_opt
}
return data
def get_data_field(name, data):
return [x[name] for x in data]
def calc_group(data):
task_names = data.task.unique()
group_data = {}
for task in task_names:
d = data.loc[data['task'] == task]
hours = np.sum(d["delta_hour"])
avg_cpu = np.mean(d["total_cpu"])
avg_mem = np.mean(d["total_mem"])
max_mem = np.max(d["max_mem_gb"])
max_cpu = np.max(d["max_cpu"])
max_cpu_pct = np.max(d["max_cpu_pct"])
max_mem_pct = np.max(d["max_mem_pct"])
avg_disk = np.mean(d["total_disk"])
max_disk = np.max(d["max_disk_gb"])
max_disk_pct = np.max(d["max_disk_pct"])
cpu_hour = np.sum(d["cpu_hour"])
mem_hour = np.sum(d["mem_hour"])
disk_hour = np.sum(d["disk_hour"])
max_cpu_hour = np.max(d["max_cpu_hour"])
max_mem_hour = np.max(d["max_mem_hour"])
max_disk_hour = np.max(d["max_disk_hour"])
cost_cpu = np.sum(d["cost_cpu"])
cost_cpu_dyn = np.sum(d["cost_cpu_opt"])
cost_mem = np.sum(d["cost_mem"])
cost_mem_dyn = np.sum(d["cost_mem_opt"])
cost_disk = np.sum(d["cost_disk"])
cost_disk_dyn = np.sum(d["cost_disk_opt"])
cost_cpu_static = COST_CPU_HR * max(max_cpu, MIN_CPU) * hours
cost_mem_static = COST_PER_GB_MEM_HR * max(max_mem, MIN_MEM_GB) * hours
cost_disk_static = COST_PER_GB_DISK_HR * \
(max(max_disk, MIN_DISK_GB) + BOOT_DISK_GB) * hours
group_data[task] = {
"hours": hours,
"avg_cpu": avg_cpu,
"avg_mem": avg_mem,
"avg_disk": avg_disk,
"max_cpu": max_cpu,
"max_cpu_pct": max_cpu_pct,
"max_mem": max_mem,
"max_mem_pct": max_mem_pct,
"max_disk": max_disk,
"max_disk_pct": max_disk_pct,
"cpu_hour": cpu_hour,
"mem_hour": mem_hour,
"disk_hour": disk_hour,
"max_cpu_hour": max_cpu_hour,
"max_mem_hour": max_mem_hour,
"max_disk_hour": max_disk_hour,
"cost_cpu": cost_cpu,
"cost_cpu_static": cost_cpu_static,
"cost_cpu_dyn": cost_cpu_dyn,
"cost_mem": cost_mem,
"cost_mem_static": cost_mem_static,
"cost_mem_dyn": cost_mem_dyn,
"cost_disk": cost_disk,
"cost_disk_static": cost_disk_static,
"cost_disk_dyn": cost_disk_dyn,
"total_cost": cost_cpu + cost_mem + cost_disk,
"total_cost_static": cost_cpu_static + cost_mem_static + cost_disk_static,
"total_cost_dyn": cost_cpu_dyn + cost_mem_dyn + cost_disk_dyn
}
return group_data
def do_simple_bar(data, xticks, path, bar_width=0.35, height=12, width=12,
xtitle='', ytitle='', title='', bottom_adjust=0, legend=[],
yscale='linear', sort_values=None):
num_groups = max([d.shape[0] for d in data])
if sort_values is not None:
sort_indexes = np.flip(np.argsort(sort_values))
else:
sort_indexes = np.arange(num_groups)
plt.figure(num=None, figsize=(width, height),
dpi=100, facecolor='w', edgecolor='k')
for i in range(len(data)):
if i < len(legend):
label = legend[i]
else:
label = "data" + str(i)
x = (np.arange(num_groups) * len(data) + i) * bar_width
plt.bar(x, data[i][sort_indexes], label=label)
x = (np.arange(num_groups) * len(data)) * bar_width
plt.xticks(x, [xticks[i] for i in sort_indexes], rotation='vertical')
plt.xlabel(xtitle)
plt.ylabel(ytitle)
plt.title(title)
plt.subplots_adjust(bottom=bottom_adjust)
plt.yscale(yscale)
plt.legend()
plt.savefig(path)
def create_graphs(data, out_files_base, semilog=False, num_samples=None):
tasks = data.index
if num_samples is not None:
data = data / num_samples
ytitle = "Cost, $/sample"
title = "Estimated Cost Per Sample"
else:
ytitle = "Cost, $"
title = "Estimated Total Cost"
if semilog:
yscale = "log"
else:
yscale = "linear"
do_simple_bar(data=[data["total_cost"], data["total_cost_static"], data["total_cost_dyn"]],
xticks=tasks,
path=out_files_base + ".cost.png",
bar_width=1,
height=8,
width=12,
xtitle="Task",
ytitle=ytitle,
title=title,
bottom_adjust=0.35,
legend=["Current", "Unif", "Pred"],
yscale=yscale,
sort_values=data["total_cost"])
# Main function
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"log_dir", help="Path containing monitoring script logs ending in \".monitoring.log\"")
parser.add_argument("output_file", help="Output tsv file base path")
parser.add_argument("--overhead", help="Localization overhead in minutes")
parser.add_argument("--semilog", help="Plot semilog y",
action="store_true")
parser.add_argument(
"--plot-norm", help="Specify number of samples to normalize plots to per sample")
args = parser.parse_args()
if not args.overhead:
overhead = DEFAULT_OVERHEAD_MIN
else:
overhead = float(args.overhead)
if args.plot_norm:
plot_norm = int(args.plot_norm)
else:
plot_norm = None
log_dir = args.log_dir
out_file = args.output_file
data = read_data(log_dir, overhead_min=overhead)
df = pd.DataFrame(data).T
group_data = calc_group(df)
group_df = | pd.DataFrame(group_data) | pandas.DataFrame |
#coding=utf-8
import pandas as pd
import numpy as np
import sys
import os
from sklearn import preprocessing
import datetime
import scipy as sc
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.externals import joblib
#import joblib
class FEbase(object):
"""description of class"""
def __init__(self, **kwargs):
pass
def create(self,*DataSetName):
#print (self.__class__.__name__)
(filepath, tempfilename) = os.path.split(DataSetName[0])
(filename, extension) = os.path.splitext(tempfilename)
#bufferstring='savetest2017.csv'
bufferstringoutput=filepath+'/'+filename+'_'+self.__class__.__name__+extension
if(os.path.exists(bufferstringoutput)==False):
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
df_all=self.core(DataSetName)
df_all.to_csv(bufferstringoutput)
return bufferstringoutput
def core(self,df_all,Data_adj_name=''):
return df_all
def real_FE():
return 0
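# Example usage (file names are hypothetical): calling
#   FE_a23().create('data/daily.csv', 'data/adj.csv', 'data/limit.csv',
#                   'data/moneyflow.csv', 'data/dailybasic.csv')
# runs core() on those inputs, writes the result to 'data/daily_FE_a23.csv'
# (input name + '_' + subclass name), and returns that path; an existing output
# file is reused instead of being recomputed.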
class FEg30eom0110network(FEbase):
# This version switches to 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
intflag=True
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
# Whether the stock is ST or otherwise abnormal
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
## Exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Adjusted price (after applying the adjustment factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
if(intflag):
df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
if(intflag):
df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
if(intflag):
df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max',True)
df_all,_=FEsingle.HighLowRange(df_all,8,True)
df_all,_=FEsingle.HighLowRange(df_all,25,True)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
# Whether the stock hit the daily limit (limit-up)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
### Actual price range (distinguishes high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
# 1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
if(intflag):
df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
if(intflag):
df_all['pct_chg_abs_rank']=df_all['pct_chg_abs_rank']*10//2
df_all=FEsingle.PctChgAbsSumRank(df_all,6,True)
df_all=FEsingle.PctChgSumRank(df_all,3,True)
df_all=FEsingle.PctChgSumRank(df_all,6,True)
df_all=FEsingle.PctChgSumRank(df_all,12,True)
df_all=FEsingle.AmountChgRank(df_all,12,True)
#df_all=FEsingle.AmountChgRank(df_all,30)
# Compute ranks of the three price ratios
dolist=['open','high','low']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
if(intflag):
df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# Drop stocks whose price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
# Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
class FEg30eom0110onlinew6d(FEbase):
# This version switches to 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_all['sm_amount']=df_all.groupby('ts_code')['sm_amount'].shift(1)
df_all['lg_amount']=df_all.groupby('ts_code')['lg_amount'].shift(1)
df_all['net_mf_amount']=df_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
# Whether the stock is ST or otherwise abnormal
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
## Exclude STAR Market (688) stocks
#print(df_all)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Adjusted price (after applying the adjustment factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
# Whether the stock hit the daily limit (limit-up)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
### Actual price range (distinguishes high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
# 1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
#df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
# Compute ranks of the three price ratios
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# Drop stocks whose price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
# Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
class FE_a23(FEbase):
# This version switches to 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
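        # Net money flow per order-size bucket: sm_amount / lg_amount are buy-minus-sell
        # amounts for small and large orders; net_mf_amount (overall net flow) is kept
        # from the raw moneyflow data.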
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
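        # Rolling sums of the three money-flow series over 5/12/25-day windows
        # (per the InputChgSum helper name): short-, mid- and long-horizon flow features.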
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # Flag ST or otherwise abnormal stocks (via the price-limit ratio)
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
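        # limit_percent = down_limit / up_limit: a regular +/-10% stock gives roughly
        # 0.9/1.1 ~= 0.82 (inside (0.58, 0.85) -> flag 1), while a +/-5% ST stock gives
        # 0.95/1.05 ~= 0.90 (outside -> flag 0), so flag==1 effectively marks non-ST stocks.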
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
        ## Exclude the STAR Market (688*)
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
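        # class1 encodes the board by ts_code prefix: 30* ChiNext, 60* Shanghai main board,
        # 00* Shenzhen main/SME board; 688* (STAR Market) rows were already excluded above.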
#===================================================================================================================================#
        # Price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
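        # Bucket the lagged market-cap percentile rank into 20 integer bins:
        # e.g. rank 0.37 -> 0.37*19.9 = 7.363 -> //1 = 7.0; rank 1.0 maps to 19.0,
        # so using 19.9 (rather than 20) keeps the top rank inside bin 19.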
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # Limit-up flag (did the stock hit / approach the daily upper price limit)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### Real price range (distinguishes genuinely high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
        # Compute cross-sectional ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
        # Drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
        # Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
        # Planned version for the new model (live / real-time feature path)
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        # Question mark here (to be revisited)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
        df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to string
        df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # the .str accessor must be used for string methods
print(df_all)
        ## Exclude the STAR Market (688*)
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # Price after applying the adjustment factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
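        # Live-data path: today's adj_factor may be missing (filled with 0 above), so the
        # adjusted price is rebuilt from yesterday's real_price scaled by today's pct_chg.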
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
        # Limit-up flag (did the stock hit / approach the daily upper price limit)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
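        # In the live path a gain in the 4.8%-5.2% band is also flagged, presumably to catch
        # limit-ups of +/-5% (ST) stocks in addition to the ~10% case above.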
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
        # Compute cross-sectional ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
        # Drop stocks with too-low market value
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        # Columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
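        # Keep only the most recent trading day: the live pipeline builds features for
        # "today" only and writes them to today_train.csv for prediction.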
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
        dwdw=1  # no-op placeholder (likely kept as a debugger breakpoint anchor)
class FE_a29(FEbase):
    # This version switches to a 3-day prediction horizon
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # Flag ST or otherwise abnormal stocks (via the price-limit ratio)
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
        ## Exclude the STAR Market (688*)
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # Price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
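        # Differences between lookback windows (25 vs 12, 12 vs 5) capture how the
        # position-within-range features shift as the horizon shortens.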
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # Limit-up flag (did the stock hit / approach the daily upper price limit)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### Real price range (distinguishes genuinely high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
        # Compute cross-sectional ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
        # Drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>15]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
        # Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
        # Planned version for the new model (live / real-time feature path)
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        # Question mark here (to be revisited)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
        df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to string
        df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # the .str accessor must be used for string methods
print(df_all)
        ## Exclude the STAR Market (688*)
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # Price after applying the adjustment factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
        # Limit-up flag (did the stock hit / approach the daily upper price limit)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
        # Compute cross-sectional ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
        # Drop stocks with too-low market value
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        # Columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
        dwdw=1  # no-op placeholder (likely kept as a debugger breakpoint anchor)
class FE_a29_Volatility(FEbase):
    # This version switches to a 3-day prediction horizon
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # Flag ST or otherwise abnormal stocks (via the price-limit ratio)
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
        ## Exclude the STAR Market (688*)
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # Price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # Limit-up flag (did the stock hit / approach the daily upper price limit)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### Real price range (distinguishes genuinely high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
        # Compute cross-sectional ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
        # Drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>15]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
        # Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
        # Planned version for the new model (live / real-time feature path)
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        # Question mark here (to be revisited)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
        df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to string
        df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # the .str accessor must be used for string methods
print(df_all)
        ## Exclude the STAR Market (688*)
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # Price after applying the adjustment factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
        # Limit-up flag (did the stock hit / approach the daily upper price limit)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
        # Compute cross-sectional ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
        # Drop stocks with too-low market value
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        # Columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
        dwdw=1  # no-op placeholder (likely kept as a debugger breakpoint anchor)
class FE_a31(FEbase):
    # This version switches to a 3-day prediction horizon
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # Flag ST or otherwise abnormal stocks (via the price-limit ratio)
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
        ## Exclude the STAR Market (688*)
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # Price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
# Hit the daily price limit?
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
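# pct_chg > 9.4 is used as a proxy for a ~10% limit-up day; such rows are filtered out further down.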
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
### Real price range (distinguishes genuinely high- vs low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
# Compute percentile ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
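# open/high/low have been replaced by their gap versus pre_close (in %) and then by the
# cross-sectional percentile rank of that gap on each trade_date.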
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
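# Assumption: FEsingle.PredictDaysTrend(df_all, 5) attaches the 5-day forward label
# ('tomorrow_chg' / 'tomorrow_chg_rank') that serves as the training target.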
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# Drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
# Columns not needed for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
# Planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
# Question mark here (to be revisited)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # pad to 6 digits; the .str accessor is required for string methods
print(df_all)
## Exclude the STAR Market (688xxx codes)
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Adjusted price (after applying adj_factor)
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
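# In this real-time path the latest adj_factor can be missing (filled with 0 above), so the
# adjusted price is rebuilt from yesterday's value scaled by today's pct_chg as an approximation.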
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
# Hit the daily price limit?
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
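# The extra 4.8-5.2% band presumably catches ~5% limit-ups (ST-style price limits) in this
# real-time variant.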
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
# Compute percentile ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
# Drop stocks with too-low market cap (the active filters below are on close price and amount)
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
# Columns not needed for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
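# Hedged sketch (assumption, not the project's actual helper): rollingRankSciPyB, used in the
# rolling(20).apply(...) calls above, appears to return the percentile position of the most
# recent value within its window. A minimal standalone equivalent could look like this:
def _rolling_rank_sketch(window_values):
    # Rank the last element of the window and normalise to (0, 1].
    from scipy import stats
    ranks = stats.rankdata(window_values)
    return ranks[-1] / len(window_values)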
class FE_a31_full(FEbase):
# This version was changed to a 3-day prediction (note: the code below calls PredictDaysTrend with 5)
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
# Is it ST or otherwise restricted?
#df_all['st_or_otherwrong']=0
#df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
## Exclude the STAR Market (688xxx codes)
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Adjusted price (after applying adj_factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
# Hit the daily price limit?
#df_all['high_stop']=0
#df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
### Real price range (distinguishes genuinely high- vs low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
# Compute percentile ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# Drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
# Columns not needed for now
#df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['amount','close','real_price'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
# Planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
# Question mark here (to be revisited)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # pad to 6 digits; the .str accessor is required for string methods
print(df_all)
## Exclude the STAR Market (688xxx codes)
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Adjusted price (after applying adj_factor)
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
# Hit the daily price limit?
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
# Compute percentile ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
# Drop stocks with too-low market cap (the active filters below are on close price and amount)
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
# Columns not needed for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a29_full(FEbase):
# This version was changed to a 3-day prediction (note: the code below calls PredictDaysTrend with 5)
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
# Is it ST or otherwise restricted?
#df_all['st_or_otherwrong']=0
#df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
## Exclude the STAR Market (688xxx codes)
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Adjusted price (after applying adj_factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
# Hit the daily price limit?
#df_all['high_stop']=0
#df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
### Real price range (distinguishes genuinely high- vs low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
# Compute percentile ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# Drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
# Columns not needed for now
#df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['amount','close','real_price'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
# Planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
# Question mark here (to be revisited)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # pad to 6 digits; the .str accessor is required for string methods
print(df_all)
## Exclude the STAR Market (688xxx codes)
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Adjusted price (after applying adj_factor)
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
# Hit the daily price limit?
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
# Compute percentile ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
# Drop stocks with too-low market cap (the active filters below are on close price and amount)
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
# Columns not needed for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_qliba2(FEbase):
# This version was changed to a 3-day prediction (note: the code below calls PredictDaysTrend with 5)
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
#===================================================================================================================================#
# Adjusted price (after applying adj_factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
df_all=FEsingle.PredictDaysTrend(df_all,5)
print(df_all)
df_all=df_all.loc[:,['ts_code','trade_date','tomorrow_chg','tomorrow_chg_rank']]
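# Only the key columns and the label are kept here; every predictive feature of this variant
# comes from the precomputed qlib factor file merged below.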
print(df_all.dtypes)
print(df_all)
#===================================================================================================================================#
# Load the precomputed qlib features
###df_qlib_1=pd.read_csv('zzztest.csv',header=0)
###df_qlib_2=pd.read_csv('zzztest2.csv',header=0)
##df_qlib_1=pd.read_csv('2013.csv',header=0)
###df_qlib_1=df_qlib_1.iloc[:,0:70]
##df_qlib_all_l=df_qlib_1.iloc[:,0:2]
##df_qlib_all_r=df_qlib_1.iloc[:,70:]
##df_qlib_1 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##print(df_qlib_1.head(10))
##df_qlib_2=pd.read_csv('2015.csv',header=0)
##df_qlib_all_l=df_qlib_2.iloc[:,0:2]
##df_qlib_all_r=df_qlib_2.iloc[:,70:]
##df_qlib_2 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##df_qlib_3=pd.read_csv('2017.csv',header=0)
##df_qlib_all_l=df_qlib_3.iloc[:,0:2]
##df_qlib_all_r=df_qlib_3.iloc[:,70:]
##df_qlib_3 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##df_qlib_4=pd.read_csv('2019.csv',header=0)
##df_qlib_all_l=df_qlib_4.iloc[:,0:2]
##df_qlib_all_r=df_qlib_4.iloc[:,70:]
##df_qlib_4 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##df_qlib_all=pd.concat([df_qlib_2,df_qlib_1])
##df_qlib_all=pd.concat([df_qlib_3,df_qlib_all])
##df_qlib_all=pd.concat([df_qlib_4,df_qlib_all])
##df_qlib_all.drop_duplicates()
##print(df_qlib_all.head(10))
##df_qlib_all.drop(['LABEL0'],axis=1,inplace=True)
##df_qlib_all.to_csv("13to21_first70plus.csv")
df_qlib_all=pd.read_csv('13to21_first70plus.csv',header=0)
#df_qlib_all.drop(['LABEL0'],axis=1,inplace=True)
print(df_qlib_all)
df_qlib_all.rename(columns={'datetime':'trade_date','instrument':'ts_code','score':'mix'}, inplace = True)
print(df_qlib_all.dtypes)
print(df_qlib_all)
df_qlib_all['trade_date'] = pd.to_datetime(df_qlib_all['trade_date'], format='%Y-%m-%d')
df_qlib_all['trade_date']=df_qlib_all['trade_date'].apply(lambda x: x.strftime('%Y%m%d'))
df_qlib_all['trade_date'] = df_qlib_all['trade_date'].astype(int)
df_qlib_all['ts_codeL'] = df_qlib_all['ts_code'].str[:2]
df_qlib_all['ts_codeR'] = df_qlib_all['ts_code'].str[2:]
df_qlib_all['ts_codeR'] = df_qlib_all['ts_codeR'].apply(lambda s: s+'.')
df_qlib_all['ts_code']=df_qlib_all['ts_codeR'].str.cat(df_qlib_all['ts_codeL'])
df_qlib_all.drop(['ts_codeL','ts_codeR'],axis=1,inplace=True)
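# qlib-style instrument codes (e.g. 'SH600000') are converted to TuShare-style '600000.SH' by
# splitting off the exchange prefix and re-attaching it as a suffix; trade_date was likewise
# converted to an integer yyyymmdd above so the merge keys match df_all.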
print(df_qlib_all.dtypes)
print(df_qlib_all)
df_qlib_all=df_qlib_all.fillna(value=0)
df_all=pd.merge(df_all, df_qlib_all, how='left', on=['ts_code','trade_date'])
print(df_all)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
# Planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
# Question mark here (to be revisited)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # pad to 6 digits; the .str accessor is required for string methods
print(df_all)
## Exclude the STAR Market (688xxx codes)
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Adjusted price (after applying adj_factor)
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
# Hit the daily price limit?
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
# Compute percentile ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
# Drop stocks with too-low market cap (the active filters below are on close price and amount)
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
# Columns not needed for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
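# Hedged sketch (assumption, not FEsingle's actual implementation): PredictDaysTrend(df, n)
# appears to build the forward-looking label used above ('tomorrow_chg' and
# 'tomorrow_chg_rank'), i.e. the n-day forward return per ts_code plus a bucketed
# cross-sectional rank of it. A minimal illustration under those assumptions:
def _predict_days_trend_sketch(df, n_days=5):
    df = df.sort_values(['ts_code', 'trade_date']).copy()
    # forward n-day percentage change of the adjusted price, per stock
    future = df.groupby('ts_code')['real_price'].shift(-n_days)
    df['tomorrow_chg'] = (future - df['real_price']) / df['real_price'] * 100
    # cross-sectional percentile rank per trade_date, bucketed into integers 0..9
    df['tomorrow_chg_rank'] = df.groupby('trade_date')['tomorrow_chg'].rank(pct=True) * 9.9 // 1
    return df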
class FEonlinew_a31(FEbase):
# This version was changed to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
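        # sm_amount / lg_amount are net small-order and large-order money flows (buy minus sell);
        # together with net_mf_amount they are shifted below so only prior-day flows are used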
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
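        # FEsingle.InputChgSum is assumed to append rolling 5/12/25-day sums of the given column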
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        #flag ST or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
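        # down_limit/up_limit is roughly 0.82 for regular stocks (10% limits) but about 0.90 for ST
        # stocks (5% limits), so this flag marks non-ST stocks; only flagged rows are kept later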
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
        ##exclude the STAR Market (688 codes)
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
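        # class1 encodes the listing board from the ts_code prefix: 30* ChiNext, 60* Shanghai main
        # board, 00* Shenzhen main board; 688* (STAR Market) rows were removed above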
#===================================================================================================================================#
        #price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
        df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        #limit-up/limit-down stop flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ###real price range (distinguish high vs low actual share prices)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
        #1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
        #compute cross-sectional ranks for the three price ratios (open/high/low)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
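        # FEsingle.PredictDaysTrend is assumed to build the 5-day forward-looking label used as the
        # training target (e.g. tomorrow_chg / tomorrow_chg_rank)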
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
        #drop stocks whose share price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
        #columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
        #planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all['sm_amount']=df_all.groupby('ts_code')['sm_amount'].shift(1)
df_all['lg_amount']=df_all.groupby('ts_code')['lg_amount'].shift(1)
df_all['net_mf_amount']=df_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=FEsingle.InputChgSum(df_all,5,'sm_amount')
df_all=FEsingle.InputChgSum(df_all,5,'lg_amount')
df_all=FEsingle.InputChgSum(df_all,5,'net_mf_amount')
df_all=FEsingle.InputChgSum(df_all,12,'sm_amount')
df_all=FEsingle.InputChgSum(df_all,12,'lg_amount')
df_all=FEsingle.InputChgSum(df_all,12,'net_mf_amount')
df_all=FEsingle.InputChgSum(df_all,25,'sm_amount')
df_all=FEsingle.InputChgSum(df_all,25,'lg_amount')
df_all=FEsingle.InputChgSum(df_all,25,'net_mf_amount')
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
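        # dayofweek: 0 = Monday ... 4 = Friday, used as a simple calendar feature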
        #question mark here (to be revisited)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
        df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
        df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor must be used here
print(df_all)
        ##exclude the STAR Market (688 codes)
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        #price after applying the adjustment factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
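        # presumably because today's adj_factor may be missing in the live feed, the adjusted price is
        # rebuilt from yesterday's adjusted close times (1 + pct_chg/100)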
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
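        # the *_diff features compare the price-position ranks of the longer windows (25/12 days)
        # with those of the shorter ones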
        #limit-up/limit-down stop flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        #1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
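        # likewise these diffs compare cumulative momentum across the 24/12/6/3-day windows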
        #compute cross-sectional ranks for the three price ratios (open/high/low)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
        #drop stocks whose market value is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
df_all=df_all[df_all['total_mv_rank']<6]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        #columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FEfast_a23(FEbase):
    #this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
#print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        #flag ST or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
        ##exclude the STAR Market (688 codes)
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#df_all['ts_code_try']=df_all['ts_code'].map(lambda x : x[:-3])
#===================================================================================================================================#
        #price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
#df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
#df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
#df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
        df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        #limit-up/limit-down stop flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ###real price range (distinguish high vs low actual share prices)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
        #1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
#df_all=FEsingle.PctChgAbsSumRank(df_all,6)
#df_all=FEsingle.PctChgSumRank(df_all,3)
#df_all=FEsingle.PctChgSumRank(df_all,6)
#df_all=FEsingle.PctChgSumRank(df_all,12)
#df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
#df_all=FEsingle.PctChgSum(df_all,6)
#df_all=FEsingle.PctChgSum(df_all,12)
#df_all=FEsingle.PctChgSum(df_all,24)
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,24)
        #compute cross-sectional ranks for the three price ratios (open/high/low)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*19.9//2
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
        #drop stocks whose share price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
        #columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
        #planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        #question mark here (to be revisited)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
        df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
        df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor must be used here
print(df_all)
        ##exclude the STAR Market (688 codes)
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        #price after applying the adjustment factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
        #limit-up/limit-down stop flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        #1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
        #compute cross-sectional ranks for the three price ratios (open/high/low)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
        #drop stocks whose market value is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        #columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FEfast_a23_pos(FEbase):
    #this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
#print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        #flag ST or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
        ##exclude the STAR Market (688 codes)
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#df_all['ts_code_try']=df_all['ts_code'].map(lambda x : x[:-3])
#===================================================================================================================================#
        #price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['tomorrow_chg_rank']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPy(x)).reset_index(0,drop=True)
df_all['tomorrow_chg_rank']=df_all.groupby('ts_code')['tomorrow_chg_rank'].shift(-20)
df_all['tomorrow_chg']=df_all['tomorrow_chg_rank']
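        # label construction: the 20-day rolling price position is pulled back with shift(-20), so
        # tomorrow_chg_rank reflects where the price 20 days ahead sits within the NEXT 20-day range;
        # this uses future data and is only valid for building training targets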
#df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
#df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
#df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
        df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        #limit-up/limit-down stop flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ###real price range (distinguish high vs low actual share prices)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
        #1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
#df_all=FEsingle.PctChgAbsSumRank(df_all,6)
#df_all=FEsingle.PctChgSumRank(df_all,3)
#df_all=FEsingle.PctChgSumRank(df_all,6)
#df_all=FEsingle.PctChgSumRank(df_all,12)
#df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
#df_all=FEsingle.PctChgSum(df_all,6)
#df_all=FEsingle.PctChgSum(df_all,12)
#df_all=FEsingle.PctChgSum(df_all,24)
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,24)
        #compute cross-sectional ranks for the three price ratios (open/high/low)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*19.9//2
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
#df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
        #drop stocks whose share price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
        #columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
        #planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        #question mark here (to be revisited)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
        df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
        df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor must be used here
print(df_all)
        ##exclude the STAR Market (688 codes)
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        #price after applying the adjustment factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
        #limit-up/limit-down stop flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        #1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
        #compute cross-sectional ranks for the three price ratios (open/high/low)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
        #drop stocks whose market value is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        #columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FEfast_a41(FEbase):
    #this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
#df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all= | pd.read_csv(DataSetName[4],index_col=0,header=0) | pandas.read_csv |
import trainer
import tensorflow as tf
import pandas as pd
import unittest
class TestTrainerCase(unittest.TestCase):
def test_construct_feature_columns(self):
feature_name = ['test']
feature_columns = trainer.construct_feature_columns(feature_name)
expected = [tf.feature_column.numeric_column(key='test')]
self.assertEqual(expected, feature_columns)
def test_get_input_fn(self):
features = {'col_1': [1, 2, 3, 4]}
label = {'col_2': [0] * 4}
features_df = | pd.DataFrame.from_dict(features) | pandas.DataFrame.from_dict |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
from fltk.util.arguments import Arguments
import logging
from fltk.datasets.cifar100 import CIFAR100Dataset
from .base import BaseDistDataset, DataframeDataset
# def get_tuple_from_data_loader(data_loader):
# """
# Get a tuple representation of the data stored in a data loader.
#
# :param data_loader: data loader to get data from
# :type data_loader: torch.utils.data.DataLoader
# :return: tuple
# """
# return (next(iter(data_loader))[0].numpy(), next(iter(data_loader))[1].numpy())
def load_cifar_train():
    # TODO: Generate cifar100.txt
args = Arguments(logging)
cifar = CIFAR100Dataset(args)
train_data = cifar.load_train_dataset()
test_data = cifar.load_test_dataset()
# train_df, test_df, train_labels, test_labels = train_test_split(feats, labels, test_size=1 / 3, random_state=42)
print(type(train_data))
print(train_data)
print("Tuple size")
print(len(train_data))
print(train_data[0].shape)
train_df = | pd.DataFrame(train_data[0]) | pandas.DataFrame |
# Copyright (c) 2019-2021 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# https://github.com/boschresearch/pylife
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylife.mesh.gradient
import pandas as pd
import numpy as np
def test_grad_constant():
fkt = [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5]
df = pd.DataFrame({'node_id': [1, 2, 2, 3, 4, 4, 5, 5, 5, 5, 6, 6, 7, 8, 8, 9],
'element_id': [1, 1, 2, 2, 1, 3, 1, 2, 3, 4, 2, 4, 3, 3, 4, 4],
'x': [0, 1, 1, 2, 0, 0, 1, 1, 1, 1, 2, 2, 0, 1, 1, 2],
'y': [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2],
'z': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'fct': fkt}).set_index(['node_id', 'element_id'])
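    # nodes 1-9 form a 3x3 grid split into four quadrilateral elements, so the gradient of a
    # constant nodal field must vanish everywhere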
expected = pd.DataFrame({
'dfct_dx': np.zeros(9),
'dfct_dy': np.zeros(9),
'dfct_dz': np.zeros(9)
}, index=pd.RangeIndex(1, 10, name='node_id'))
grad = df.gradient.gradient_of('fct')
pd.testing.assert_frame_equal(grad, expected)
def test_grad_dx():
fkt = [1, 4, 4, 7, 1, 1, 4, 4, 4, 4, 7, 7, 1, 4, 4, 7]
df = pd.DataFrame({'node_id': [1, 2, 2, 3, 4, 4, 5, 5, 5, 5, 6, 6, 7, 8, 8, 9],
'element_id': [1, 1, 2, 2, 1, 3, 1, 2, 3, 4, 2, 4, 3, 3, 4, 4],
'x': [0, 1, 1, 2, 0, 0, 1, 1, 1, 1, 2, 2, 0, 1, 1, 2],
'y': [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2],
'z': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'fct': fkt})
df = df.set_index(['node_id', 'element_id'])
expected = pd.DataFrame({
'dfct_dx': np.full(9, 3.0),
'dfct_dy': np.zeros(9),
'dfct_dz': np.zeros(9)
}, index=pd.RangeIndex(1, 10, name='node_id'))
grad = df.gradient.gradient_of('fct')
pd.testing.assert_frame_equal(grad, expected)
def test_grad_dx_shuffle():
fkt = [1, 4, 4, 7, 1, 1, 4, 4, 4, 4, 7, 7, 1, 4, 4, 7]
df = pd.DataFrame({'node_id': [1, 2, 2, 3, 4, 4, 5, 5, 5, 5, 6, 6, 7, 8, 8, 9],
'element_id': [1, 1, 2, 2, 1, 3, 1, 2, 3, 4, 2, 4, 3, 3, 4, 4],
'x': [0, 1, 1, 2, 0, 0, 1, 1, 1, 1, 2, 2, 0, 1, 1, 2],
'y': [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2],
'z': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'fct': fkt})
df = df.set_index(['node_id', 'element_id'])
df = df.sample(frac=1)
expected = pd.DataFrame({
'dfct_dx': np.full(9, 3.0),
'dfct_dy': np.zeros(9),
'dfct_dz': np.zeros(9)
}, index=pd.RangeIndex(1, 10, name='node_id'))
grad = df.gradient.gradient_of('fct')
pd.testing.assert_frame_equal(grad, expected)
def test_grad_dy():
fkt = [1, 1, 1, 1, 4, 4, 4, 4, 4, 4, 4, 4, 7, 7, 7, 7]
df = pd.DataFrame({'node_id': [1, 2, 2, 3, 4, 4, 5, 5, 5, 5, 6, 6, 7, 8, 8, 9],
'element_id': [1, 1, 2, 2, 1, 3, 1, 2, 3, 4, 2, 4, 3, 3, 4, 4],
'x': [0, 1, 1, 2, 0, 0, 1, 1, 1, 1, 2, 2, 0, 1, 1, 2],
'y': [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2],
'z': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'fct': fkt})
df = df.set_index(['node_id', 'element_id'])
df = df.sample(frac=1)
expected = pd.DataFrame({
'dfct_dx': np.zeros(9),
'dfct_dy': np.full(9, 3.0),
'dfct_dz': np.zeros(9)
}, index=pd.RangeIndex(1, 10, name='node_id'))
grad = df.gradient.gradient_of('fct')
pd.testing.assert_frame_equal(grad, expected)
def test_grad_dy_shuffle():
fkt = [1, 1, 1, 1, 4, 4, 4, 4, 4, 4, 4, 4, 7, 7, 7, 7]
df = pd.DataFrame({'node_id': [1, 2, 2, 3, 4, 4, 5, 5, 5, 5, 6, 6, 7, 8, 8, 9],
'element_id': [1, 1, 2, 2, 1, 3, 1, 2, 3, 4, 2, 4, 3, 3, 4, 4],
'x': [0, 1, 1, 2, 0, 0, 1, 1, 1, 1, 2, 2, 0, 1, 1, 2],
'y': [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2],
'z': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'fct': fkt})
df = df.set_index(['node_id', 'element_id'])
expected = pd.DataFrame({
'dfct_dx': np.zeros(9),
'dfct_dy': np.full(9, 3.0),
'dfct_dz': np.zeros(9)
}, index=pd.RangeIndex(1, 10, name='node_id'))
grad = df.gradient.gradient_of('fct')
| pd.testing.assert_frame_equal(grad, expected) | pandas.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from unittest import mock
import pandas
import pandas.testing
import pytest
import google.api_core.exceptions
from google.cloud.bigquery_storage import types
from .helpers import SCALAR_COLUMNS, SCALAR_COLUMN_NAMES, SCALAR_BLOCKS
pyarrow = pytest.importorskip("pyarrow")
# This dictionary is duplicated in bigquery/google/cloud/bigquery/_pandas_helpers.py
# When modifying it be sure to update it there as well.
BQ_TO_ARROW_TYPES = {
"int64": pyarrow.int64(),
"float64": pyarrow.float64(),
"bool": pyarrow.bool_(),
"numeric": pyarrow.decimal128(38, 9),
"string": pyarrow.utf8(),
"bytes": pyarrow.binary(),
"date": pyarrow.date32(), # int32 days since epoch
"datetime": pyarrow.timestamp("us"),
"time": pyarrow.time64("us"),
"timestamp": pyarrow.timestamp("us", tz="UTC"),
}
@pytest.fixture()
def mut():
from google.cloud.bigquery_storage_v1 import reader
return reader
@pytest.fixture()
def class_under_test(mut):
return mut.ReadRowsStream
@pytest.fixture()
def mock_gapic_client():
from google.cloud.bigquery_storage_v1.services import big_query_read
return mock.create_autospec(big_query_read.BigQueryReadClient)
def _bq_to_arrow_batch_objects(bq_blocks, arrow_schema):
arrow_batches = []
for block in bq_blocks:
arrays = []
for name in arrow_schema.names:
arrays.append(
pyarrow.array(
(row[name] for row in block),
type=arrow_schema.field(name).type,
size=len(block),
)
)
arrow_batches.append(
pyarrow.RecordBatch.from_arrays(arrays, schema=arrow_schema)
)
return arrow_batches
def _bq_to_arrow_batches(bq_blocks, arrow_schema):
arrow_batches = []
first_message = True
for record_batch in _bq_to_arrow_batch_objects(bq_blocks, arrow_schema):
response = types.ReadRowsResponse()
response.arrow_record_batch.serialized_record_batch = (
record_batch.serialize().to_pybytes()
)
if first_message:
response.arrow_schema = {
"serialized_schema": arrow_schema.serialize().to_pybytes(),
}
first_message = False
arrow_batches.append(response)
return arrow_batches
def _bq_to_arrow_schema(bq_columns):
def bq_col_as_field(column):
metadata = None
if column.get("description") is not None:
metadata = {"description": column.get("description")}
name = column["name"]
type_ = BQ_TO_ARROW_TYPES[column["type"]]
mode = column.get("mode", "nullable").lower()
return pyarrow.field(name, type_, mode == "nullable", metadata)
return pyarrow.schema(bq_col_as_field(c) for c in bq_columns)
def _generate_arrow_read_session(arrow_schema):
return types.ReadSession(
arrow_schema={"serialized_schema": arrow_schema.serialize().to_pybytes()}
)
def _pages_w_unavailable(pages):
for page in pages:
yield page
raise google.api_core.exceptions.ServiceUnavailable("test: please reconnect")
def test_pyarrow_rows_raises_import_error(
mut, class_under_test, mock_gapic_client, monkeypatch
):
monkeypatch.setattr(mut, "pyarrow", None)
arrow_schema = _bq_to_arrow_schema(SCALAR_COLUMNS)
arrow_batches = _bq_to_arrow_batches(SCALAR_BLOCKS, arrow_schema)
reader = class_under_test(arrow_batches, mock_gapic_client, "", 0, {})
rows = iter(reader.rows())
# Since session isn't passed in, reader doesn't know serialization type
# until you start iterating.
with pytest.raises(ImportError):
next(rows)
def test_to_arrow_no_pyarrow_raises_import_error(
mut, class_under_test, mock_gapic_client, monkeypatch
):
monkeypatch.setattr(mut, "pyarrow", None)
arrow_schema = _bq_to_arrow_schema(SCALAR_COLUMNS)
arrow_batches = _bq_to_arrow_batches(SCALAR_BLOCKS, arrow_schema)
reader = class_under_test(arrow_batches, mock_gapic_client, "", 0, {})
with pytest.raises(ImportError):
reader.to_arrow()
with pytest.raises(ImportError):
reader.rows().to_arrow()
with pytest.raises(ImportError):
next(reader.rows().pages).to_arrow()
def test_to_arrow_w_scalars_arrow(class_under_test, mock_gapic_client):
arrow_schema = _bq_to_arrow_schema(SCALAR_COLUMNS)
arrow_batches = _bq_to_arrow_batches(SCALAR_BLOCKS, arrow_schema)
reader = class_under_test(arrow_batches, mock_gapic_client, "", 0, {})
actual_table = reader.to_arrow()
expected_table = pyarrow.Table.from_batches(
_bq_to_arrow_batch_objects(SCALAR_BLOCKS, arrow_schema)
)
assert actual_table == expected_table
def test_to_dataframe_w_scalars_arrow(class_under_test, mock_gapic_client):
arrow_schema = _bq_to_arrow_schema(SCALAR_COLUMNS)
arrow_batches = _bq_to_arrow_batches(SCALAR_BLOCKS, arrow_schema)
reader = class_under_test(arrow_batches, mock_gapic_client, "", 0, {})
got = reader.to_dataframe()
expected = pandas.DataFrame(
list(itertools.chain.from_iterable(SCALAR_BLOCKS)), columns=SCALAR_COLUMN_NAMES
)
pandas.testing.assert_frame_equal(
got.reset_index(drop=True), # reset_index to ignore row labels
expected.reset_index(drop=True),
)
def test_rows_w_empty_stream_arrow(class_under_test, mock_gapic_client):
reader = class_under_test([], mock_gapic_client, "", 0, {})
got = reader.rows()
assert tuple(got) == ()
def test_rows_w_scalars_arrow(class_under_test, mock_gapic_client):
arrow_schema = _bq_to_arrow_schema(SCALAR_COLUMNS)
arrow_batches = _bq_to_arrow_batches(SCALAR_BLOCKS, arrow_schema)
reader = class_under_test(arrow_batches, mock_gapic_client, "", 0, {})
got = tuple(
dict((key, value.as_py()) for key, value in row_dict.items())
for row_dict in reader.rows()
)
expected = tuple(itertools.chain.from_iterable(SCALAR_BLOCKS))
assert got == expected
def test_to_dataframe_w_dtypes_arrow(class_under_test, mock_gapic_client):
arrow_schema = _bq_to_arrow_schema(
[
{"name": "bigfloat", "type": "float64"},
{"name": "lilfloat", "type": "float64"},
]
)
blocks = [
[{"bigfloat": 1.25, "lilfloat": 30.5}, {"bigfloat": 2.5, "lilfloat": 21.125}],
[{"bigfloat": 3.75, "lilfloat": 11.0}],
]
arrow_batches = _bq_to_arrow_batches(blocks, arrow_schema)
reader = class_under_test(arrow_batches, mock_gapic_client, "", 0, {})
got = reader.to_dataframe(dtypes={"lilfloat": "float16"})
expected = pandas.DataFrame(
{
"bigfloat": [1.25, 2.5, 3.75],
"lilfloat": pandas.Series([30.5, 21.125, 11.0], dtype="float16"),
},
columns=["bigfloat", "lilfloat"],
)
pandas.testing.assert_frame_equal(
got.reset_index(drop=True), # reset_index to ignore row labels
expected.reset_index(drop=True),
)
def test_to_dataframe_empty_w_scalars_arrow(class_under_test, mock_gapic_client):
arrow_schema = _bq_to_arrow_schema(SCALAR_COLUMNS)
read_session = _generate_arrow_read_session(arrow_schema)
arrow_batches = _bq_to_arrow_batches([], arrow_schema)
reader = class_under_test(arrow_batches, mock_gapic_client, "", 0, {})
# Read session is needed to get a schema for empty streams.
got = reader.to_dataframe(read_session)
expected = pandas.DataFrame([], columns=SCALAR_COLUMN_NAMES)
expected["int_col"] = expected["int_col"].astype("int64")
expected["float_col"] = expected["float_col"].astype("float64")
expected["bool_col"] = expected["bool_col"].astype("bool")
expected["ts_col"] = (
expected["ts_col"].astype("datetime64[ns]").dt.tz_localize("UTC")
)
pandas.testing.assert_frame_equal(
got.reset_index(drop=True), # reset_index to ignore row labels
expected.reset_index(drop=True),
)
def test_to_dataframe_empty_w_dtypes_arrow(class_under_test, mock_gapic_client):
arrow_schema = _bq_to_arrow_schema(
[
{"name": "bigfloat", "type": "float64"},
{"name": "lilfloat", "type": "float64"},
]
)
read_session = _generate_arrow_read_session(arrow_schema)
arrow_batches = _bq_to_arrow_batches([], arrow_schema)
reader = class_under_test(arrow_batches, mock_gapic_client, "", 0, {})
# Read session is needed to get a schema for empty streams.
got = reader.to_dataframe(read_session, dtypes={"lilfloat": "float16"})
expected = pandas.DataFrame([], columns=["bigfloat", "lilfloat"])
expected["bigfloat"] = expected["bigfloat"].astype("float64")
expected["lilfloat"] = expected["lilfloat"].astype("float16")
pandas.testing.assert_frame_equal(
got.reset_index(drop=True), # reset_index to ignore row labels
expected.reset_index(drop=True),
)
def test_to_dataframe_by_page_arrow(class_under_test, mock_gapic_client):
bq_columns = [
{"name": "int_col", "type": "int64"},
{"name": "bool_col", "type": "bool"},
]
arrow_schema = _bq_to_arrow_schema(bq_columns)
bq_block_1 = [
{"int_col": 123, "bool_col": True},
{"int_col": 234, "bool_col": False},
]
bq_block_2 = [
{"int_col": 345, "bool_col": True},
{"int_col": 456, "bool_col": False},
]
bq_block_3 = [
{"int_col": 567, "bool_col": True},
{"int_col": 789, "bool_col": False},
]
bq_block_4 = [{"int_col": 890, "bool_col": True}]
# Break blocks into two groups to test that iteration continues across
# reconnection.
bq_blocks_1 = [bq_block_1, bq_block_2]
bq_blocks_2 = [bq_block_3, bq_block_4]
batch_1 = _bq_to_arrow_batches(bq_blocks_1, arrow_schema)
batch_2 = _bq_to_arrow_batches(bq_blocks_2, arrow_schema)
mock_gapic_client.read_rows.return_value = batch_2
reader = class_under_test(
_pages_w_unavailable(batch_1), mock_gapic_client, "", 0, {}
)
got = reader.rows()
pages = iter(got.pages)
page_1 = next(pages)
pandas.testing.assert_frame_equal(
page_1.to_dataframe(
dtypes={"int_col": "int64", "bool_col": "bool"}
).reset_index(drop=True),
        pandas.DataFrame(bq_block_1, columns=["int_col", "bool_col"]).reset_index(drop=True),
    )
# =========================================================================== #
# INDEPENDENCE MODULE #
# =========================================================================== #
'''Modules for analyzing independence between variables.'''
# %%
# --------------------------------------------------------------------------- #
# LIBRARIES #
# --------------------------------------------------------------------------- #
import collections
from collections import OrderedDict
import itertools
from itertools import combinations
from itertools import product
import math
import textwrap
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import scipy
from scipy import stats
import scikit_posthocs as sp
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
# %%
# ---------------------------------------------------------------------------- #
# CORRELATION #
# ---------------------------------------------------------------------------- #
class Correlation:
'''Class that computes the pairwise correlation between numeric variables
and renders a heatmap
'''
def __init__(self):
self._corr = None
pass
def test(self, df, method='pearson'):
self._corr = df.corr(method)
return(self._corr)
def pairwise(self, df, x, y, method='pearson', threshold=None):
r_tests = pd.DataFrame()
for xs, ys in zip(x,y):
r = df[xs].corr(df[ys])
df_r = pd.DataFrame({'x':xs, 'y':ys}, index=[0])
df_r['r'] = r
df_r['r_abs'] = np.absolute(r)
df_r['strength'] = np.where(df_r.r_abs<0.2, 'Very Weak',
np.where(df_r.r_abs<0.4, 'Weak',
np.where(df_r.r_abs<0.6, "Moderate",
np.where(df_r.r_abs<0.8, "Strong", "Very Strong"))))
df_r['direction'] = np.where(df_r.r <0, "Negative", "Positive")
r_tests = pd.concat([r_tests, df_r], axis=0)
r_tests = r_tests.sort_values(by='r_abs', ascending=False)
if threshold:
r_tests = r_tests[r_tests.r_abs > threshold]
return(r_tests)
def corrtable(self, threshold=None):
r_tests = pd.DataFrame()
cols = self._corr.columns.tolist()
for i in range(len(cols)):
for j in range(len(cols)):
if i != j:
df_r = pd.DataFrame({'x': cols[i], 'y':cols[j], 'r': self._corr.iloc[i][j],
'r_abs': np.absolute(self._corr.iloc[i][j])}, index=[0])
df_r['strength'] = np.where(df_r.r_abs<0.2, 'Very Weak',
np.where(df_r.r_abs<0.4, 'Weak',
np.where(df_r.r_abs<0.6, "Moderate",
np.where(df_r.r_abs<0.8, "Strong", "Very Strong"))))
df_r['direction'] = np.where(df_r.r <0, "Negative", "Positive")
r_tests = pd.concat([r_tests, df_r], axis=0)
r_tests = r_tests.sort_values(by='r_abs', ascending=False)
if threshold:
r_tests = r_tests[r_tests.r_abs > threshold]
return(r_tests)
def corrplot(self):
sns.heatmap(self._corr, xticklabels=self._corr.columns,
yticklabels=self._corr.columns)
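# Illustrative usage sketch (this helper is never called): a typical Correlation
# workflow. `df_num` stands for any DataFrame of numeric columns; the variable
# name and the 0.6 threshold are assumptions, not part of the original API.
def _example_correlation_usage(df_num):
    corr = Correlation()
    corr.test(df_num, method='spearman')           # pairwise correlation matrix
    strong_pairs = corr.corrtable(threshold=0.6)   # pairs with |r| > 0.6
    corr.corrplot()                                # seaborn heatmap of the matrix
    return strong_pairs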
# ---------------------------------------------------------------------------- #
# INDEPENDENCE #
# ---------------------------------------------------------------------------- #
class Independence:
"Class that performs a test of independence"
def __init__(self):
self._sig = 0.05
self._x2 = 0
self._p = 0
self._df = 0
self._obs = []
self._exp = []
def summary(self):
print("\n*", "=" * 78, "*")
print('{:^80}'.format("Pearson's Chi-squared Test of Independence"))
print('{:^80}'.format('Data'))
print('{:^80}'.format("x = " + self._xvar + " y = " + self._yvar + "\n"))
print('{:^80}'.format('Observed Frequencies'))
visual.print_df(self._obs)
print("\n", '{:^80}'.format('Expected Frequencies'))
visual.print_df(self._exp)
results = ("Pearson's chi-squared statistic = " + str(round(self._x2, 3)) + ", Df = " +
str(self._df) + ", p-value = " + '{0:1.2e}'.format(round(self._p, 3)))
print("\n", '{:^80}'.format(results))
print("\n*", "=" * 78, "*")
def post_hoc(self, rowwise=True, verbose=False):
dfs = []
if rowwise:
rows = range(0, len(self._obs))
for pair in list(combinations(rows, 2)):
ct = self._obs.iloc[[pair[0], pair[1]], ]
levels = ct.index.values
x2, p, dof, exp = stats.chi2_contingency(ct)
df = pd.DataFrame({'level_1': levels[0],
'level_2': levels[1],
'x2': x2,
'N': ct.values.sum(),
'p_value': p}, index=[0])
dfs.append(df)
self._post_hoc_tests = pd.concat(dfs)
else:
cols = range(0, len(self._obs.columns.values))
for pair in list(combinations(cols, 2)):
ct = self._obs.iloc[:, [pair[0], pair[1]]]
levels = ct.columns.values
x2, p, dof, exp = stats.chi2_contingency(ct)
df = pd.DataFrame({'level_1': levels[0],
'level_2': levels[1],
'x2': x2,
'N': ct.values.sum(),
'p_value': p}, index=[0])
dfs.append(df)
self._post_hoc_tests = pd.concat(dfs)
if (verbose):
visual.print_df(self._post_hoc_tests)
return(self._post_hoc_tests)
def test(self, x, y, sig=0.05):
self._x = x
self._y = y
self._xvar = x.name
self._yvar = y.name
self._n = x.shape[0]
self._sig = sig
ct = pd.crosstab(x, y)
x2, p, dof, exp = stats.chi2_contingency(ct)
self._x2 = x2
self._p = p
self._df = dof
self._obs = ct
self._exp = pd.DataFrame(exp).set_index(ct.index)
self._exp.columns = ct.columns
if p < sig:
self._result = 'significant'
self._hypothesis = 'reject'
else:
self._result = 'not significant'
self._hypothesis = 'fail to reject'
return x2, p, dof, exp
def report(self, verbose=False):
"Returns or prints results in APA format"
tup = ("A Chi-square test of independence was conducted to "
"examine the relation between " + self._xvar + " and " + self._yvar + ". "
"The relation between the variables was " + self._result + ", "
"X2(" + str(self._df) + ", N = ", str(self._n) + ") = " +
str(round(self._x2, 2)) + ", p = " + '{0:1.2e}'.format(round(self._p, 3)))
self._report = ''.join(tup)
wrapper = textwrap.TextWrapper(width=80)
lines = wrapper.wrap(text=self._report)
if verbose:
for line in lines:
print(line)
return(self._report)
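# Illustrative usage sketch (never called): the chi-squared workflow end to end.
# The column names 'treatment' and 'outcome' are assumptions for the example.
def _example_independence_usage(df):
    ind = Independence()
    ind.test(df['treatment'], df['outcome'], sig=0.05)
    post_hoc = ind.post_hoc(rowwise=True)   # pairwise chi-squared tests between levels
    report = ind.report()                   # APA-style summary sentence
    return post_hoc, report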
# ---------------------------------------------------------------------------- #
# ANOVA #
# ---------------------------------------------------------------------------- #
#%%
class Anova:
'''
Computes Anova tests
'''
def __init__(self):
pass
def aov_test(self, df, x, y, type=2, test='F', sig=0.05):
        df2 = pd.DataFrame({'x': df[x], 'y': df[y]})
import funcy
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
from dateutil import parser
from tqdm import tqdm
from utils.helpers import *
from utils.plot import plot_joint_distribution
font = {
"size": 30
}
matplotlib.rc("font", **font)
pd.options.mode.chained_assignment = None
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
MOST_RECENT_FILE = sorted(os.listdir(os.path.join(BASE_DIR, "data", "REDCap")))[-1]
REDCAP_FPATH = os.path.join(BASE_DIR, "data", "REDCap", MOST_RECENT_FILE)
SERIES_ID_FPATH = os.path.join(BASE_DIR, "data", "match_redcap_plataforma.csv")
SEGMENTATION_FPATH = os.path.join(BASE_DIR, "data", "inference_df.csv")
get_date_regex = r"ProjetoCOVIDAI_DATA_(?P<data>.*)_\d+.csv"
date_str = re.match(get_date_regex, MOST_RECENT_FILE).group("data")
dataset_date = parser.parse(date_str)
# Normalize name and CPF
df = pd.read_csv(REDCAP_FPATH)
df.nome = df.nome.apply(lambda s: to_normalized_string(s) if pd.notna(s) else s)
df.cpf = df.cpf.apply(lambda v: str(int(v)) if pd.notna(v) else v)
# Fill redcap_repeat_instrument missing data with "dados_pessoais_unico" since these
# rows are not filled automatically by the database
df.redcap_repeat_instrument = df.redcap_repeat_instrument.fillna("dados_pessoais_unico")
# Fill the missing hospitalization date with date of admission to ICU if existent
df.data_admissao_hospitalar = df.data_admissao_hospitalar.fillna(df.data_admissao_uti)
# Calculate length of stay based on hospitalization date and date of discharge or
# date of death
fill_length_of_stay = df.apply(
lambda row: calculate_length_of_stay(
row["data_admissao_hospitalar"],
row["data_alta_hospitalar"],
row["data_obito"]
),
axis=1
)
df.tempo_estadia_hospitalar = df.tempo_estadia_hospitalar.fillna(fill_length_of_stay)
# Calculate the date of discharge from ICU based on the date of admission
# in the ICU and length of stay in the ICU.
df["data_alta_uti"] = df.apply(
lambda row: sum_date_with_interval(
row["data_admissao_uti"],
row["tempo_estadia_uti"]
),
axis=1
)
# Calculate the date of removal of the ventilation based on the date of ventilation
# and the length of ventilation
df["data_remocao_ventilacao"] = df.apply(
lambda row: sum_date_with_interval(
row["data_ventilacao"],
row["tempo_ventilacao_mecanica"]
),
axis=1
)
# Calculate age and body mass index
df["idade"] = df.apply(
lambda row: calculate_age(
row["data_nasc"],
row["data_admissao_hospitalar"],
dataset_date
),
axis=1
)
df["imc"] = df.peso / (df.altura ** 2)
# Some of the rows have the platelet counts (plaquetas) in a different unit and
# need to be multiplied by 1000
df.plaquetas = df.plaquetas.apply(lambda v: v * 1000 if v < 1000 else v)
############################## Finished processing the ordinary data ##############################
# Here we define variables useful for processing the rest of the data
cols_intermediate_outcomes = [
"data_sepse",
"sepse",
"data_sdra",
"sdra",
"data_falencia_cardiaca",
"falencia_cardiaca",
"data_choque_septico",
"choque_septico",
"data_coagulopatia",
"coagulopatia",
"data_iam",
"iam",
"data_ira",
"ira"
]
cols_personal_data = [
"nome",
"cpf",
"instituicao",
"data_nasc",
"idade",
"sexo",
"altura",
"peso",
"imc",
"alta",
"obito",
"data_admissao_hospitalar",
"data_admissao_uti",
"data_obito",
"data_alta_hospitalar",
"data_alta_uti",
"data_ventilacao",
"data_remocao_ventilacao",
"tempo_estadia_hospitalar",
"tempo_estadia_uti",
"tempo_ventilacao_mecanica"
] + cols_intermediate_outcomes
cols_comorbidities = [
"has",
"ieca_bra",
"dm",
"asma",
"tabagista",
"dpoc",
"cardiopatia",
"irc",
"neoplasia",
"aids",
"neutropenia"
]
cols_respiratory_comorbidities = [
"asma", "tabagista", "dpoc"
]
cols_cardiac_comorbidities = [
"has", "cardiopatia"
]
cols_dates = [
col for col in df.columns
if "data" in col and col not in
cols_personal_data + ["redcap_data_access_group"]
]
identity_map = {
0: 0,
1: 1
}
irc_map = {
1: "negativo",
2: "nao_dialitico",
3: "dialitico"
}
neoplasia_map = {
1: "negativo",
2: "primaria_ou_secundaria",
3: "outras"
}
map_comorbidities = {
"irc": irc_map,
"neoplasia": neoplasia_map
}
# Now we build a separate dataframe for saving personal data.
df_personal_data = df[df.redcap_repeat_instrument == "dados_pessoais_unico"]
# Discriminate patients that were admitted to the hospital and to the ICU. Also, discriminate those that
# were discharged and those who died.
df_personal_data["internacao"] = df_personal_data.data_admissao_hospitalar.notna()
df_personal_data["uti"] = df_personal_data.data_admissao_uti.notna()
df_personal_data["obito"] = df_personal_data.data_obito.notna()
df_personal_data["alta"] = df_personal_data.data_alta_hospitalar.notna()
df_personal_data = df_personal_data[
["record_id"] + cols_personal_data + cols_comorbidities
]
for col in cols_comorbidities:
df_personal_data[col] = df_personal_data[col].map(map_comorbidities.get(col, identity_map))
# Count the number of previous comorbidities each patient has.
df_personal_data["n_comorbidades"] = df_personal_data[cols_comorbidities].apply(count_comorbidities, axis=1)
df_personal_data["n_comorbidades_respiratorias"] = df_personal_data[cols_respiratory_comorbidities].apply(count_comorbidities, axis=1)
df_personal_data["n_comorbidades_cardiacas"] = df_personal_data[cols_cardiac_comorbidities].apply(count_comorbidities, axis=1)
############################## Finished processing the personal data ##############################
# Now we build separate dataframes for saving clinical, treatment, laboratorial, image and confirmatory data.
# Clinical dataframe
cols_clinical = [
"data_dispneia",
"dispneia",
"data_sofa",
"sofa_score",
"data_saturacao_o2",
"saturacao_o2",
"data_saps_3",
"saps_3"
]
df_clinical = df[df.redcap_repeat_instrument == "evolucao_clinica_multiplo"]
df_clinical = df_clinical[["record_id"] + cols_clinical]
# We need separate dataframes for each date. Note that the clinical dataframe has four date columns. We will separate
# the columns accordingly.
df_dispneia = df_clinical[[
"record_id",
"data_dispneia",
"dispneia"
]]
df_sofa = df_clinical[[
"record_id",
"data_sofa",
"sofa_score"
]]
df_saturacao_o2 = df_clinical[[
"record_id",
"data_saturacao_o2",
"saturacao_o2"
]]
df_saps_3 = df_clinical[[
"record_id",
"data_saps_3",
"saps_3"
]]
# Treatment dataframe
cols_treatment = [
"data_ventilacao",
"ventilacao",
"pao2_fio2",
"data_pronacao",
"pronacao",
"data_hemodialise",
"hemodialise"
]
df_treatment = df[df.redcap_repeat_instrument == "evolucao_tratamento_multiplo"]
df_treatment = df_treatment[["record_id"] + cols_treatment]
# Note that the treatment dataframe has three date columns. We will separate the columns accordingly
# just as we did for the clinical dataframe.
df_ventilacao = df_treatment[[
"record_id",
"data_ventilacao",
"ventilacao",
"pao2_fio2"
]]
df_pronacao = df_treatment[[
"record_id",
"data_pronacao",
"pronacao"
]]
df_hemodialise = df_treatment[[
"record_id" ,
"data_hemodialise",
"hemodialise"
]]
# Laboratory results dataframe
cols_laboratory = [
"leucocitos",
"linfocitos",
"neutrofilos",
"tgp",
"creatinina",
"pcr",
"d_dimero",
"il_6",
"plaquetas",
"rni",
"troponina",
"pro_bnp",
"bicarbonato",
"lactato"
]
df_laboratory = df[df.redcap_repeat_instrument == "evolucao_laboratorial_multiplo"]
df_laboratory = df_laboratory[["record_id", "data_resultados_lab"] + cols_laboratory]
# Image dataframe
cols_image = [
"uid_imagem",
"tipo_imagem",
"data_imagem",
"padrao_imagem_rsna",
"score_tc_dir_sup",
"score_tc_dir_med",
"score_tc_dir_inf",
"score_tc_esq_sup",
"score_tc_esq_med",
"score_tc_esq_inf"
]
df_image = df[df.redcap_repeat_instrument == "evolucao_imagem_multiplo"]
df_image.uid_imagem = df_image.uid_imagem.apply(lambda s: s.strip() if pd.notna(s) else s)
df_image = df_image[["record_id", "redcap_repeat_instance"] + cols_image]
df_image = pd.merge(
left=df_personal_data[["record_id", "nome", "data_nasc", "data_admissao_hospitalar", "instituicao"]],
right=df_image,
how="right",
on="record_id",
validate="one_to_many"
)
uids_internados = set(df_image[df_image.data_admissao_hospitalar.notna()].uid_imagem.unique())
# For images, we also have the data retrieved from the deep segmentation model. We need
# to enrich our dataframe with the percentage of the lungs that is healthy, affected by
# ground-glass opacity or consolidation, and the amount of fat in the patient's body.
cols_series_id = [
"record_id",
"redcap_repeat_instance",
"infer_series_id"
]
df_series_id = pd.read_csv(SERIES_ID_FPATH, sep=";")
df_series_id = df_series_id[cols_series_id]
df_series_id = df_series_id.drop_duplicates()
cols_segmentation = [
"UID_Plataforma",
"series_id",
"seg_consolidacao",
"seg_normal",
"seg_vf1",
"seg_vf2",
"seg_vf3",
"volume_pulmao",
"taxa_gordura",
"volume_gordura",
"mediastino"
]
tmp_data = []
df_seg_raw = pd.read_csv(SEGMENTATION_FPATH)
df_seg_raw = df_seg_raw[cols_segmentation]
df_seg_raw = df_seg_raw[df_seg_raw.volume_pulmao >= 1.]
df_seg_raw = pd.merge(left=df_series_id, right=df_seg_raw, left_on="infer_series_id", right_on="series_id", how="right")
# Each TC study might have multiple series. We need to select the one with the
# largest lung volume, preferring series flagged as mediastino.
grouped = df_seg_raw.groupby("UID_Plataforma")
for uid_imagem, group in grouped:
if any(group.mediastino):
use_group = group[group.mediastino]
else:
use_group = group
sorted_group = use_group.sort_values("volume_pulmao")
tmp_data.append(
dict(sorted_group.iloc[-1])
)
df_seg = pd.DataFrame(tmp_data)
import textwrap
from typing import List, Set
from pandas._libs import NaT, lib
import pandas.core.common as com
from pandas.core.indexes.base import (
Index,
InvalidIndexError,
_new_Index,
ensure_index,
ensure_index_from_sequences,
)
from pandas.core.indexes.category import CategoricalIndex
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.interval import IntervalIndex
from pandas.core.indexes.multi import MultiIndex
from pandas.core.indexes.numeric import (
Float64Index,
Int64Index,
NumericIndex,
UInt64Index,
)
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.range import RangeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
_sort_msg = textwrap.dedent(
"""\
Sorting because non-concatenation axis is not aligned. A future version
of pandas will change to not sort by default.
To accept the future behavior, pass 'sort=False'.
To retain the current behavior and silence the warning, pass 'sort=True'.
"""
)
__all__ = [
"Index",
"MultiIndex",
"NumericIndex",
"Float64Index",
"Int64Index",
"CategoricalIndex",
"IntervalIndex",
"RangeIndex",
"UInt64Index",
"InvalidIndexError",
"TimedeltaIndex",
"PeriodIndex",
"DatetimeIndex",
"_new_Index",
"NaT",
"ensure_index",
"ensure_index_from_sequences",
"get_objs_combined_axis",
"union_indexes",
"get_consensus_names",
"all_indexes_same",
]
def get_objs_combined_axis(
objs, intersect: bool = False, axis=0, sort: bool = True, copy: bool = False
) -> Index:
"""
Extract combined index: return intersection or union (depending on the
value of "intersect") of indexes on given axis, or None if all objects
lack indexes (e.g. they are numpy arrays).
Parameters
----------
objs : list
Series or DataFrame objects, may be mix of the two.
intersect : bool, default False
If True, calculate the intersection between indexes. Otherwise,
calculate the union.
    axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to extract indexes from.
sort : bool, default True
Whether the result index should come out sorted or not.
copy : bool, default False
If True, return a copy of the combined index.
Returns
-------
Index
"""
obs_idxes = [obj._get_axis(axis) for obj in objs]
return _get_combined_index(obs_idxes, intersect=intersect, sort=sort, copy=copy)
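# Behaviour sketch for get_objs_combined_axis (shown as comments only, since
# this is an internal pandas module): for two Series s1 and s2 indexed by
# ['a', 'b'] and ['b', 'c'] respectively,
#
#   get_objs_combined_axis([s1, s2])                  # -> Index(['a', 'b', 'c'])
#   get_objs_combined_axis([s1, s2], intersect=True)  # -> Index(['b'])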
def _get_distinct_objs(objs: List[Index]) -> List[Index]:
"""
Return a list with distinct elements of "objs" (different ids).
Preserves order.
"""
ids: Set[int] = set()
res = []
for obj in objs:
if id(obj) not in ids:
ids.add(id(obj))
res.append(obj)
return res
def _get_combined_index(
indexes: List[Index],
intersect: bool = False,
sort: bool = False,
copy: bool = False,
) -> Index:
"""
Return the union or intersection of indexes.
Parameters
----------
indexes : list of Index or list objects
When intersect=True, do not accept list of lists.
intersect : bool, default False
If True, calculate the intersection between indexes. Otherwise,
calculate the union.
sort : bool, default False
Whether the result index should come out sorted or not.
copy : bool, default False
If True, return a copy of the combined index.
Returns
-------
Index
"""
# TODO: handle index names!
indexes = _get_distinct_objs(indexes)
if len(indexes) == 0:
index = Index([])
elif len(indexes) == 1:
index = indexes[0]
elif intersect:
index = indexes[0]
for other in indexes[1:]:
index = index.intersection(other)
else:
index = union_indexes(indexes, sort=sort)
index = ensure_index(index)
if sort:
try:
index = index.sort_values()
except TypeError:
pass
# GH 29879
if copy:
index = index.copy()
return index
def union_indexes(indexes, sort=True) -> Index:
"""
Return the union of indexes.
The behavior of sort and names is not consistent.
Parameters
----------
indexes : list of Index or list objects
sort : bool, default True
Whether the result index should come out sorted or not.
Returns
-------
Index
"""
if len(indexes) == 0:
raise AssertionError("Must have at least 1 Index to union")
if len(indexes) == 1:
result = indexes[0]
if isinstance(result, list):
result = Index(sorted(result))
return result
indexes, kind = _sanitize_and_check(indexes)
def _unique_indices(inds) -> Index:
"""
Convert indexes to lists and concatenate them, removing duplicates.
The final dtype is inferred.
Parameters
----------
inds : list of Index or list objects
Returns
-------
Index
"""
def conv(i):
if isinstance(i, Index):
i = i.tolist()
return i
return Index(lib.fast_unique_multiple_list([conv(i) for i in inds], sort=sort))
if kind == "special":
result = indexes[0]
if hasattr(result, "union_many"):
# DatetimeIndex
return result.union_many(indexes[1:])
else:
for other in indexes[1:]:
result = result.union(other)
return result
elif kind == "array":
index = indexes[0]
for other in indexes[1:]:
if not index.equals(other):
return _unique_indices(indexes)
name = get_consensus_names(indexes)[0]
if name != index.name:
index = index._shallow_copy(name=name)
return index
else: # kind='list'
return _unique_indices(indexes)
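# Behaviour sketch for union_indexes (illustrative, shown as comments): Index
# inputs are unioned pairwise, while plain list inputs go through
# _unique_indices,
#
#   union_indexes([Index([1, 2]), Index([2, 3])])   # -> Index([1, 2, 3])
#   union_indexes([[1, 2], [2, 3]])                 # -> Index([1, 2, 3])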
def _sanitize_and_check(indexes):
"""
Verify the type of indexes and convert lists to Index.
Cases:
- [list, list, ...]: Return ([list, list, ...], 'list')
- [list, Index, ...]: Return _sanitize_and_check([Index, Index, ...])
Lists are sorted and converted to Index.
- [Index, Index, ...]: Return ([Index, Index, ...], TYPE)
TYPE = 'special' if at least one special type, 'array' otherwise.
Parameters
----------
indexes : list of Index or list objects
Returns
-------
sanitized_indexes : list of Index or list objects
type : {'list', 'array', 'special'}
"""
kinds = list({type(index) for index in indexes})
if list in kinds:
if len(kinds) > 1:
indexes = [
                Index(com.try_sort(x)) if not isinstance(x, Index) else x
                for x in indexes
            ]
import re
import requests
import sys
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
from pdb import set_trace as pb
max_fallback = 2
class Currency:
def __init__(self):
self.data = {}
self.data_hist = {}
def get(self, currency_pair):
'''
Parameters
----------
currency_pair : str
Returns
-------
dictionary of the currency pair
'''
if currency_pair not in self.data:
curr = get_historical_currency(currency_pair)
self.data[currency_pair] = curr.T.to_dict()[curr.index[0]]
return self.data[currency_pair]
def get_hist(self, currency_pair, dates):
if currency_pair not in self.data_hist:
self.data_hist[currency_pair] = get_historical_currency(currency_pair, dates)
return self.data_hist[currency_pair]
def fill(self):
'''
        Fetch rates for every currency so that all cross pairs are cached
'''
if self.data == {}: self.get('USD')
        i = next(iter(self.data))
for k in self.data[i].keys():
self.get(k)
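# Illustrative sketch (never called): the cache avoids re-downloading a base
# currency that has already been fetched. Requires network access to xe.com;
# the returned dict maps currency codes to rates against the base.
def _example_currency_cache():
    fx = Currency()
    usd_rates = fx.get('USD')   # downloads today's USD cross rates
    fx.get('USD')               # second call is served from the in-memory cache
    return usd_rates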
def get_historical_currency(base, date=pd.datetime.today().strftime('%Y-%m-%d')):
'''
Parameters
----------
base : str
currency base
date : str/datetime - list
list of dates
Returns
-------
pandas dataframe of currency pairs
Example
-------
get_historical_currency(
'USD',
pd.bdate_range('2017-01-03', '2019-01-04')
)
'''
if type(date) in [list, pd.Series, pd.core.indexes.datetimes.DatetimeIndex]:
return pd.concat([get_historical_currency(base=base, date=d) for d in date]).sort_index()
date = pd.to_datetime(date).strftime('%Y-%m-%d')
url = 'https://www.xe.com/currencytables/?from={base_currency}&date={date}'.format(
base_currency=base,
date=date
)
count = 0
while count<=10:
try:
curr = pd.read_html(url)
            assert curr[0].shape[1] >= 4
break
except:
count+=1
curr = curr[0].iloc[:,]
curr['date'] = date
try:
curr = curr.iloc[:,[4,0,2]]
except:
print(curr)
print(date)
assert False
curr.columns=['date','currency','value']
curr = curr.pivot_table(values='value', index='date', columns='currency')
return curr
def _clean_bb_ticker(symbol, fallback):
if fallback == 0:
exchange_dict = {
'CN': 'TO',
'AU': 'AX',
'HK': 'HK',
'LN': 'L',
'TI': 'IS',
'SW': 'SW',
'US': None,
}
elif fallback == 1:
exchange_dict = {
'CN': 'V',
}
else:
exchange_dict = {}
symbol = symbol.upper()
symbol = symbol.replace(' EQUITY', '')
str_split = symbol.split(' ')
if len(str_split)==1: return symbol
symb, exchange = str_split
if exchange.upper() in exchange_dict:
correct_symbol = exchange_dict[exchange.upper()]
else:
print('Did not find symbol: {} in exchange_dict ({})'.format(exchange.upper(), symb))
correct_symbol = exchange.upper()
if correct_symbol != None:
symbol = symb+'.'+correct_symbol
else:
symbol = symb
return symbol
def statistics(symbols, currency=None, date=None, **args):
'''
Parameters
----------
symbols : str/list/pd.Series
symbols
convert_currency : None - str
convert to currency
e.g. ['USD', 'IDR', 'GBP', 'ETH', 'CAD',
'JPY', 'HUF', 'MYR', 'SEK', 'SGD', 'HKD',
'AUD', 'CHF', 'CNY', 'NZD', 'THB', 'EUR',
'RUB', 'INR', 'MXN', 'BTC', 'PHP', 'ZAR']
date : None, str/datetime
convert market cap and other price measures to
a previous date. Does not adjust for share count
changes
Returns
-------
pandas dataframe of stats from ticker
'''
convert_currency = currency
if '_curr' in args:
curr = args['_curr']
else:
curr = None
if type(symbols) in [list, pd.Series, set]:
global _currency
_currency = Currency()
return pd.concat([statistics(symb, currency=currency) for symb in symbols], sort=True)
elif not '_currency' in globals():
_currency = Currency()
if 'fallback' in args:
fallback = args['fallback']
else:
fallback = 0
ticker = _clean_bb_ticker(symbols, fallback)
url = 'https://finance.yahoo.com/quote/{ticker}/key-statistics'.format(
ticker=ticker
)
req = requests.get(url)
soup = BeautifulSoup(req.text, 'lxml')
main = soup.find_all('tr')
data = {}
dig_dict = {'B': 1000000000,'M': 1000000,'K': 1000}
for i in main:
table_cells = i.find_all('td')
if len(table_cells)==2:
k, v = table_cells
k = str(k.find_all('span')[0].getText())
try:
v = str(v.getText())
except:
                v = np.nan
try:
pd.to_datetime(v)
isdate = True
except:
isdate = False
try:
                if pd.isna(v):
pass
elif str(v[-1]).upper() in dig_dict and str(v[:-1]).replace(',','').replace('.','').replace('-','').isdigit():
v = float(v[:-1])*dig_dict[v[-1].upper()]
elif (str(v[-1]) == '%') and (str(v)[:-1].replace(',','').replace('.','').replace('-','').isdigit()):
v = float(v[:-1])*1.0/100.0
elif (str(v).replace(',','').replace('.','').replace('-','').isdigit()):
v = float(v)
elif isdate:
v = pd.to_datetime(v).date().strftime('%Y-%m-%d')
except:
pass
data[k] = v
if data == {} and 'retry' not in args and fallback < max_fallback:
fallback += 1
data = statistics(symbols, fallback=fallback)
data.index = [symbols]
elif data == {} and 'retry' not in args:
data = statistics(symbols.split(' ')[0]+' Equity', retry=True)
else:
data = pd.DataFrame([data], index=[symbols])
if 'local_currency' not in data.columns:
spans = [i for i in soup.find_all('span') if 'Currency in' in i.get_text()]
spans = [i.get_text().split('Currency in ')[-1] for i in spans]
if spans!=[]:
data['local_currency'] = spans[0]
else:
data['local_currency'] = None
if convert_currency != None:
currency_divider = []
for iid, row in data.iterrows():
curr = _currency.get(row['local_currency'])
currency_divider.append(1/curr[convert_currency])
data['currency_divider'] = currency_divider
for col in ['EBITDA', 'Gross Profit', 'Levered Free Cash Flow', 'Market Cap (intraday)', 'Revenue',
'Operating Cash Flow', 'Revenue Per Share', 'Gross Profit', 'Net Income Avi to Common',
'Diluted EPS', 'Total Cash', 'Total Cash Per Share', 'Total Debt']:
if col in data.columns:
data[col] = pd.to_numeric(data[col].replace('N/A', np.nan), errors='ignore')/data['currency_divider']
if date != None:
prices = download(symbol=symbols, start_date=pd.to_datetime(date), end_date=pd.datetime.today().date())
multiplier = prices['Close'].iloc[0]/prices['Close'].iloc[-1]
for col in ['Market Cap (intraday)']:
if col in data.columns:
data[col]*=multiplier
return data
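# Illustrative sketch (never called): key statistics for a US and a Canadian
# listing, converted to USD. The tickers are examples only and the available
# columns depend on what Yahoo Finance exposes at request time.
def _example_statistics_usage():
    stats = statistics(['AAPL', 'SHOP CN Equity'], currency='USD')
    return stats[['Market Cap (intraday)', 'local_currency']]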
def get_currency(ticker):
'''
Parameters
----------
ticker : str
ticker
Returns
-------
currency that the ticker is priced in
'''
return statistics(ticker)['local_currency'].iloc[0]
def download(symbol, start_date, end_date, interval='1d', events='history', currency=None, **args):
'''
Parameters
----------
symbol : str/list/pd.Series
list of symbols
start_date : str/datetime
start date
end_date : str/datetime
end date
interval : str
'1d'
events : str
'history', 'div'
currency : str
currency to convert to
Returns
-------
pandas dataframe of prices
Example
-------
df = get_prices('AAPL', '2019-01-01', '2019-01-31')
'''
if 'fallback' in args:
fallback = args['fallback']
else:
fallback = 0
if type(symbol) is pd.Series:
symbol = symbol.tolist()
if '_currency' in args:
_currency = args['_currency']
else:
_currency = Currency()
if currency != None:
        dates = pd.bdate_range(start_date, end_date)
import numpy as np
import pandas as pd
#
from ... import global_var, capacity
def compute_all_programs(df_outage,
list_plants = None,
):
"""
Computes the availability programs from the unavailabilty files
for each production asset at the different publication dates.
:param df_outage: The outages dataframe
:param list_plants: The list of production assets to consider
:type df_outage: pd.DataFrame
:type list_plants: list of strings
:return: A dictionary of the availability programs and
the problematic publications found
:rtype: (dict, dict)
"""
try:
capacity_end = capacity.unit.load(source = global_var.data_source_capacity_rte,
map_code = 'FR',
)
capacity_end = {k:v
for ii, (k, v) in capacity_end[[global_var.unit_name,
global_var.capacity_end_date_local,
]].iterrows()
if bool(v)
}
except FileNotFoundError:
capacity_end = {}
### Compute programs
if list_plants is None:
list_plants = sorted(set((df_outage[global_var.unit_name])))
dikt_programs = {}
dikt_bad_publications = {}
for ii, unit_name in enumerate(list_plants):
print('\rCompute program - {0:3}/{1:3} - {2:20}'.format(ii+1,
len(list_plants),
unit_name,
),
end = '',
)
df_unit = df_outage.loc[df_outage[global_var.unit_name] == unit_name]
dikt_programs[unit_name], dikt_bad_publications[unit_name] = compute_program(df_unit,
unit_name = unit_name,
capacity_end_date = capacity_end.get(unit_name),
)
print()
dikt_bad_publications = {k:v
for k, v in dikt_bad_publications.items()
if len(v) > 0
}
return dikt_programs, dikt_bad_publications
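# Illustrative sketch (never called): restricting the computation to a single
# unit. 'TRICASTIN 1' is a made-up unit name here; real names come from the
# outage files themselves.
def _example_single_unit(df_outage):
    programs, bad_publis = compute_all_programs(df_outage,
                                                list_plants=['TRICASTIN 1'],
                                                )
    return programs['TRICASTIN 1'], bad_publis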
###############################################################################
def compute_program(dg,
unit_name = None,
capacity_end_date = None,
):
"""
Computes the availability programs
for one asset asset at the different publication dates.
:param dg: The outages dataframe for the considered unit
:param unit_name: The name of the unit
    :param capacity_end_date: The date at which the unit's nameplate capacity
                              ends (e.g. decommissioning), if any
    :type dg: pd.DataFrame
    :type unit_name: string
    :type capacity_end_date: datetime-like or None
:return: The expected availabilty programs and
the set of problematic publications
:rtype: (pd.DataFrame, list)
"""
dg = dg.sort_index(level = global_var.publication_dt_UTC)
### Checks
assert dg.shape[0] > 0
assert len(dg[global_var.unit_name].unique()) == 1
### Publication dates
pub_dt_start = min(dg.index.get_level_values(global_var.publication_dt_UTC).min() - np.timedelta64(1, 'h'),
pd.to_datetime('2010-01-01 00:00').tz_localize('UTC'),
)
dt_publications = pd.DatetimeIndex( [pub_dt_start]
+ list(dg.index.get_level_values(global_var.publication_dt_UTC)),
name = global_var.publication_dt_UTC,
)
### Production Steps
prod_timesteps = np.sort(np.unique(dg[[global_var.outage_begin_dt_UTC,
global_var.outage_end_dt_UTC,
]]))
production_steps = pd.DatetimeIndex( [prod_timesteps.min() - np.timedelta64(1, 'h')]
+ list(prod_timesteps)
+ [prod_timesteps.max() + np.timedelta64(1, 'h')],
name = global_var.production_step_dt_UTC,
)
### Capacity
nameplate_capacity_max = max(dg[global_var.capacity_nominal_mw])
assert not np.isnan(nameplate_capacity_max)
# Init program
dikt_active = {}
program = pd.DataFrame(nameplate_capacity_max,
index = dt_publications,
columns = production_steps,
)
bad_publications = []
cancelled_publications = []
active_publication_dt = pd.Series(index = production_steps,
dtype = str,
)
### Include all updates
for ii, ((publi_id, version, publi_dt), publi) in enumerate(dg.iterrows()):
if publi_id in dikt_active:
### Get previous version of publication
prev = dikt_active[publi_id]
prev_outage_begin = prev[global_var.outage_begin_dt_UTC]
prev_outage_end = prev[global_var.outage_end_dt_UTC]
### Get the nameplate capacity
prev_nameplate_capacity = prev[global_var.capacity_nominal_mw]
if np.isnan(prev_nameplate_capacity):
prev_nameplate_capacity = nameplate_capacity_max
### Identify where previous publication is still the most recent
active_outage_window = active_publication_dt.loc[prev_outage_begin:prev_outage_end].iloc[:-1]
prev_outage_active = active_outage_window.loc[(active_outage_window.values == publi_id)].index
### Reset the capacity
if not prev_outage_active.empty:
prev_still_active = [program.columns.get_loc(dd)
for dd in prev_outage_active
]
program.iloc[ii+1:,prev_still_active] = prev_nameplate_capacity
del dikt_active[publi_id]
else:
### Check coherence
first_version = (version == 1)
publi_was_cancelled = (publi_id in cancelled_publications)
publi_being_created = (publi[global_var.publication_creation_dt_UTC] == publi_dt)
if not ( first_version
or publi_was_cancelled
or publi_being_created
):
reasons = '+'.join([pb
for (pb, correct) in [('pb_version', first_version),
('pb_cancelled_publi', publi_was_cancelled),
('publi_creation_dt', publi_being_created),
]
if not correct
])
bad_publications.append((reasons, publi))
### Add effect of the new publication
if publi[global_var.outage_status] == global_var.outage_status_cancelled:
cancelled_publications.append(publi_id)
else:
remaining_power_mw = publi[global_var.capacity_available_mw]
correction_begin = publi[global_var.outage_begin_dt_UTC]
correction_end = publi[global_var.outage_end_dt_UTC]
nameplate_capacity = publi[global_var.capacity_nominal_mw]
if np.isnan(nameplate_capacity):
nameplate_capacity = nameplate_capacity_max
cond_capacity_end = ( bool(capacity_end_date)
and correction_end >= capacity_end_date
)
if cond_capacity_end and (nameplate_capacity == 0):
slice_correction = slice(program.columns.get_loc(correction_begin),
None,
)
else:
slice_correction = slice(program.columns.get_loc(correction_begin),
program.columns.get_loc(correction_end),
)
program.iloc[ii+1:,slice_correction] = remaining_power_mw
active_publication_dt.iloc[slice_correction] = publi_id
dikt_active[publi_id] = publi
    ### Keep only the last of simultaneous publications
program = program.groupby(global_var.publication_dt_UTC).tail(1)
### Checks
    assert not pd.isnull(program.index.values).any()
from typing import Dict, Optional, Tuple, Union
import numpy as np
import pandas as pd
from autotabular.pipeline.base import DATASET_PROPERTIES_TYPE, PIPELINE_DATA_DTYPE
from autotabular.pipeline.components.base import AutotabularPreprocessingAlgorithm
from autotabular.pipeline.constants import DENSE, INPUT, SPARSE, UNSIGNED_DATA
from ConfigSpace.configuration_space import ConfigurationSpace
from sklearn.feature_extraction.text import TfidfVectorizer
class TextTFIDFTransformer(AutotabularPreprocessingAlgorithm):
def __init__(self,
column,
random_state: Optional[np.random.RandomState] = None):
self.column = column
self.random_state = random_state
def fit(self,
X: PIPELINE_DATA_DTYPE,
y: Optional[PIPELINE_DATA_DTYPE] = None) -> 'TextTFIDFTransformer':
self.preprocessor = TextTFIDFTransformerOriginal()
self.preprocessor.fit(X, self.column)
return self
def transform(self, X: PIPELINE_DATA_DTYPE) -> PIPELINE_DATA_DTYPE:
if self.preprocessor is None:
raise NotImplementedError()
return self.preprocessor.transform(X)
@staticmethod
def get_properties(
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None
) -> Dict[str, Optional[Union[str, int, bool, Tuple]]]:
return {
'shortname': 'TextTFIDFTransformer',
'name': 'Text TFIDF Transformer',
'handles_regression': True,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': True,
'handles_multioutput': True,
            # TODO: find out if this is right!
'handles_sparse': True,
'handles_dense': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (INPUT, ),
}
@staticmethod
def get_hyperparameter_search_space(
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None
) -> ConfigurationSpace:
return ConfigurationSpace()
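# Illustrative sketch (never called): fitting the wrapper on a DataFrame `X`
# that has a free-text column. The column name 'description' is an assumption.
def _example_text_tfidf(X):
    tfidf = TextTFIDFTransformer(column='description')
    tfidf.fit(X)
    return tfidf.transform(X)   # TF-IDF features derived from the text column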
class TextTFIDFTransformerOriginal(object):
def __init__(self):
self._new_columns = []
self._old_column = None
self._max_features = 100
self._vectorizer = None
def fit(self, X, column):
self._old_column = column
self._vectorizer = TfidfVectorizer(
analyzer='word',
stop_words='english',
lowercase=True,
max_features=self._max_features,
)
        x = X[column][~pd.isnull(X[column])]
from .utils import *
import pandas as pd
def calculateWeight(monthlyData, sinStaProp):
years = monthlyData.index.year.unique()
if sinStaProp == 'Mean':
func = cal_mean
elif sinStaProp == 'CV':
func = cal_CVAR
elif sinStaProp == 'AR-1':
func = cal_AR1
elif sinStaProp == 'Skewness':
func = cal_skewness
stat_list = [func(monthlyData[monthlyData.index.year == curYear])
for curYear in years]
# return 1.0 / np.var(stat_list) if len(years) > 4 else (1 / np.mean(stat_list)) ** 2
return 1.0 / np.var(stat_list)
def cal_sta_prop(sub_data):
sub_data_1h = sub_data.resample('1H').sum()
sub_data_6h = sub_data.resample('6H').sum()
sub_data_24h = sub_data.resample('1D').sum()
mean = cal_mean(sub_data_1h)
mean = mean if type(mean) == float else mean.item()
return_array = np.array([
mean,
cal_CVAR(sub_data),
cal_AR1(sub_data.to_numpy().flatten()),
cal_skewness(sub_data),
cal_CVAR(sub_data_1h),
cal_AR1(sub_data_1h.to_numpy().flatten()),
cal_skewness(sub_data_1h),
cal_CVAR(sub_data_6h),
cal_AR1(sub_data_6h.to_numpy().flatten()),
cal_skewness(sub_data_6h),
cal_CVAR(sub_data_24h),
cal_AR1(sub_data_24h.to_numpy().flatten()),
cal_skewness(sub_data_24h), ])
return return_array
def create_stats_file(rawData, propertyList, timeScaleList, outputPath, weightFile_path):
month = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
data = {}
weight = {}
data['Month'] = month
weight['Month'] = month
    # Compute the mean first
subSeq = change_timescale(rawData, '1h')
col_data = []
col_weight = []
for m in range(1, 13):
monthlyData = subSeq[subSeq.index.month == m]
mData = monthlyData.mean().item()
mWeight = calculateWeight(monthlyData, 'Mean')
col_data.append(round(mData, 7))
col_weight.append(round(mWeight, 7))
data['Mean_60'] = col_data
weight['Mean_60'] = col_weight
    # Compute the remaining statistics for each timescale
for sinTimescale in timeScaleList:
scaledData = change_timescale(rawData, sinTimescale)
for sinStaProp in propertyList:
col_data = []
col_weight = []
for curMonth in range(1, 13):
monthlyData = scaledData[scaledData.index.month == curMonth]
mWeight = calculateWeight(monthlyData, sinStaProp)
input = monthlyData.to_numpy().flatten()
if sinStaProp == 'CV':
ret = cal_CVAR(input)
elif sinStaProp == 'AR-1':
ret = cal_AR1(input)
elif sinStaProp == 'Skewness':
ret = cal_skewness(input)
col_data.append(round(ret, 7))
col_weight.append(round(mWeight, 7))
data['{}_{}'.format(str(sinStaProp), str(sinTimescale))] = col_data
weight['{}_{}'.format(
str(sinStaProp), str(sinTimescale))] = col_weight
    df = pd.DataFrame(data)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Interfaces to generate reportlets
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import os
import os.path as op
import time
import json
import re
from collections import defaultdict
from mpl_toolkits.mplot3d import Axes3D # noqa: F401
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import animation
from scipy.io.matlab import loadmat
import pandas as pd
import numpy as np
from nipype.interfaces.base import (
traits, TraitedSpec, BaseInterfaceInputSpec,
File, Directory, InputMultiPath, InputMultiObject, Str, isdefined,
SimpleInterface)
from nipype.interfaces import freesurfer as fs
from nipype.interfaces.mixins import reporting
import nibabel as nb
from dipy.core.sphere import HemiSphere
from .gradients import concatenate_bvals, concatenate_bvecs
from .qc import createB0_ColorFA_Mask_Sprites, createSprite4D
from .bids import get_bids_params
from ..niworkflows.viz.utils import peak_slice_series, odf_roi_plot
from .converters import fib2amps, mif2amps
SUBJECT_TEMPLATE = """\t<ul class="elem-desc">
\t\t<li>Subject ID: {subject_id}</li>
\t\t<li>Structural images: {n_t1s:d} T1-weighted {t2w}</li>
\t\t<li>Diffusion-weighted series: inputs {n_dwis:d}, outputs {n_outputs:d}</li>
{groupings}
\t\t<li>Resampling targets: {output_spaces}
\t\t<li>FreeSurfer reconstruction: {freesurfer_status}</li>
\t</ul>
"""
DIFFUSION_TEMPLATE = """\t\t<h3 class="elem-title">Summary</h3>
\t\t<ul class="elem-desc">
\t\t\t<li>Phase-encoding (PE) direction: {pedir}</li>
\t\t\t<li>Susceptibility distortion correction: {sdc}</li>
\t\t\t<li>Coregistration Transform: {coregistration}</li>
\t\t\t<li>Denoising Window: {denoise_window}</li>
\t\t\t<li>HMC Transform: {hmc_transform}</li>
\t\t\t<li>HMC Model: {hmc_model}</li>
\t\t\t<li>DWI series resampled to spaces: {output_spaces}</li>
\t\t\t<li>Confounds collected: {confounds}</li>
\t\t\t<li>Impute slice threshold: {impute_slice_threshold}</li>
\t\t</ul>
{validation_reports}
"""
ABOUT_TEMPLATE = """\t<ul>
\t\t<li>qsiprep version: {version}</li>
\t\t<li>qsiprep command: <code>{command}</code></li>
\t\t<li>Date preprocessed: {date}</li>
\t</ul>
</div>
"""
TOPUP_TEMPLATE = """\
\t\t<p class="elem-desc">
\t\t{summary}</p>
"""
GROUPING_TEMPLATE = """\t<ul>
\t\t<li>Output Name: {output_name}</li>
{input_files}
</ul>
"""
INTERACTIVE_TEMPLATE = """
<script src="https://unpkg.com/vue"></script>
<script src="https://nipreps.github.io/dmriprep-viewer/dmriprepReport.umd.min.js"></script>
<link rel="stylesheet" href="https://nipreps.github.io/dmriprep-viewer/dmriprepReport.css">
<div id="app">
<demo :report="report"></demo>
</div>
<script>
var report = REPORT
new Vue({
components: {
demo: dmriprepReport
},
data () {
return {
report
}
}
}).$mount('#app')
</script>
"""
class SummaryOutputSpec(TraitedSpec):
out_report = File(exists=True, desc='HTML segment containing summary')
class SummaryInterface(SimpleInterface):
output_spec = SummaryOutputSpec
def _generate_segment(self):
raise NotImplementedError()
def _run_interface(self, runtime):
segment = self._generate_segment()
fname = os.path.join(runtime.cwd, 'report.html')
with open(fname, 'w') as fobj:
fobj.write(segment)
self._results['out_report'] = fname
return runtime
class SubjectSummaryInputSpec(BaseInterfaceInputSpec):
t1w = InputMultiPath(File(exists=True), desc='T1w structural images')
t2w = InputMultiPath(File(exists=True), desc='T2w structural images')
subjects_dir = Directory(desc='FreeSurfer subjects directory')
subject_id = Str(desc='Subject ID')
dwi_groupings = traits.Dict(desc='groupings of DWI files and their output names')
output_spaces = traits.List(desc='Target spaces')
template = traits.Enum('MNI152NLin2009cAsym', desc='Template space')
class SubjectSummaryOutputSpec(SummaryOutputSpec):
# This exists to ensure that the summary is run prior to the first ReconAll
# call, allowing a determination whether there is a pre-existing directory
subject_id = Str(desc='FreeSurfer subject ID')
class SubjectSummary(SummaryInterface):
input_spec = SubjectSummaryInputSpec
output_spec = SubjectSummaryOutputSpec
def _run_interface(self, runtime):
if isdefined(self.inputs.subject_id):
self._results['subject_id'] = self.inputs.subject_id
return super(SubjectSummary, self)._run_interface(runtime)
def _generate_segment(self):
if not isdefined(self.inputs.subjects_dir):
freesurfer_status = 'Not run'
else:
recon = fs.ReconAll(subjects_dir=self.inputs.subjects_dir,
subject_id=self.inputs.subject_id,
T1_files=self.inputs.t1w,
flags='-noskullstrip')
if recon.cmdline.startswith('echo'):
freesurfer_status = 'Pre-existing directory'
else:
freesurfer_status = 'Run by qsiprep'
output_spaces = [self.inputs.template if space == 'template' else space
for space in self.inputs.output_spaces]
t2w_seg = ''
if self.inputs.t2w:
t2w_seg = '(+ {:d} T2-weighted)'.format(len(self.inputs.t2w))
# Add text for how the dwis are grouped
n_dwis = 0
n_outputs = 0
groupings = ''
if isdefined(self.inputs.dwi_groupings):
for output_fname, group_info in self.inputs.dwi_groupings.items():
n_outputs += 1
files_desc = []
files_desc.append(
'\t\t\t<li>Scan group: %s (PE Dir %s)</li><ul>' % (
output_fname, group_info['dwi_series_pedir']))
files_desc.append('\t\t\t\t<li>DWI Files: </li>')
for dwi_file in group_info['dwi_series']:
files_desc.append("\t\t\t\t\t<li> %s </li>" % dwi_file)
n_dwis += 1
fieldmap_type = group_info['fieldmap_info']['suffix']
if fieldmap_type is not None:
files_desc.append('\t\t\t\t<li>Fieldmap type: %s </li>' % fieldmap_type)
for key, value in group_info['fieldmap_info'].items():
files_desc.append("\t\t\t\t\t<li> %s: %s </li>" % (key, str(value)))
n_dwis += 1
files_desc.append("</ul>")
groupings += GROUPING_TEMPLATE.format(output_name=output_fname,
input_files='\n'.join(files_desc))
return SUBJECT_TEMPLATE.format(subject_id=self.inputs.subject_id,
n_t1s=len(self.inputs.t1w),
t2w=t2w_seg,
n_dwis=n_dwis,
n_outputs=n_outputs,
groupings=groupings,
output_spaces=', '.join(output_spaces),
freesurfer_status=freesurfer_status)
class DiffusionSummaryInputSpec(BaseInterfaceInputSpec):
    distortion_correction = traits.Str(mandatory=True,
                                       desc='Susceptibility distortion correction method')
    pe_direction = traits.Enum(None, 'i', 'i-', 'j', 'j-', mandatory=True,
                               desc='Phase-encoding direction detected')
impute_slice_threshold = traits.CFloat(desc='threshold for imputing a slice')
hmc_transform = traits.Str(mandatory=True, desc='transform used during HMC')
hmc_model = traits.Str(desc='model used for hmc')
b0_to_t1w_transform = traits.Enum("Rigid", "Affine", desc='Transform type for coregistration')
dwi_denoise_window = traits.Int(desc='window size for dwidenoise')
output_spaces = traits.List(desc='Target spaces')
confounds_file = File(exists=True, desc='Confounds file')
validation_reports = InputMultiObject(File(exists=True))
class DiffusionSummary(SummaryInterface):
input_spec = DiffusionSummaryInputSpec
def _generate_segment(self):
if self.inputs.pe_direction is None:
pedir = 'MISSING - Assuming Anterior-Posterior'
else:
pedir = {'i': 'Left-Right', 'j': 'Anterior-Posterior'}[self.inputs.pe_direction[0]]
if isdefined(self.inputs.confounds_file):
with open(self.inputs.confounds_file) as cfh:
conflist = cfh.readline().strip('\n').strip()
else:
conflist = ''
validation_summaries = []
for summary in self.inputs.validation_reports:
with open(summary, 'r') as summary_f:
validation_summaries.extend(summary_f.readlines())
validation_summary = '\n'.join(validation_summaries)
return DIFFUSION_TEMPLATE.format(
pedir=pedir,
sdc=self.inputs.distortion_correction,
coregistration=self.inputs.b0_to_t1w_transform,
hmc_transform=self.inputs.hmc_transform,
hmc_model=self.inputs.hmc_model,
denoise_window=self.inputs.dwi_denoise_window,
output_spaces=', '.join(self.inputs.output_spaces),
confounds=re.sub(r'[\t ]+', ', ', conflist),
impute_slice_threshold=self.inputs.impute_slice_threshold,
validation_reports=validation_summary
)
class AboutSummaryInputSpec(BaseInterfaceInputSpec):
version = Str(desc='qsiprep version')
command = Str(desc='qsiprep command')
# Date not included - update timestamp only if version or command changes
class AboutSummary(SummaryInterface):
input_spec = AboutSummaryInputSpec
def _generate_segment(self):
return ABOUT_TEMPLATE.format(version=self.inputs.version,
command=self.inputs.command,
date=time.strftime("%Y-%m-%d %H:%M:%S %z"))
class TopupSummaryInputSpec(BaseInterfaceInputSpec):
summary = Str(desc='Summary of TOPUP inputs')
class TopupSummary(SummaryInterface):
input_spec = TopupSummaryInputSpec
def _generate_segment(self):
return TOPUP_TEMPLATE.format(summary=self.inputs.summary)
class GradientPlotInputSpec(BaseInterfaceInputSpec):
orig_bvec_files = InputMultiObject(File(exists=True), mandatory=True,
desc='bvecs from DWISplit')
orig_bval_files = InputMultiObject(File(exists=True), mandatory=True,
desc='bvals from DWISplit')
source_files = traits.List(desc='source file for each gradient')
final_bvec_file = File(exists=True, desc='bval file')
class GradientPlotOutputSpec(SummaryOutputSpec):
plot_file = File(exists=True)
class GradientPlot(SummaryInterface):
input_spec = GradientPlotInputSpec
output_spec = GradientPlotOutputSpec
def _run_interface(self, runtime):
outfile = os.path.join(runtime.cwd, "bvec_plot.gif")
sns.set_style("whitegrid")
sns.set_context("paper", font_scale=0.8)
orig_bvecs = concatenate_bvecs(self.inputs.orig_bvec_files)
bvals = concatenate_bvals(self.inputs.orig_bval_files, None)
if isdefined(self.inputs.source_files):
file_array = np.array(self.inputs.source_files)
_, filenums = np.unique(file_array, return_inverse=True)
else:
filenums = np.ones_like(bvals)
# Account for the possibility that this is a PE Pair average
if len(filenums) == len(bvals) * 2:
filenums = filenums[:len(bvals)]
# Plot the final bvecs if provided
final_bvecs = None
if isdefined(self.inputs.final_bvec_file):
final_bvecs = np.loadtxt(self.inputs.final_bvec_file).T
plot_gradients(bvals, orig_bvecs, filenums, outfile, final_bvecs)
self._results['plot_file'] = outfile
return runtime
def plot_gradients(bvals, orig_bvecs, source_filenums, output_fname, final_bvecs=None,
frames=60):
qrads = np.sqrt(bvals)
qvecs = (qrads[:, np.newaxis] * orig_bvecs)
qx, qy, qz = qvecs.T
maxvals = qvecs.max(0)
minvals = qvecs.min(0)
def add_lines(ax):
labels = ['L', 'P', 'S']
for axnum in range(3):
minvec = np.zeros(3)
maxvec = np.zeros(3)
minvec[axnum] = minvals[axnum]
maxvec[axnum] = maxvals[axnum]
x, y, z = np.column_stack([minvec, maxvec])
ax.plot(x, y, z, color="k")
txt_pos = maxvec + 5
ax.text(txt_pos[0], txt_pos[1], txt_pos[2], labels[axnum], size=8,
zorder=1, color='k')
if final_bvecs is not None:
if final_bvecs.shape[0] == 3:
final_bvecs = final_bvecs.T
fqx, fqy, fqz = (qrads[:, np.newaxis] * final_bvecs).T
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 5),
subplot_kw={"aspect": "equal", "projection": "3d"})
orig_ax = axes[0]
final_ax = axes[1]
axes_list = [orig_ax, final_ax]
final_ax.scatter(fqx, fqy, fqz, c=source_filenums, marker="+")
orig_ax.scatter(qx, qy, qz, c=source_filenums, marker="+")
final_ax.axis('off')
add_lines(final_ax)
final_ax.set_title('After Preprocessing')
else:
fig, orig_ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5),
subplot_kw={"aspect": "equal", "projection": "3d"})
axes_list = [orig_ax]
orig_ax.scatter(qx, qy, qz, c=source_filenums, marker="+")
orig_ax.axis('off')
orig_ax.set_title("Original Scheme")
add_lines(orig_ax)
# Animate rotating the axes
rotate_amount = np.ones(frames) * 180 / frames
stay_put = np.zeros_like(rotate_amount)
rotate_azim = np.concatenate([rotate_amount, stay_put, -rotate_amount, stay_put])
rotate_elev = np.concatenate([stay_put, rotate_amount, stay_put, -rotate_amount])
plt.tight_layout()
def rotate(i):
for ax in axes_list:
ax.azim += rotate_azim[i]
ax.elev += rotate_elev[i]
return tuple(axes_list)
anim = animation.FuncAnimation(fig, rotate, frames=frames*4,
interval=20, blit=False)
anim.save(output_fname, writer='imagemagick', fps=32)
plt.close(fig)
fig = None
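# Illustrative sketch (added; not part of the original interface code): how
# plot_gradients could be exercised with synthetic data. Assumed shapes are
# bvals (N,), orig_bvecs (N, 3) unit vectors and source_filenums (N,); the
# helper name below is hypothetical and never called by the pipeline.
def _demo_plot_gradients(output_fname="demo_bvec_plot.gif"):
    rng = np.random.RandomState(0)
    n_dirs = 60
    bvals = np.repeat([0.0, 1000.0, 2000.0], n_dirs // 3)
    bvecs = rng.normal(size=(n_dirs, 3))
    bvecs /= np.linalg.norm(bvecs, axis=1, keepdims=True)
    filenums = np.zeros(n_dirs, dtype=int)
    plot_gradients(bvals, bvecs, filenums, output_fname, frames=10)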
def topup_selection_to_report(selected_indices, original_files, spec_lookup,
image_source='combined DWI series'):
"""Write a description of how the images were selected for TOPUP.
>>> selected_indices = [0, 15, 30, 45]
>>> original_files = ["sub-1_dir-AP_dwi.nii.gz"] * 30 + ["sub-1_dir-PA_dwi.nii.gz"] * 30
>>> spec_lookup = {"sub-1_dir-AP_dwi.nii.gz": "0 1 0 0.087",
... "sub-1_dir-PA_dwi.nii.gz": "0 -1 0 0.087"}
>>> print(topup_selection_to_report(selected_indices, original_files, spec_lookup))
    A total of 2 distortion groups were included in the combined DWI series data. Distortion \
group '0 1 0 0.087' was represented by images 0, 15 from sub-1_dir-AP_dwi.nii.gz. \
Distortion group '0 -1 0 0.087' was represented by images 0, 15 from sub-1_dir-PA_dwi.nii.gz.
Or
>>> selected_indices = [0, 15, 30, 45]
>>> original_files = ["sub-1_dir-AP_run-1_dwi.nii.gz"] * 15 + [
... "sub-1_dir-AP_run-2_dwi.nii.gz"] * 15 + [
... "sub-1_dir-PA_dwi.nii.gz"] * 30
>>> spec_lookup = {"sub-1_dir-AP_run-1_dwi.nii.gz": "0 1 0 0.087",
... "sub-1_dir-AP_run-2_dwi.nii.gz": "0 1 0 0.087",
... "sub-1_dir-PA_dwi.nii.gz": "0 -1 0 0.087"}
>>> print(topup_selection_to_report(selected_indices, original_files, spec_lookup))
    A total of 2 distortion groups were included in the combined DWI series data. Distortion \
group '0 1 0 0.087' was represented by image 0 from sub-1_dir-AP_run-1_dwi.nii.gz and \
image 0 from sub-1_dir-AP_run-2_dwi.nii.gz. Distortion group '0 -1 0 0.087' was represented \
by images 0, 15 from sub-1_dir-PA_dwi.nii.gz.
>>> selected_indices = [0, 15, 30, 45, 60]
>>> original_files = ["sub-1_dir-AP_run-1_dwi.nii.gz"] * 15 + [
... "sub-1_dir-AP_run-2_dwi.nii.gz"] * 15 + [
... "sub-1_dir-AP_run-3_dwi.nii.gz"] * 15 + [
... "sub-1_dir-PA_dwi.nii.gz"] * 30
>>> spec_lookup = {"sub-1_dir-AP_run-1_dwi.nii.gz": "0 1 0 0.087",
... "sub-1_dir-AP_run-2_dwi.nii.gz": "0 1 0 0.087",
... "sub-1_dir-AP_run-3_dwi.nii.gz": "0 1 0 0.087",
... "sub-1_dir-PA_dwi.nii.gz": "0 -1 0 0.087"}
>>> print(topup_selection_to_report(selected_indices, original_files, spec_lookup))
    A total of 2 distortion groups were included in the combined DWI series data. Distortion \
group '0 1 0 0.087' was represented by image 0 from sub-1_dir-AP_run-1_dwi.nii.gz, \
image 0 from sub-1_dir-AP_run-2_dwi.nii.gz and image 0 from sub-1_dir-AP_run-3_dwi.nii.gz. \
Distortion group '0 -1 0 0.087' was represented by images 0, 15 from sub-1_dir-PA_dwi.nii.gz.
>>> selected_indices = [0, 15, 30, 45]
>>> original_files = ["sub-1_dir-PA_dwi.nii.gz"] * 60
>>> spec_lookup = {"sub-1_dir-PA_dwi.nii.gz": "0 -1 0 0.087"}
>>> print(topup_selection_to_report(selected_indices, original_files, spec_lookup))
    A total of 1 distortion group was included in the combined DWI series data. \
Distortion group '0 -1 0 0.087' was represented by images 0, 15, 30, 45 \
from sub-1_dir-PA_dwi.nii.gz.
"""
image_indices = defaultdict(list)
for imgnum, image in enumerate(original_files):
image_indices[image].append(imgnum)
# Collect the original volume number within each source image
selected_per_image = defaultdict(list)
for b0_index in selected_indices:
b0_image = original_files[b0_index]
first_index = min(image_indices[b0_image])
within_image_index = b0_index - first_index
selected_per_image[b0_image].append(within_image_index)
# Collect the images and indices within each warp group
selected_per_warp_group = defaultdict(list)
for original_image, selection in selected_per_image.items():
warp_group = spec_lookup[original_image]
selected_per_warp_group[warp_group].append((original_image, selection))
# Make the description
num_groups = len(selected_per_warp_group)
plural = 's' if num_groups > 1 else ''
plural2 = 'were' if plural == 's' else 'was'
desc = ["A total of {num_groups} distortion group{plural} {plural2} included in the "
"{image_source} data. ".format(num_groups=num_groups, plural=plural,
plural2=plural2, image_source=image_source)]
for distortion_group, image_list in selected_per_warp_group.items():
group_desc = [
"Distortion group '{spec}' was represented by ".format(spec=distortion_group)]
for image_name, image_indices in image_list:
formatted_indices = ", ".join(map(str, image_indices))
plural = 's' if len(image_indices) > 1 else ''
group_desc += [
"image{plural} {imgnums} from {img_name}".format(plural=plural,
imgnums=formatted_indices,
img_name=image_name),
", "]
group_desc[-1] = ". "
if len(image_list) > 1:
group_desc[-3] = " and "
desc += group_desc
return ''.join(desc)
class _SeriesQCInputSpec(BaseInterfaceInputSpec):
pre_qc = File(exists=True, desc='qc file from the raw data')
t1_qc = File(exists=True, desc='qc file from preprocessed image in t1 space')
mni_qc = File(exists=True, desc='qc file from preprocessed image in template space')
confounds_file = File(exists=True, desc='confounds file')
t1_dice_score = traits.Float()
mni_dice_score = traits.Float()
output_file_name = traits.File()
class _SeriesQCOutputSpec(TraitedSpec):
series_qc_file = File(exists=True)
class SeriesQC(SimpleInterface):
input_spec = _SeriesQCInputSpec
output_spec = _SeriesQCOutputSpec
def _run_interface(self, runtime):
image_qc = _load_qc_file(self.inputs.pre_qc, prefix="raw_")
if isdefined(self.inputs.t1_qc):
image_qc.update(_load_qc_file(self.inputs.t1_qc, prefix="t1_"))
if isdefined(self.inputs.mni_qc):
image_qc.update(_load_qc_file(self.inputs.mni_qc, prefix="mni_"))
motion_summary = calculate_motion_summary(self.inputs.confounds_file)
image_qc.update(motion_summary)
# Add in Dice scores if available
if isdefined(self.inputs.t1_dice_score):
image_qc['t1_dice_distance'] = [self.inputs.t1_dice_score]
if isdefined(self.inputs.mni_dice_score):
image_qc['mni_dice_distance'] = [self.inputs.mni_dice_score]
# Get the metadata
output_file = self.inputs.output_file_name
image_qc['file_name'] = output_file
bids_info = get_bids_params(output_file)
image_qc.update(bids_info)
output = op.join(runtime.cwd, "dwi_qc.csv")
pd.DataFrame(image_qc).to_csv(output, index=False)
self._results['series_qc_file'] = output
return runtime
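# Usage sketch (added; not from the original module): SeriesQC is a nipype
# SimpleInterface, so in a qsiprep workflow it would typically be wrapped in a
# Node. The node/file names below are hypothetical.
#
#   from nipype.pipeline import engine as pe
#   series_qc = pe.Node(SeriesQC(output_file_name='sub-1_dwi.nii.gz'),
#                       name='series_qc')
#   series_qc.inputs.pre_qc = 'raw_qc.csv'
#   series_qc.inputs.confounds_file = 'confounds.tsv'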
def _load_qc_file(fname, prefix=""):
qc_data = pd.read_csv(fname).to_dict(orient='records')[0]
renamed = dict([
(prefix + key, value) for key, value in qc_data.items()])
return renamed
def motion_derivatives(translations, rotations, framewise_disp,
original_files):
def padded_diff(data):
out = np.zeros_like(data)
out[1:] = np.diff(data, axis=0)
return out
drotations = padded_diff(rotations)
dtranslations = padded_diff(translations)
# We don't want the relative values across the boundaries of runs.
# Determine which values should be ignored
file_labels, _ = | pd.factorize(original_files) | pandas.factorize |
"""
Map words into vectors using different algorithms such as TF-IDF, word2vec or GloVe.
"""
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA, NMF
from sklearn.cluster import KMeans, DBSCAN, MeanShift
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import normalize as sklearn_normalize
from scipy.sparse import coo_matrix
from typing import Optional, Union, Any
from texthero import preprocessing
import logging
import warnings
# from texthero import pandas_ as pd_
"""
Helper
"""
def flatten(
s: Union[pd.Series, pd.Series.sparse],
index: pd.Index = None,
fill_missing_with: Any = 0.0,
) -> pd.Series:
"""
Transform a Pandas Representation Series to a "normal" (flattened) Pandas Series.
The given Series should have a multiindex with first level being the document
    and second level being individual features of that document (e.g. tfidf scores per word).
The flattened Series has one cell per document, with the cell being a list of all
the individual features of that document.
Parameters
----------
s : Sparse Pandas Series or Pandas Series
The multiindexed Pandas Series to flatten.
index : Pandas Index, optional, default to None
The index the flattened Series should have.
fill_missing_with : Any, default to 0.0
Value to fill the NaNs (missing values) with. This _does not_ mean
that existing values that are np.nan are replaced, but rather that
features that are not present in one document but present in others
are filled with fill_missing_with. See example below.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> import numpy as np
>>> index = pd.MultiIndex.from_tuples([("doc0", "Word1"), ("doc0", "Word3"), ("doc1", "Word2")], names=['document', 'word'])
>>> s = pd.Series([3, np.nan, 4], index=index)
>>> s
document word
doc0 Word1 3.0
Word3 NaN
doc1 Word2 4.0
dtype: float64
>>> hero.flatten(s, fill_missing_with=0.0)
document
doc0 [3.0, 0.0, nan]
doc1 [0.0, 4.0, 0.0]
dtype: object
"""
s = s.unstack(fill_value=fill_missing_with)
if index is not None:
s = s.reindex(index, fill_value=fill_missing_with)
# Reindexing makes the documents for which no values
# are present in the Sparse Representation Series
# "reappear" correctly.
s = pd.Series(s.values.tolist(), index=s.index)
return s
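# Added note (not part of the original docstring): the `index` argument lets
# callers re-introduce documents that have no entries at all in the
# representation. Sketch, reusing the Series `s` from the docstring example:
#
#   full_index = pd.Index(["doc0", "doc1", "doc2"], name="document")
#   flatten(s, index=full_index, fill_missing_with=0.0)
#   # -> "doc2" reappears as [0.0, 0.0, 0.0]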
def _check_is_valid_representation(s: pd.Series) -> bool:
"""
Check if the given Pandas Series is a Document Representation Series.
Returns true if Series is Document Representation Series, else False.
"""
# TODO: in Version 2 when only representation is accepted as input -> change "return False" to "raise ValueError"
if not isinstance(s.index, pd.MultiIndex):
return False
# raise ValueError(
# f"The input Pandas Series should be a Representation Pandas Series and should have a MultiIndex. The given Pandas Series does not appears to have MultiIndex"
# )
if s.index.nlevels != 2:
return False
# raise ValueError(
# f"The input Pandas Series should be a Representation Pandas Series and should have a MultiIndex, where the first level represent the document and the second one the words/token. The given Pandas Series has {s.index.nlevels} number of levels instead of 2."
# )
return True
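# Illustrative sketch (added for clarity): what this helper accepts and rejects.
#
#   idx = pd.MultiIndex.from_tuples([("doc0", "word1"), ("doc1", "word2")],
#                                   names=("document", "word"))
#   _check_is_valid_representation(pd.Series([1.0, 2.0], index=idx))  # True
#   _check_is_valid_representation(pd.Series([1.0, 2.0]))             # False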
# Warning message for not-tokenized inputs
_not_tokenized_warning_message = (
"It seems like the given Pandas Series s is not tokenized. This function will"
" tokenize it automatically using hero.tokenize(s) first. You should consider"
" tokenizing it yourself first with hero.tokenize(s) in the future."
)
"""
Vectorization
"""
def count(
s: pd.Series,
max_features: Optional[int] = None,
min_df=1,
max_df=1.0,
binary=False,
) -> pd.Series:
"""
Represent a text-based Pandas Series using count.
Return a Document Representation Series with the
    number of occurrences of a document's words for every
document.
TODO add tutorial link
The input Series should already be tokenized. If not, it will
be tokenized before count is calculated.
Use :meth:`hero.representation.flatten` on the output to get
a standard Pandas Series with the document vectors
in every cell.
Parameters
----------
s : Pandas Series (tokenized)
max_features : int, optional, default to None.
Maximum number of features to keep. Will keep all features if set to None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency (number of documents they appear in) strictly
lower than the given threshold.
If float, the parameter represents a proportion of documents, integer
absolute counts.
max_df : float in range [0.0, 1.0] or int, default=1.0
Ignore terms that have a document frequency (number of documents they appear in)
        strictly higher than the given threshold.
If float, the parameter represents a proportion of documents, integer
absolute counts.
binary : bool, default=False
If True, all non zero counts are set to 1.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series(["Sentence one", "Sentence two"]).pipe(hero.tokenize)
>>> hero.count(s)
0 Sentence 1
one 1
1 Sentence 1
two 1
dtype: Sparse[int64, 0]
See Also
--------
Document Representation Series: TODO add tutorial link
"""
# TODO. Can be rewritten without sklearn.
# Check if input is tokenized. Else, print warning and tokenize.
if not isinstance(s.iloc[0], list):
warnings.warn(_not_tokenized_warning_message, DeprecationWarning)
s = preprocessing.tokenize(s)
tf = CountVectorizer(
max_features=max_features,
tokenizer=lambda x: x,
preprocessor=lambda x: x,
min_df=min_df,
max_df=max_df,
binary=binary,
)
tf_vectors_csr = tf.fit_transform(s)
tf_vectors_coo = coo_matrix(tf_vectors_csr)
s_out = pd.Series.sparse.from_coo(tf_vectors_coo)
features_names = tf.get_feature_names()
# Map word index to word name
s_out.index = s_out.index.map(lambda x: (s.index[x[0]], features_names[x[1]]))
return s_out
def term_frequency(
s: pd.Series, max_features: Optional[int] = None, min_df=1, max_df=1.0,
) -> pd.Series:
"""
Represent a text-based Pandas Series using term frequency.
Return a Document Representation Series with the
term frequencies of the terms for every
document.
TODO add tutorial link
The input Series should already be tokenized. If not, it will
be tokenized before term_frequency is calculated.
Use :meth:`hero.representation.flatten` on the output to get
a standard Pandas Series with the document vectors
in every cell.
Parameters
----------
s : Pandas Series (tokenized)
max_features : int, optional, default to None.
Maximum number of features to keep. Will keep all features if set to None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency (number of documents they appear in) strictly
lower than the given threshold.
If float, the parameter represents a proportion of documents, integer
absolute counts.
max_df : float in range [0.0, 1.0] or int, default=1.0
Ignore terms that have a document frequency (number of documents they appear in)
        strictly higher than the given threshold.
If float, the parameter represents a proportion of documents, integer
absolute counts.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series(["Sentence one hey", "Sentence two"]).pipe(hero.tokenize)
>>> hero.term_frequency(s)
0 Sentence 0.2
hey 0.2
one 0.2
1 Sentence 0.2
two 0.2
dtype: Sparse[float64, nan]
See Also
--------
Document Representation Series: TODO add tutorial link
"""
# Check if input is tokenized. Else, print warning and tokenize.
if not isinstance(s.iloc[0], list):
warnings.warn(_not_tokenized_warning_message, DeprecationWarning)
s = preprocessing.tokenize(s)
tf = CountVectorizer(
max_features=max_features,
tokenizer=lambda x: x,
preprocessor=lambda x: x,
min_df=min_df,
max_df=max_df,
)
tf_vectors_csr = tf.fit_transform(s)
tf_vectors_coo = coo_matrix(tf_vectors_csr)
total_count_coo = np.sum(tf_vectors_coo)
frequency_coo = np.divide(tf_vectors_coo, total_count_coo)
s_out = pd.Series.sparse.from_coo(frequency_coo)
features_names = tf.get_feature_names()
# Map word index to word name
s_out.index = s_out.index.map(lambda x: (s.index[x[0]], features_names[x[1]]))
return s_out
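# Clarifying note (added): the denominator above is the *total* token count of
# the whole corpus (np.sum over the full count matrix), not a per-document
# total. In the docstring example there are 5 tokens overall, so every entry
# equals count / 5 = 0.2.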
def tfidf(s: pd.Series, max_features=None, min_df=1, max_df=1.0,) -> pd.Series:
"""
Represent a text-based Pandas Series using TF-IDF.
*Term Frequency - Inverse Document Frequency (TF-IDF)* is a formula to
calculate the _relative importance_ of the words in a document, taking
    into account the words' occurrences in other documents. It consists of two parts:
The *term frequency (tf)* tells us how frequently a term is present in a document,
so tf(document d, term t) = number of times t appears in d.
The *inverse document frequency (idf)* measures how _important_ or _characteristic_
a term is among the whole corpus (i.e. among all documents).
Thus, idf(term t) = log((1 + number of documents) / (1 + number of documents where t is present)) + 1.
Finally, tf-idf(document d, term t) = tf(d, t) * idf(t).
Different from the `sklearn-implementation of
    tfidf <https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html>`_,
this function does *not* normalize the output in any way,
so the result is exactly what you
get applying the formula described above.
Return a Document Representation Series with the
tfidf of every word in the document.
TODO add tutorial link
The input Series should already be tokenized. If not, it will
be tokenized before tfidf is calculated.
If working with big pandas Series, you might want to limit
the number of features through the max_features parameter.
Use :meth:`hero.representation.flatten` on the output to get
a standard Pandas Series with the document vectors
in every cell.
Parameters
----------
s : Pandas Series (tokenized)
max_features : int, optional, default to None.
If not None, only the max_features most frequent tokens are used.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency (number of documents they appear in) strictly
lower than the given threshold.
If float, the parameter represents a proportion of documents, integer
absolute counts.
max_df : float in range [0.0, 1.0] or int, default=1.0
Ignore terms that have a document frequency (number of documents they appear in)
        strictly higher than the given threshold.
This arguments basically permits to remove corpus-specific stop words.
If float, the parameter represents a proportion of documents, integer
absolute counts.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series(["Hi Bye", "Test Bye Bye"]).pipe(hero.tokenize)
>>> hero.tfidf(s)
0 Bye 1.000000
Hi 1.405465
1 Bye 2.000000
Test 1.405465
dtype: Sparse[float64, nan]
See Also
--------
`TF-IDF on Wikipedia <https://en.wikipedia.org/wiki/Tf-idf>`_
Document Representation Series: TODO add tutorial link
"""
# Check if input is tokenized. Else, print warning and tokenize.
if not isinstance(s.iloc[0], list):
warnings.warn(_not_tokenized_warning_message, DeprecationWarning)
s = preprocessing.tokenize(s)
tfidf = TfidfVectorizer(
use_idf=True,
max_features=max_features,
min_df=min_df,
max_df=max_df,
tokenizer=lambda x: x,
preprocessor=lambda x: x,
norm=None, # Disable l1/l2 normalization.
)
tfidf_vectors_csr = tfidf.fit_transform(s)
# Result from sklearn is in Compressed Sparse Row format.
# Pandas Sparse Series can only be initialized from Coordinate format.
tfidf_vectors_coo = coo_matrix(tfidf_vectors_csr)
s_out = pd.Series.sparse.from_coo(tfidf_vectors_coo)
# Map word index to word name and keep original index of documents.
feature_names = tfidf.get_feature_names()
s_out.index = s_out.index.map(lambda x: (s.index[x[0]], feature_names[x[1]]))
return s_out
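# Worked example (added; mirrors the docstring above): for
# s = ["Hi Bye", "Test Bye Bye"] there are 2 documents, so
#   idf("Bye") = log((1 + 2) / (1 + 2)) + 1 = 1.0
#   idf("Hi")  = log((1 + 2) / (1 + 1)) + 1 ~= 1.405465
#   tfidf(doc 1, "Bye") = tf * idf = 2 * 1.0 = 2.0
# which matches the values shown in the docstring example.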
"""
Dimensionality reduction
"""
def pca(s, n_components=2, random_state=None) -> pd.Series:
"""
Perform principal component analysis on the given Pandas Series.
Principal Component Analysis (PCA) is a statistical method that is used
to reveal where the variance in a dataset comes from. For textual data,
one could for example first represent a Series of documents using
:meth:`texthero.representation.tfidf` to get a vector representation
of each document. Then, PCA can generate new vectors from the tfidf representation
that showcase the differences among the documents most strongly in fewer dimensions.
For example, the tfidf vectors will have length 100 if hero.tfidf was called
on a large corpus with max_features=100. Visualizing 100 dimensions is hard!
Using PCA with n_components=3, every document will now get a vector of
length 3, and the vectors will be chosen so that the document differences
are easily visible. The corpus can now be visualized in 3D and we can
get a good first view of the data!
In general, *pca* should be called after the text has already been represented to a matrix form.
Parameters
----------
s : Pandas Series
n_components : Int. Default is 2.
Number of components to keep (dimensionality of output vectors).
If n_components is not set or None, all components are kept.
random_state : int, default=None
Pass an int for reproducible results across multiple function calls.
Returns
-------
Pandas Series with the vector calculated by PCA for the document in every cell.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series(["Football is great", "Hi, I'm Texthero, who are you? Tell me!"])
>>> s = s.pipe(hero.clean).pipe(hero.tokenize).pipe(hero.tfidf)
>>> # Attention, your results might differ due to
>>> # the randomness in PCA!
>>> hero.pca(s) # doctest: +SKIP
document
0 [1.5713577608669735, 1.1102230246251565e-16]
1 [-1.5713577608669729, 1.1102230246251568e-16]
dtype: object
See also
--------
`PCA on Wikipedia <https://en.wikipedia.org/wiki/Principal_component_analysis>`_
"""
pca = PCA(n_components=n_components, random_state=random_state, copy=False)
return pd.Series(pca.fit_transform(list(s)).tolist(), index=s.index)
def nmf(s, n_components=2, random_state=None) -> pd.Series:
"""
Performs non-negative matrix factorization.
Non-Negative Matrix Factorization (NMF) is often used in
natural language processing to find clusters of similar
texts (e.g. some texts in a corpus might be about sports
and some about music, so they will differ in the usage
of technical terms; see the example below).
Given a document-term matrix (so in
texthero usually a Series after applying :meth:`texthero.representation.tfidf`
or some other first representation function that assigns a scalar (a weight)
to each word), NMF will find n_components many topics (clusters)
and calculate a vector for each document that places it
correctly among the topics.
Parameters
----------
s : Pandas Series
n_components : Int. Default is 2.
Number of components to keep (dimensionality of output vectors).
If n_components is not set or None, all components are kept.
random_state : int, default=None
Pass an int for reproducible results across multiple function calls.
Returns
-------
Pandas Series with the vector calculated by NMF for the document in every cell.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series(["Football, Sports, Soccer", "Music, Violin, Orchestra", "Football, Music"])
>>> s = s.pipe(hero.clean).pipe(hero.tokenize).pipe(hero.term_frequency)
>>> hero.nmf(s) # doctest: +SKIP
0 [0.9080190347553924, 0.0]
1 [0.0, 0.771931061231598]
2 [0.3725409073202516, 0.31656880119331093]
dtype: object
>>> # As we can see, the third document, which
>>> # is a mix of sports and music, is placed
>>> # between the two axes (the topics) while
>>> # the other documents are placed right on
>>> # one topic axis each.
See also
--------
`NMF on Wikipedia <https://en.wikipedia.org/wiki/Non-negative_matrix_factorization>`_
"""
nmf = NMF(n_components=n_components, init="random", random_state=random_state,)
return pd.Series(nmf.fit_transform(list(s)).tolist(), index=s.index)
def tsne(
s: pd.Series,
n_components=2,
perplexity=30.0,
learning_rate=200.0,
n_iter=1000,
random_state=None,
n_jobs=-1,
) -> pd.Series:
"""
Performs TSNE on the given pandas series.
t-distributed Stochastic Neighbor Embedding (t-SNE) is
a machine learning algorithm used to visualize high-dimensional data in fewer
dimensions. In natural language processing, the high-dimensional
data is usually a document-term matrix
(so in texthero usually a Series after applying :meth:`texthero.representation.tfidf`
or some other first representation function that assigns a scalar (a weight)
to each word) that is hard to visualize as there
might be many terms. With t-SNE, every document
gets a new, low-dimensional (n_components entries)
vector in such a way that the differences / similarities between
documents are preserved.
Parameters
----------
s : Pandas Series
n_components : int, default is 2.
Number of components to keep (dimensionality of output vectors).
If n_components is not set or None, all components are kept.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
        between 5 and 50. Different values can result in significantly
different results.
learning_rate : float, optional (default: 200.0)
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers. If the cost function gets stuck in a bad local
minimum increasing the learning rate may help.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 250.
random_state : int, default=None
Determines the random number generator. Pass an int for reproducible
results across multiple function calls.
n_jobs : int, optional, default=-1
The number of parallel jobs to run for neighbors search.
``-1`` means using all processors.
Returns
-------
Pandas Series with the vector calculated by t-SNE for the document in every cell.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series(["Football, Sports, Soccer", "Music, Violin, Orchestra", "Football, Music"])
>>> s = s.pipe(hero.clean).pipe(hero.tokenize).pipe(hero.term_frequency)
>>> hero.tsne(s, random_state=42) # doctest: +SKIP
0 [-18.833383560180664, -276.800537109375]
1 [-210.60179138183594, 143.00535583496094]
2 [-478.27984619140625, -232.97410583496094]
dtype: object
See also
--------
`t-SNE on Wikipedia <https://en.wikipedia.org/wiki/T-distributed_stochastic_neighbor_embedding>`_
"""
tsne = TSNE(
n_components=n_components,
perplexity=perplexity,
learning_rate=learning_rate,
n_iter=n_iter,
random_state=random_state,
n_jobs=n_jobs,
)
return pd.Series(tsne.fit_transform(list(s)).tolist(), index=s.index)
"""
Clustering
"""
def kmeans(
s: pd.Series,
n_clusters=5,
n_init=10,
max_iter=300,
random_state=None,
algorithm="auto",
):
"""
Performs K-means clustering algorithm.
K-means clustering is used in natural language processing
to separate texts into k clusters (groups)
(e.g. some texts in a corpus might be about sports
and some about music, so they will differ in the usage
of technical terms; the K-means algorithm uses this
to separate them into two clusters).
Given a document-term matrix (so in
texthero usually a Series after applying :meth:`texthero.representation.tfidf`
or some other first representation function that assigns a scalar (a weight)
to each word), K-means will find k topics (clusters)
and assign a topic to each document.
Parameters
----------
s: Pandas Series
n_clusters: Int, default to 5.
The number of clusters to separate the data into.
n_init : int, default=10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
max_iter : int, default=300
Maximum number of iterations of the k-means algorithm for a
single run.
random_state : int, default=None
Determines random number generation for centroid initialization. Use
an int to make the randomness deterministic.
algorithm : {"auto", "full", "elkan"}, default="auto"
K-means algorithm to use. The classical EM-style algorithm is "full".
The "elkan" variation is more efficient on data with well-defined
clusters, by using the triangle inequality. However it's more memory
intensive.
Returns
-------
Pandas Series with the cluster the document was assigned to in each cell.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series(["Football, Sports, Soccer", "music, violin, orchestra", "football, fun, sports", "music, fun, guitar"])
>>> s = s.pipe(hero.clean).pipe(hero.tokenize).pipe(hero.term_frequency).pipe(hero.flatten) # TODO: when others get Representation Support: remove flatten
>>> hero.kmeans(s, n_clusters=2, random_state=42)
0 1
1 0
2 1
3 0
dtype: category
Categories (2, int64): [0, 1]
>>> # As we can see, the documents are correctly
>>> # separated into topics / clusters by the algorithm.
See also
--------
`kmeans on Wikipedia <https://en.wikipedia.org/wiki/K-means_clustering>`_
"""
vectors = list(s)
kmeans = KMeans(
n_clusters=n_clusters,
n_init=n_init,
max_iter=max_iter,
random_state=random_state,
copy_x=True,
algorithm=algorithm,
).fit(vectors)
return pd.Series(kmeans.predict(vectors), index=s.index).astype("category")
def dbscan(
s,
eps=0.5,
min_samples=5,
metric="euclidean",
metric_params=None,
leaf_size=30,
n_jobs=-1,
):
"""
Perform DBSCAN clustering.
Density-based spatial clustering of applications with noise (DBSCAN)
is used in natural language processing
to separate texts into clusters (groups)
(e.g. some texts in a corpus might be about sports
and some about music, so they will differ in the usage
of technical terms; the DBSCAN algorithm uses this
to separate them into clusters). It chooses the
number of clusters on its own.
Given a document-term matrix (so in
texthero usually a Series after applying :meth:`texthero.representation.tfidf`
or some other first representation function that assigns a scalar (a weight)
to each word), DBSCAN will find topics (clusters)
and assign a topic to each document.
Parameters
----------
s: Pandas Series
eps : float, default=0.5
The maximum distance between two samples for one to be considered
as in the neighborhood of the other. This is not a maximum bound
on the distances of points within a cluster. This is the most
important DBSCAN parameter to choose appropriately for your data set
and distance function.
min_samples : int, default=5
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. Use `sorted(sklearn.neighbors.VALID_METRICS['brute'])`
to see valid options.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
leaf_size : int, default=30
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
n_jobs : int, default=-1
The number of parallel jobs to run.
``-1`` means using all processors.
Returns
-------
Pandas Series with the cluster the document was assigned to in each cell.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series(["Football, Sports, Soccer", "music, violin, orchestra", "football, fun, sports", "music, enjoy, guitar"])
>>> s = s.pipe(hero.clean).pipe(hero.tokenize).pipe(hero.tfidf).pipe(hero.flatten) # TODO: when others get Representation Support: remove flatten
>>> hero.dbscan(s, min_samples=1, eps=4)
0 0
1 1
2 0
3 1
dtype: category
Categories (2, int64): [0, 1]
>>> # As we can see, the documents are correctly
>>> # separated into topics / clusters by the algorithm
>>> # and we didn't even have to say how many topics there are!
See also
--------
`DBSCAN on Wikipedia <https://en.wikipedia.org/wiki/DBSCAN>`_
"""
return pd.Series(
DBSCAN(
eps=eps,
min_samples=min_samples,
metric=metric,
metric_params=metric_params,
leaf_size=leaf_size,
n_jobs=n_jobs,
).fit_predict(list(s)),
index=s.index,
).astype("category")
def meanshift(
s,
bandwidth=None,
bin_seeding=False,
min_bin_freq=1,
cluster_all=True,
n_jobs=-1,
max_iter=300,
):
"""
Perform mean shift clustering.
Mean shift clustering
is used in natural language processing
to separate texts into clusters (groups)
(e.g. some texts in a corpus might be about sports
and some about music, so they will differ in the usage
of technical terms; the mean shift algorithm uses this
to separate them into clusters). It chooses the
number of clusters on its own.
Given a document-term matrix (so in
texthero usually a Series after applying :meth:`texthero.representation.tfidf`
or some other first representation function that assigns a scalar (a weight)
to each word), mean shift will find topics (clusters)
and assign a topic to each document.
Parameters
----------
s: Pandas Series
bandwidth : float, default=None
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated.
Estimating takes time at least quadratic in the number of samples (i.e. documents).
For large datasets, it’s wise to set the bandwidth to a small value.
bin_seeding : bool, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : bool, default=True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
n_jobs : int, default=-1
The number of jobs to use for the computation.
``-1`` means using all processors
max_iter : int, default=300
Maximum number of iterations, per seed point before the clustering
        operation terminates (for that seed point), if it has not converged yet.
Returns
-------
Pandas Series with the cluster the document was assigned to in each cell.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series([[1, 1], [2, 1], [1, 0], [4, 7], [3, 5], [3, 6]])
>>> hero.meanshift(s, bandwidth=2)
0 1
1 1
2 1
3 0
4 0
5 0
dtype: category
Categories (2, int64): [0, 1]
See also
--------
`Mean-Shift on Wikipedia <https://en.wikipedia.org/wiki/Mean_shift>`_
"""
return pd.Series(
MeanShift(
bandwidth=bandwidth,
bin_seeding=bin_seeding,
min_bin_freq=min_bin_freq,
cluster_all=cluster_all,
n_jobs=n_jobs,
max_iter=max_iter,
).fit_predict(list(s)),
index=s.index,
).astype("category")
"""
Topic modelling
"""
# TODO.
"""
Normalization.
"""
def normalize(s: pd.Series, norm="l2") -> pd.Series:
"""
Normalize every cell in a Pandas Series.
Input has to be a Representation Series.
Parameters
----------
s: Pandas Series
norm: str, default to "l2"
One of "l1", "l2", or "max". The norm that is used.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> idx = pd.MultiIndex.from_tuples(
... [(0, "a"), (0, "b"), (1, "c"), (1, "d")], names=("document", "word")
... )
>>> s = pd.Series([1, 2, 3, 4], index=idx)
>>> hero.normalize(s, norm="max")
document word
0 a 0.50
b 1.00
1 c 0.75
d 1.00
dtype: Sparse[float64, nan]
See Also
--------
Representation Series link TODO add link to tutorial
`Norm on Wikipedia <https://en.wikipedia.org/wiki/Norm_(mathematics)>`_
"""
is_valid_representation = (
isinstance(s.index, pd.MultiIndex) and s.index.nlevels == 2
)
if not is_valid_representation:
raise TypeError(
"The input Pandas Series should be a Representation Pandas Series and should have a MultiIndex. The given Pandas Series does not appears to have MultiIndex"
)
# TODO after merging representation: use _check_is_valid_representation instead
if pd.api.types.is_sparse(s):
s_coo_matrix = s.sparse.to_coo()[0]
else:
s = s.astype("Sparse")
s_coo_matrix = s.sparse.to_coo()[0]
s_for_vectorization = s_coo_matrix
result = sklearn_normalize(
s_for_vectorization, norm=norm
) # Can handle sparse input.
result_coo = coo_matrix(result)
s_result = | pd.Series.sparse.from_coo(result_coo) | pandas.Series.sparse.from_coo |
import pytest
import sys
import numpy as np
import swan_vis as swan
import networkx as nx
import math
import pandas as pd
import anndata
###########################################################################
################# Related to input/error handling #########################
###########################################################################
###########################################################################
############################## Colors ###################################
###########################################################################
class TestColors(object):
# tests set_metadata_colors
# test set_metadata_colors - vanilla
def test_set_metadata_colors_1(self):
sg = get_die_test_sg()
cmap = {'GM12878': 'red', 'K562': 'blue'}
test = sg.set_metadata_colors('sample', cmap)
assert sg.adata.uns.sample_colors == ['red', 'blue']
# test set_metadata_colors - obs_col does not exist
    def test_set_metadata_colors_2(self):
sg = get_die_test_sg()
cmap = {1: 'red', 2: 'blue'}
with pytest.raises(Exception) as e:
test = sg.set_metadata_colors('stage', cmap)
assert 'Metadata column' in str(e.value)
###########################################################################
################# Related to plotting Swan Plots ##########################
###########################################################################
class TestPlotting(object):
# done: test_new_gene, calc_pos_sizes, calc_edge_curves, plot_graph,
# plot_transcript_path
# init_plot_settings test do not check for indicate_novel / indicate settigns
# init_plot_settings tests do not check for new dataset addition
# test init_plot_settings - https://github.com/mortazavilab/swan_vis/issues/8
# gene summary -> transcript path (same gene) -> gene summary (same gene)
def test_init_9(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_graph('test2_gid', display=False)
sg.plot_transcript_path('test5', display=False)
sg.plot_graph('test2_gid', display=False)
# edge_df
sg.pg.edge_df.drop('curve', axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon', None],
[14, '-', 'intron', 3, 5, 'intron', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon', None],
[11, '-', 'intron', 1, 4, 'intron', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
data = [[0, 'chr2', 100, False, True, False, 'TSS', None, None],
[1, 'chr2', 80, True, False, False, 'internal', None, None],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal', None, None],
[4, 'chr2', 60, True, False, False, 'internal', None, None],
                [5, 'chr2', 50, True, False, True, 'TES', None, None],
                [6, 'chr2', 45, False, False, True, 'TES', None, None]]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
check_dfs(sg.pg.loc_df, ctrl_loc_df, sg.pg.edge_df, ctrl_edge_df)
# test init_plot_settings - going from plotting gene summary to
# gene summary (same gene), also tests working from gene name
def test_init_8(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_graph('test2_gid', display=False)
sg.plot_graph('test2_gname', display=False)
# edge_df
sg.pg.edge_df.drop('curve', axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon', None],
[14, '-', 'intron', 3, 5, 'intron', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon', None],
[11, '-', 'intron', 1, 4, 'intron', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
data = [[0, 'chr2', 100, False, True, False, 'TSS', None, None],
[1, 'chr2', 80, True, False, False, 'internal', None, None],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal', None, None],
[4, 'chr2', 60, True, False, False, 'internal', None, None],
                [5, 'chr2', 50, True, False, True, 'TES', None, None],
                [6, 'chr2', 45, False, False, True, 'TES', None, None]]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
check_dfs(sg.pg.loc_df, ctrl_loc_df, sg.pg.edge_df, ctrl_edge_df)
# test init_plot_settings - going from plotting gene summary to
# gene summary (different gene)
def test_init_7(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_graph('test4_gid', display=False)
sg.plot_graph('test2_gid', display=False)
# edge_df
sg.pg.edge_df.drop('curve', axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon', None],
[14, '-', 'intron', 3, 5, 'intron', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon', None],
[11, '-', 'intron', 1, 4, 'intron', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
data = [[0, 'chr2', 100, False, True, False, 'TSS', None, None],
[1, 'chr2', 80, True, False, False, 'internal', None, None],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal', None, None],
[4, 'chr2', 60, True, False, False, 'internal', None, None],
                [5, 'chr2', 50, True, False, True, 'TES', None, None],
                [6, 'chr2', 45, False, False, True, 'TES', None, None]]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
check_dfs(sg.pg.loc_df, ctrl_loc_df, sg.pg.edge_df, ctrl_edge_df)
# test init_plot_settings - going from plotting transcript path
# to transcript path (same gene)
def test_init_6(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_transcript_path('test3', display=False)
sg.plot_transcript_path('test2', display=False)
# edge_df
sg.pg.edge_df.drop(['curve'], axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon_gray', None],
[14, '-', 'intron', 3, 5, 'intron_gray', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon_gray', None],
[11, '-', 'intron', 1, 4, 'intron_gray', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
# sg.pg.loc_df.drop(['annotation'], axis=1, inplace=True)
data = [[0, 'chr2', 100, False, True, False, 'TSS', None, None],
[1, 'chr2', 80, True, False, False, 'internal', None, None],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal_gray', None, None],
[4, 'chr2', 60, True, False, False, 'internal', None, None],
[5, 'chr2', 50, True, False, True, 'internal', None, None],
[6, 'chr2', 45, False, False, True, 'TES', None, None]]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
check_dfs(sg.pg.loc_df, ctrl_loc_df, sg.pg.edge_df, ctrl_edge_df)
# test init_plot_settings - going from plotting transcript path
# to transcript path (different gene)
def test_init_5(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_transcript_path('test5', display=False)
sg.plot_transcript_path('test2', display=False)
# edge_df
sg.pg.edge_df.drop(['curve'], axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon_gray', None],
[14, '-', 'intron', 3, 5, 'intron_gray', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon_gray', None],
[11, '-', 'intron', 1, 4, 'intron_gray', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
# sg.pg.loc_df.drop(['annotation'], axis=1, inplace=True)
data = [[0, 'chr2', 100, False, True, False, 'TSS', None, None],
[1, 'chr2', 80, True, False, False, 'internal', None, None],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal_gray', None, None],
[4, 'chr2', 60, True, False, False, 'internal', None, None],
[5, 'chr2', 50, True, False, True, 'internal', None, None],
[6, 'chr2', 45, False, False, True, 'TES', None, None]]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
check_dfs(sg.pg.loc_df, ctrl_loc_df, sg.pg.edge_df, ctrl_edge_df)
# test init_plot_settings - going from plotting transcript path
# to gene summary (same gene)
def test_init_4(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_transcript_path('test2', display=False)
sg.plot_graph('test2_gid', display=False)
# edge_df
sg.pg.edge_df.drop('curve', axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon', None],
[14, '-', 'intron', 3, 5, 'intron', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon', None],
[11, '-', 'intron', 1, 4, 'intron', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
data = [[0, 'chr2', 100, False, True, False, 'TSS', None, None],
[1, 'chr2', 80, True, False, False, 'internal', None, None],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal', None, None],
[4, 'chr2', 60, True, False, False, 'internal', None, None],
                [5, 'chr2', 50, True, False, True, 'TES', None, None],
                [6, 'chr2', 45, False, False, True, 'TES', None, None]]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
check_dfs(sg.pg.loc_df, ctrl_loc_df, sg.pg.edge_df, ctrl_edge_df)
# test init_plot_settings - going from plotting transcript path
# to gene summary (different gene)
def test_init_3(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_transcript_path('test1', display=False)
sg.plot_graph('test2_gid', display=False)
# edge_df
sg.pg.edge_df.drop('curve', axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon', None],
[14, '-', 'intron', 3, 5, 'intron', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon', None],
[11, '-', 'intron', 1, 4, 'intron', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
data = [[0, 'chr2', 100, False, True, False, 'TSS', None, None],
[1, 'chr2', 80, True, False, False, 'internal', None, None],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal', None, None],
[4, 'chr2', 60, True, False, False, 'internal', None, None],
                [5, 'chr2', 50, True, False, True, 'TES', None, None],
                [6, 'chr2', 45, False, False, True, 'TES', None, None]]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
check_dfs(sg.pg.loc_df, ctrl_loc_df, sg.pg.edge_df, ctrl_edge_df)
# test init_plot_settings - going from plotting gene summary to
# transcript path (same gene)
def test_init_1(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_graph('test2_gid', display=False)
sg.plot_transcript_path('test2', display=False)
# edge_df
sg.pg.edge_df.drop(['curve'], axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon_gray', None],
[14, '-', 'intron', 3, 5, 'intron_gray', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon_gray', None],
[11, '-', 'intron', 1, 4, 'intron_gray', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = | pd.DataFrame(data=data, columns=cols) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib
matplotlib.rcParams['figure.dpi'] = 160
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import multiprocessing
from singlecellmultiomics.bamProcessing.bamBinCounts import generate_commands, count_methylation_binned
import argparse
from colorama import Fore, Style
from singlecellmultiomics.utils import dataframe_to_wig
from singlecellmultiomics.methylation import MethylationCountMatrix
from singlecellmultiomics.bamProcessing.bamFunctions import get_reference_from_pysam_alignmentFile
from colorama import Fore,Style
from collections import defaultdict, Counter
from multiprocessing import Pool
from datetime import datetime
import pysam
from singlecellmultiomics.bamProcessing import get_contig_sizes, get_contig_size
from singlecellmultiomics.bamProcessing.bamBinCounts import generate_commands, read_counts
def sample_dict():
return defaultdict(Counter)
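# Note (added): defined at module level rather than as a lambda so the nested
# defaultdicts remain picklable, which multiprocessing.Pool requires when
# results are sent back from worker processes.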
def methylation_to_cut_histogram(args):
(alignments_path, bin_size, max_fragment_size, \
contig, start, end, \
min_mq, alt_spans, key_tags, dedup, kwargs) = args
distance_methylation = defaultdict(sample_dict) # sample - > distance -> context(ZzHhXx) : obs
max_dist = 1000
# Define which reads we want to count:
known = set()
if 'known' in kwargs and kwargs['known'] is not None:
# Only ban the very specific TAPS conversions:
try:
with pysam.VariantFile(kwargs['known']) as variants:
for record in variants.fetch(contig, start, end):
if record.ref=='C' and 'T' in record.alts:
known.add( record.pos)
if record.ref=='G' and 'A' in record.alts:
known.add(record.pos)
except ValueError:
# This happends on contigs not present in the vcf
pass
p = 0
start_time = datetime.now()
with pysam.AlignmentFile(alignments_path, threads=4) as alignments:
# Obtain size of selected contig:
contig_size = get_contig_size(alignments, contig)
if contig_size is None:
raise ValueError('Unknown contig')
# Determine where we start looking for fragments:
f_start = max(0, start - max_fragment_size)
f_end = min(end + max_fragment_size, contig_size)
for p, read in enumerate(alignments.fetch(contig=contig, start=f_start,
stop=f_end)):
if p%50==0 and 'maxtime' in kwargs and kwargs['maxtime'] is not None:
if (datetime.now() - start_time).total_seconds() > kwargs['maxtime']:
print(f'Gave up on {contig}:{start}-{end}')
break
if not read_counts(read, min_mq=min_mq, dedup=dedup):
continue
tags = dict(read.tags)
for i, (qpos, methylation_pos) in enumerate(read.get_aligned_pairs(matches_only=True)):
# Don't count sites outside the selected bounds
if methylation_pos < start or methylation_pos >= end:
continue
call = tags['XM'][i]
if call=='.':
continue
sample = read.get_tag('SM')
distance = abs(read.get_tag('DS') - methylation_pos)
if distance>max_dist:
continue
distance_methylation[sample][(read.is_read1, read.is_reverse, distance)][call] +=1
return distance_methylation
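# Added sketch (sample name is hypothetical): the returned mapping is
#   sample -> (is_read1, is_reverse, distance_to_cut) -> Counter of XM calls,
# so a per-sample CpG beta value at a given distance could be read out as:
#
#   calls = distance_methylation['cell_1'][(True, False, 10)]
#   beta = calls['Z'] / (calls['Z'] + calls['z'])  # methylated / total CpG calls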
threads = None
def get_distance_methylation(bam_path,
bp_per_job: int,
min_mapping_qual: int = None,
skip_contigs: set = None,
known_variants: str = None,
maxtime: int = None,
head: int=None,
threads: int = None,
**kwargs
):
all_kwargs = {'known': known_variants,
'maxtime': maxtime,
'threads':threads
}
all_kwargs.update(kwargs)
commands = generate_commands(
alignments_path=bam_path,
key_tags=None,
max_fragment_size=0,
dedup=True,
head=head,
bin_size=bp_per_job,
bins_per_job= 1, min_mq=min_mapping_qual,
kwargs=all_kwargs,
skip_contigs=skip_contigs
)
distance_methylation = defaultdict(sample_dict) # sample - > distance -> context(ZzHhXx) : obs
with Pool(threads) as workers:
for result in workers.imap_unordered(methylation_to_cut_histogram, commands):
for sample, data_for_sample in result.items():
for distance, context_obs in data_for_sample.items():
distance_methylation[sample][distance] += context_obs
return distance_methylation
if __name__ == '__main__':
argparser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="""Extract methylation levels relative to cut site (DS tag) from bam file""")
argparser.add_argument('bamfile', metavar='bamfile', type=str)
argparser.add_argument('-bp_per_job', default=5_000_000, type=int, help='Amount of basepairs to be processed per thread per chunk')
argparser.add_argument('-threads', default=None, type=int, help='Amount of threads to use for counting, None to use the amount of available threads')
fi = argparser.add_argument_group("Filters")
fi.add_argument('-min_mapping_qual', default=40, type=int)
fi.add_argument('-head', default=None, type=int,help='Process the first n bins')
fi.add_argument('-skip_contigs', type=str, help='Comma separated contigs to skip', default='MT,chrM')
fi.add_argument('-known_variants',
help='VCF file with known variants, will be not taken into account as methylated/unmethylated',
type=str)
og = argparser.add_argument_group("Output")
og.add_argument('-prefix', default='distance_calls', type=str, help='Prefix for output files')
args = argparser.parse_args()
print('Obtaining counts ', end="")
r = get_distance_methylation(bam_path = args.bamfile,
bp_per_job = args.bp_per_job,
known_variants = args.known_variants,
skip_contigs = args.skip_contigs.split(','),
min_mapping_qual=args.min_mapping_qual,
head = args.head,
threads=args.threads,
)
print(f" [ {Fore.GREEN}OK{Style.RESET_ALL} ] ")
for ctx in 'zhx':
beta = {}
met = {}
un = {}
for sample, sample_data in r.items():
beta[sample] = {}
met[sample] = {}
un[sample] = {}
for distance, contexts in sample_data.items():
if ctx in contexts or ctx.upper() in contexts:
beta[sample][distance] = contexts[ctx.upper()]/(contexts[ctx.upper()]+contexts[ctx])
met[sample][distance] = contexts[ctx.upper()]
un[sample][distance] = contexts[ctx]
pd.DataFrame(beta).sort_index().T.sort_index().to_csv(f'{args.prefix}_beta_{ctx}.csv')
        pd.DataFrame(beta).sort_index().T.sort_index().to_pickle(f'{args.prefix}_beta_{ctx}.pickle.gz')
pd.DataFrame(met).sort_index().T.sort_index().to_csv(f'{args.prefix}_counts_{ctx.upper()}.csv')
        pd.DataFrame(met).sort_index().T.sort_index().to_pickle(f'{args.prefix}_counts_{ctx.upper()}.pickle.gz')
pd.DataFrame(un).sort_index().T.sort_index().to_csv(f'{args.prefix}_counts_{ctx}.csv')
        pd.DataFrame(un).sort_index().T.sort_index().to_pickle(f'{args.prefix}_counts_{ctx}.pickle.gz')
# Make plots
beta = {}
met = {}
un = {}
for sample, sample_data in r.items():
beta[sample] = {}
met[sample] = {}
un[sample] = {}
for distance, contexts in sample_data.items():
if distance[-1] > 500 or distance[-1] < 4: # Clip in sane region
continue
if ctx in contexts or ctx.upper() in contexts:
beta[sample][distance] = contexts[ctx.upper()] / (contexts[ctx.upper()] + contexts[ctx])
met[sample][distance] = contexts[ctx.upper()]
un[sample][distance] = contexts[ctx]
beta = pd.DataFrame(beta).sort_index().T.sort_index()
met = pd.DataFrame(met).sort_index().T.sort_index()
un = | pd.DataFrame(un) | pandas.DataFrame |
from django.shortcuts import render,HttpResponse
from expense.models import Expense
from bokeh.plotting import figure
from bokeh.embed import components
from bokeh.models import HoverTool,BasicTickFormatter,DatetimeTickFormatter
from bokeh.models import ColumnDataSource
from bokeh.layouts import row
from datetime import datetime,timedelta
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
# Create your views here.
def analysis(request):
df = pd.DataFrame(Expense.objects.values())
df.drop('id',axis=1,inplace=True)
df["amount"] = df["amount"].astype(int)
    # sort the expenses by date
    df = df.sort_values(by=['date'])
    # drop the time component, keeping only the date
df['date'] = pd.to_datetime(df['date']).dt.date
df['date'] = | pd.to_datetime(df['date']) | pandas.to_datetime |
# Heavily influenced by: https://www.kaggle.com/opanichev/lightgbm-and-tf-idf-starter?login=true#
import pandas as pd
import lightgbm as lgbm
import numpy as np
import os
import scripts.donorchoose_functions as fn
import re
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import LabelEncoder
from datetime import datetime
from tqdm import tqdm
# Reading in data
dtype = {
'id': str,
'teacher_id': str,
'teacher_prefix': str,
'school_state': str,
'project_submitted_datetime': str,
'project_grade_category': str,
'project_subject_categories': str,
'project_subject_subcategories': str,
'project_title': str,
'project_essay_1': str,
'project_essay_2': str,
'project_essay_3': str,
'project_essay_4': str,
'project_resource_summary': str,
'teacher_number_of_previously_posted_projects': int,
'project_is_approved': np.uint8,
}
data_dir = "F:/Nerdy Stuff/Kaggle/DonorsChoose"
sub_path = "F:/Nerdy Stuff/Kaggle submissions/DonorChoose"
train = pd.read_csv(os.path.join(data_dir, "data/train.csv"), dtype=dtype)
test = pd.read_csv(os.path.join(data_dir, "data/test.csv"), dtype=dtype)
print("Extracting text features")
train = fn.extract_text_features(train)
test = fn.extract_text_features(test)
print("Extracting datetime features")
train = fn.extract_timestamp_features(train)
test = fn.extract_timestamp_features(test)
print("Joining together essays")
train['project_essay'] = fn.join_essays(train)
test['project_essay'] = fn.join_essays(test)
train = train.drop([
'project_essay_1', 'project_essay_2',
'project_essay_3', 'project_essay_4'
], axis=1)
test = test.drop([
'project_essay_1', 'project_essay_2',
'project_essay_3', 'project_essay_4'
], axis=1)
sample_sub = pd.read_csv(os.path.join(data_dir, "data/sample_submission.csv"))
res = pd.read_csv(os.path.join(data_dir, "data/resources.csv"))
id_test = test['id'].values
# Rolling up resources to one row per application
print("Rolling up resource requirements to one line and creating aggregate feats")
res = (res
.groupby('id').apply(fn.price_quantity_agg)
.reset_index())
res['mean_price'] = res['price_sum']/res['quantity_sum']
print("Train has %s rows and %s cols" % (train.shape[0], train.shape[1]))
print("Test has %s rows and %s cols" % (test.shape[0], test.shape[1]))
print("Res has %s rows and %s cols" % (res.shape[0], res.shape[1]))
print("Train has %s more rows than test" % (train.shape[0] / test.shape[0]))
train = pd.merge(left=train, right=res, on="id", how="left")
test = pd.merge(left=test, right=res, on="id", how="left")
print("Train after merge has %s rows and %s cols" % (train.shape[0], train.shape[1]))
print("Test after merge has %s rows and %s cols" % (test.shape[0], test.shape[1]))
print("Concatenating datasets so I can build the label encoders")
df_all = pd.concat([train, test], axis=0)
# TF-IDF and label encoding - will take first iteration from kaggle script and then move to sklearn pipelines?
# Renaming these cols to include later on
print('Label Encoder...')
col_rename = {
'teacher_id': 'enc_teacher_id',
'teacher_prefix': 'enc_teacher_prefix',
'school_state': 'enc_school_state',
'project_grade_category': 'enc_project_grade_category',
'project_subject_categories': 'enc_project_subject_categories',
'project_subject_subcategories': 'enc_project_subject_subcategories' # Can refactor to be more elegant
}
train = train.rename(columns=col_rename)
test = test.rename(columns=col_rename)
df_all = df_all.rename(columns=col_rename)
r = re.compile("enc_")
filtered = filter(r.match, train.columns)
cols = [i for i in filtered]
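# Fit each LabelEncoder on the concatenated train+test frame (df_all) so that both splits
# share the same category-to-integer mapping and unseen test categories cannot break transform().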
for c in tqdm(cols):
le = LabelEncoder()
le.fit(df_all[c].astype(str))
train[c] = le.transform(train[c].astype(str))
test[c] = le.transform(test[c].astype(str))
del le
print("Modelling")
cols = train.columns
variables_names_to_include = ['price', 'quantity', '_wc',
'_len', 'subtime_', 'enc_']
vars_to_include = []
for variable in variables_names_to_include:
regex = ".*" + variable + "*."
print(regex)
r = re.compile(regex)
filtered = filter(r.match, cols)
result = [i for i in filtered]
    for matched_col in result:
        vars_to_include.append(matched_col)
X_tr = train[vars_to_include]
y_tr = train['project_is_approved'].values
X_tst = test[vars_to_include]
fold_scores = []
skf = StratifiedKFold(n_splits=10)
clf = lgbm.LGBMClassifier()
for i, (train_idx, valid_idx) in enumerate(skf.split(X_tr, y_tr)):
print("Fold #%s" % (i + 1))
X_train, X_valid = X_tr.iloc[train_idx, :], X_tr.iloc[valid_idx, :]
y_train, y_valid = y_tr[train_idx], y_tr[valid_idx]
clf.fit(X_train, y_train)
y_valid_predictions = clf.predict_proba(X_valid)[:, 1]
auc_roc_score = roc_auc_score(y_valid, y_valid_predictions)
fold_scores.append(auc_roc_score)
mean_score = round(np.mean(fold_scores), 3)
std_score = round(np.std(fold_scores), 3)
print('AUC = {:.3f} +/- {:.3f}'.format(mean_score, std_score))
# Fitting model on whole train
clf.fit(X_tr, y_tr)
predictions = clf.predict_proba(X_tst)[:, 1]
# Submitting to F:/
pred_set = | pd.DataFrame() | pandas.DataFrame |
########################################################
# <NAME> - drigols #
# Last update: 04/10/2021 #
########################################################
from sklearn.datasets import load_breast_cancer
import pandas as pd
pd.set_option('display.max_columns', 30)
df = load_breast_cancer() # Dataset instance.
x = | pd.DataFrame(df.data, columns=[df.feature_names]) | pandas.DataFrame |
# coding: utf-8
import argparse
from logging import info, basicConfig, INFO
import logging
import os
import pickle
from datetime import datetime as dt
import pandas as pd
from gensim.models import KeyedVectors
import numpy as np
import matplotlib.pyplot as plt
from nltk.tokenize import wordpunct_tokenize
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import SGDClassifier, LogisticRegressionCV
from sklearn.svm import LinearSVC, NuSVC
from sklearn.naive_bayes import GaussianNB
from sklearn import metrics
from sklearn.preprocessing import Imputer
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings("ignore")
LOG_HEAD = '[%(asctime)s] %(levelname)s: %(message)s'
basicConfig(format=LOG_HEAD, level=INFO)
class DataWorker:
def __init__(self, embeddings_path="embeddings/arabic-news.bin",
train_data_path=None, test_data_path=None, binary_embeddings=True):
self.embeddings, self.dimension = self.load_vec(embeddings_path, binary_embeddings)
self.train_data = self.load_data(train_data_path)
self.test_data = self.load_data(test_data_path)
if not train_data_path and not test_data_path:
logging.info("No data has been provided. Please provide at least a data source. Exiting...")
exit(1)
def load_vec(self, path, format):
# vectors file
"""load the pre-trained embedding model"""
if not self.check_file_exist(path):
logging.info("{} Path to embeddings doesn't exist. Exiting...")
exit(1)
if format:
w2v_model = KeyedVectors.load_word2vec_format(path, binary=True)
else:
w2v_model = KeyedVectors.load(path)
w2v_model.init_sims(replace=True) # to save memory
vocab, vector_dim = w2v_model.syn0.shape
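        # syn0 holds the raw embedding matrix (vocab_size x dimension); note that `syn0`
        # and `init_sims` are gensim 3.x APIs, replaced in gensim 4.x by `vectors` and
        # `get_normed_vectors()`.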
return w2v_model, vector_dim
def load_data(self, path):
# TODO: update reading methods to handle multiple data sources
if not self.check_file_exist(path):
logging.info("{} Path to data doesn't exist. Skipping...")
return None
dataset = | pd.read_csv(path) | pandas.read_csv |
"""
Copyright © 2021-2022 The Johns Hopkins University Applied Physics Laboratory LLC
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the “Software”), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
import logging
import os
import platform
import re
from pathlib import Path
from typing import List
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
def get_l2data_root(warn: bool = True) -> Path:
"""Get the root directory where L2 data and logs are saved.
Args:
warn (bool, optional): Flag for enabling/disabling warning message. Defaults to True.
Returns:
Path: The L2Data root directory path.
"""
try:
root_dir = Path(os.environ["L2DATA"])
except KeyError:
if warn:
msg = (
"L2DATA directory not specified. Using ~/l2data as default.\n\n"
"This module requires the environment variable 'L2DATA' be set to the top level folder under which\n"
"all data is, or will be, stored. For example, consider the following commands:\n"
"\t(bash) export L2DATA=/path/to/data/l2data\n"
"\t(Windows) set L2DATA=C:\\\\path\\\\to\\\\data\\\\l2data\n"
)
logger.warning(msg)
root_dir = "l2data"
if platform.system().lower() == "windows":
root_dir = Path(os.environ["APPDATA"]) / root_dir
else:
root_dir = Path(os.path.expanduser("~")) / root_dir
if not root_dir.exists():
root_dir.mkdir(parents=True, exist_ok=True)
return root_dir
def get_l2root_base_dirs(directory_to_append: str, sub_to_get: str = "") -> Path:
"""Get the base L2DATA path and go one level down with the option to return the path for the
directory or the file underneath.
e.g. $L2DATA/logs/some_log_directory or $L2DATA/taskinfo/info.json
Args:
directory_to_append (str): The L2Data subdirectory.
sub_to_get (str, optional): The further subdirectory or file to append. Defaults to ''.
Returns:
Path: The path of the L2Data subdirectory or file.
"""
return get_l2data_root() / directory_to_append / sub_to_get
def get_fully_qualified_name(log_dir: Path) -> Path:
"""Get fully qualified path of log directory.
Checks if the log directory path exists as a relative or absolute path first. If not, then this
function will check L2Data/logs.
Args:
        log_dir (Path): The log directory name.
Raises:
NotADirectoryError: If the directory is not found.
Returns:
        Path: The full path to the log directory.
"""
if log_dir.exists():
return log_dir
elif log_dir.parent == Path("."):
return get_l2root_base_dirs("logs", log_dir.name)
else:
raise NotADirectoryError
def read_log_data(log_dir: Path, analysis_variables: List[str] = None) -> pd.DataFrame:
"""Parse input directory for data log files and aggregate into Pandas DataFrame.
Args:
log_dir (Path): The top-level log directory.
analysis_variables (List[str], optional): Filtered column names to import. Defaults to None.
Raises:
FileNotFoundError: If log directory is not found.
Returns:
pd.DataFrame: The aggregated log data.
"""
logs = None
fully_qualified_dir = get_fully_qualified_name(log_dir)
if not fully_qualified_dir.is_dir():
        raise FileNotFoundError(f"Log directory not found: {fully_qualified_dir}")
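    # Aggregate every data-log.tsv found anywhere under the log directory (searched recursively).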
for data_file in fully_qualified_dir.rglob("data-log.tsv"):
if analysis_variables is not None:
default_cols = [
"block_num",
"exp_num",
"block_type",
"worker_id",
"task_name",
"task_params",
"exp_status",
"timestamp",
]
df = | pd.read_csv(data_file, sep="\t") | pandas.read_csv |
import base64
import io
import textwrap
import dash
import dash_core_components as dcc
import dash_html_components as html
import gunicorn
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
import flask
import pandas as pd
import urllib.parse
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import numpy as np
import math
import scipy.stats
import dash_table
from dash_table.Format import Format, Scheme
from colour import Color
import dash_bootstrap_components as dbc
# from waitress import serve
external_stylesheets = [dbc.themes.BOOTSTRAP, 'https://codepen.io/chriddyp/pen/bWLwgP.css',
"https://codepen.io/sutharson/pen/dyYzEGZ.css",
"https://fonts.googleapis.com/css2?family=Raleway&display=swap",
"https://codepen.io/chriddyp/pen/brPBPO.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# "external_url": "https://codepen.io/chriddyp/pen/brPBPO.css"
# https://raw.githubusercontent.com/aaml-analytics/pca-explorer/master/LoadingStatusStyleSheet.css
styles = {
'pre': {
'border': 'thin lightgrey solid',
'overflowX': 'scroll'
}
}
tabs_styles = {'height': '40px', 'font-family': 'Raleway', 'fontSize': 14}
tab_style = {
'borderBottom': '1px solid #d6d6d6',
'padding': '6px',
'Weight': 'bold'
}
tab_selected_style = {
'borderTop': '3px solid #333333',
'borderBottom': '1px solid #d6d6d6 ',
'backgroundColor': '#f6f6f6',
'color': '#333333',
# 'fontColor': '#004a4a',
'fontWeight': 'bold',
'padding': '6px'
}
# APP ABOUT DESCRIPTION
MOF_tool_about = textwrap.wrap(' These tools aim to provide a reproducible and consistent data visualisation platform '
'where experimental and computational researchers can use big data and statistical '
'analysis to find the best materials for specific applications. Principal Component '
'Analysis (PCA) is a dimension reduction technique that can be used to reduce a large '
'set of observable variables to a smaller set of latent variables that still contain '
'most of the information in the large set (feature extraction). This is done by '
'transforming a number of (possibly) correlated variables into some number of orthogonal '
'(uncorrelated) variables called principal components to find the directions of maximal '
'variance. PCA can be used to ease data visualisation by having fewer dimensions to plot '
'or be used as a pre-processing step before using another Machine Learning (ML)'
' algorithm for regression '
'and classification tasks. PCA can be used to improve an ML algorithm performance, '
'reduce overfitting and reduce noise in data.',
width=50)
Scree_plot_about = textwrap.wrap(' The Principal Component Analysis Visualisation Tools runs PCA for the user and '
'populates a Scree plot. This plot allows the user to determine if PCA is suitable '
'for '
                                 'their dataset and whether they can accept an X% drop in explained variance in '
                                 'exchange for fewer dimensions.', width=50)
Feature_correlation_filter = textwrap.wrap("Feature correlation heatmaps provide users with feature analysis and "
"feature principal component analysis. This tool will allow users to see the"
" correlation between variables and the"
" covariances/correlations between original variables and the "
"principal components (loadings)."
, width=50)
plots_analysis = textwrap.wrap('Users can keep all variables as features or drop certain variables to produce a '
'Biplot, cos2 plot and contribution plot. The score plot is used to look for clusters, '
'trends, and outliers in the first two principal components. The loading plot is used to'
' visually interpret the first two principal components. The biplot overlays the score '
'plot and the loading plot on the same graph. The squared cosine (cos2) plot shows '
'the importance of a component for a given observation i.e. measures '
'how much a variable is represented in a component. The contribution plot contains the '
'contributions (%) of the variables to the principal components', width=50, )
data_table_download = textwrap.wrap("The user's inputs from the 'Plots' tab will provide the output of the data tables."
" The user can download the scores, eigenvalues, explained variance, "
"cumulative explained variance, loadings, "
"cos2 and contributions from the populated data tables. "
"Note: Wait for user inputs to be"
" computed (faded tab app will return to the original colour) before downloading the"
" data tables. ", width=50)
MOF_GH = textwrap.wrap(" to explore AAML's sample data and read more on"
" AAML's Principal Component Analysis Visualisation Tool Manual, FAQ's & Troubleshooting"
" on GitHub... ", width=50)
####################
# APP LAYOUT #
####################
fig = go.Figure()
fig1 = go.Figure()
app.layout = html.Div([
html.Div([
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/UOC.png',
height='35', width='140', style={'display': 'inline-block', 'padding-left': '1%'}),
html.Img(src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/A2ML-logo.png',
height='50', width='125', style={'float': 'right', 'display': 'inline-block', 'padding-right': '2%'}),
html.H1("Principal Component Analysis Visualisation Tools",
style={'display': 'inline-block', 'padding-left': '11%', 'text-align': 'center', 'fontSize': 36,
'color': 'white', 'font-family': 'Raleway'}),
html.H1("...", style={'fontColor': '#3c3c3c', 'fontSize': 6})
], style={'backgroundColor': '#333333'}),
html.Div([html.A('Refresh', href='/')], style={}),
html.Div([
html.H2("Upload Data", style={'fontSize': 24, 'font-family': 'Raleway', 'color': '#333333'}, ),
html.H3("Upload .txt, .csv or .xls files to starting exploring data...", style={'fontSize': 16,
'font-family': 'Raleway'}),
dcc.Store(id='csv-data', storage_type='session', data=None),
html.Div([dcc.Upload(
id='data-table-upload',
children=html.Div([html.Button('Upload File')],
style={'height': "60px", 'borderWidth': '1px',
'borderRadius': '5px',
'textAlign': 'center',
}),
multiple=False
),
html.Div(id='output-data-upload'),
]), ], style={'display': 'inline-block', 'padding-left': '1%', }),
html.Div([dcc.Tabs([
dcc.Tab(label='About', style=tab_style, selected_style=tab_selected_style,
children=[html.Div([html.H2(" What are AAML's Principal Component Analysis Visualisation Tools?",
style={'fontSize': 18, 'font-family': 'Raleway', 'font-weight': 'bold'
}),
html.Div([' '.join(MOF_tool_about)]
, style={'font-family': 'Raleway'}),
html.H2(["Scree Plot"],
style={'fontSize': 18,
'font-family': 'Raleway', 'font-weight': 'bold'}),
html.Div([' '.join(Scree_plot_about)], style={'font-family': 'Raleway'}),
html.H2(["Feature Correlation"], style={'fontSize': 18,
'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(Feature_correlation_filter)], style={'font-family': 'Raleway', }),
html.H2(["Plots"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(plots_analysis)], style={'font-family': 'Raleway'}),
html.H2(["Data tables"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(data_table_download)], style={'font-family': 'Raleway'}),
# ADD LINK
html.Div([html.Plaintext(
[' Click ', html.A('here ',
href='https://github.com/aaml-analytics/pca-explorer')],
style={'display': 'inline-block',
'fontSize': 14, 'font-family': 'Raleway'}),
html.Div([' '.join(MOF_GH)], style={'display': 'inline-block',
'fontSize': 14,
'font-family': 'Raleway'}),
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof'
'-explorer/master/github.png',
height='40', width='40',
style={'display': 'inline-block', 'float': "right"
})
]
, style={'display': 'inline-block'})
], style={'backgroundColor': '#ffffff', 'padding-left': '1%'}
)]),
dcc.Tab(label='Scree Plot', style=tab_style, selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='PC-Eigen-plot')
],
style={'display': 'inline-block',
'width': '49%'}),
html.Div([dcc.Graph(id='PC-Var-plot')
], style={'display': 'inline-block', 'float': 'right',
'width': '49%'}),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:", dcc.RadioItems(
id='outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '49%', 'padding-left': '1%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-scree',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Label(["You should attempt to use at least..."
, html.Div(id='var-output-container-filter')])
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["As a rule of thumb for the Scree Plot"
" Eigenvalues, the point where the slope of the curve "
"is clearly "
"leveling off (the elbow), indicates the number of "
"components that "
"should be retained as significant."])
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Feature correlation', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([html.Div([dcc.Graph(id='PC-feature-heatmap')
], style={'width': '47%',
'display': 'inline-block',
'float': 'right'}),
html.Div([dcc.Graph(id='feature-heatmap')
], style={'width': '51%',
'display': 'inline-block',
'float': 'left'}),
html.Div([html.Label(["Loading colour bar range:"
, html.Div(
id='color-range-container')])
], style={
'fontSize': 12,
'float': 'right',
'width': '100%',
'padding-left': '85%'}
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='PC-feature-outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label(
["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-heatmap',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([html.Label(["Select color scale:",
dcc.RadioItems(
id='colorscale',
options=[{'label': i, 'value': i}
for i in
['Viridis', 'Plasma']],
value='Plasma'
)]),
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("There are usually two ways multicollinearity, "
"which is when there are a number of variables "
"that are highly correlated, is dealt with:"),
html.P("1) Use PCA to obtain a set of orthogonal ("
"not correlated) variables to analyse."),
html.P("2) Use correlation of determination (R²) to "
"determine which variables are highly "
"correlated and use only 1 in analysis. "
"Cut off for highly correlated variables "
"is ~0.7."),
html.P(
"In any case, it depends on the machine learning algorithm you may apply later. For correlation robust algorithms,"
" such as Random Forest, correlation of features will not be a concern. For non-correlation robust algorithms such as Linear Discriminant Analysis, "
"all high correlation variables should be removed.")
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["Note: Data has been standardised (scale)"])
], style={'padding-left': '1%'})
])
]),
dcc.Tab(label='Plots', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([html.P("Selecting Features")], style={'padding-left': '1%',
'font-weight': 'bold'}),
html.Div([
html.P("Input here affects all plots, datatables and downloadable data output"),
html.Label([
"Would you like to analyse all variables or choose custom variables to "
"analyse:",
dcc.RadioItems(
id='all-custom-choice',
options=[{'label': 'All',
'value': 'All'},
{'label': 'Custom',
'value': 'Custom'}],
value='All'
)])
], style={'padding-left': '1%'}),
html.Div([
html.P("For custom variables input variables you would not like as features in your PCA:"),
html.Label(
[
"Note: Only input numerical variables (non-numerical variables have already "
"been removed from your dataframe)",
dcc.Dropdown(id='feature-input',
multi=True,
)])
], style={'padding': 10, 'padding-left': '1%'}),
]), dcc.Tabs(id='sub-tabs1', style=tabs_styles,
children=[
dcc.Tab(label='Biplot (Scores + loadings)', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='biplot', figure=fig)
], style={'height': '100%', 'width': '75%',
'padding-left': '20%'},
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-biplot',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-biplot',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([
html.Label([
"Graph Update to show either loadings (Loading Plot) or "
"scores and loadings (Biplot):",
dcc.RadioItems(
id='customvar-graph-update',
options=[{'label': 'Biplot',
'value': 'Biplot'},
{'label': 'Loadings',
'value': 'Loadings'}],
value='Biplot')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix. PCA is an unsupervised machine learning technique - it only "
"looks at the input features and does not take "
"into account the output or the target"
" (response) variable.")],
style={'padding-left': '1%'}),
html.Div([
html.P("For variables you have dropped..."),
html.Label([
"Would you like to introduce a first target variable"
" into your data visualisation?"
" (Graph type must be Biplot): "
"",
dcc.RadioItems(
id='radio-target-item',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select first target variable for color scale of scores: ",
dcc.Dropdown(
id='color-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Would you like to introduce a second target variable"
" into your data visualisation??"
" (Graph type must be Biplot):",
dcc.RadioItems(
id='radio-target-item-second',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select second target variable for size scale of scores:",
dcc.Dropdown(
id='size-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([html.Label(["Size range:"
, html.Div(
id='size-second-target-container')])
], style={'display': 'inline-block',
'float': 'right',
'padding-right': '5%'}
),
html.Div([
html.Br(),
html.P(
"A loading plot shows how "
"strongly each characteristic (variable)"
" influences a principal component. The angles between the vectors"
" tell us how characteristics correlate with one another: "),
html.P("1) When two vectors are close, forming a small angle, the two "
"variables they represent are positively correlated. "),
html.P(
"2) If they meet each other at 90°, they are not likely to be correlated. "),
html.P(
"3) When they diverge and form a large angle (close to 180°), they are negative correlated."),
html.P(
"The Score Plot involves the projection of the data onto the PCs in two dimensions."
"The plot contains the original data but in the rotated (PC) coordinate system"),
html.P(
"A biplot merges a score plot and loading plot together.")
], style={'padding-left': '1%'}
),
]),
dcc.Tab(label='Cos2', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='cos2-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-cos2',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'padding-left': '1%',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-cos2',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The squared cosine shows the importance of a "
"component for a given observation i.e. "
"measures "
" how much a variable is represented in a "
"component")
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Contribution', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='contrib-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-contrib',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
], style={'padding-left': '1%'})
], style={'display': 'inline-block',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-contrib',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The contribution plot contains the "
"contributions (in percentage) of the "
"variables to the principal components")
], style={'padding-left': '1%'}),
])
])
]),
dcc.Tab(label='Data tables', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([
html.Label(
["Note: Input in 'Plots' tab will provide output of data tables and the"
" downloadable PCA data"])
], style={'font-weight': 'bold', 'padding-left': '1%'}),
html.Div([html.A(
'Download PCA Data (scores for each principal component)',
id='download-link',
href="",
target="_blank"
)], style={'padding-left': '1%'}),
html.Div([html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(id="eigenA-outlier",
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])], style={'padding-left': '1%',
'display': 'inline-block', 'width': '49%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-data-table',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Div([
html.Label(["Correlation between Features"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-correlation',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-correlation-container'),
]),
html.Div([html.A(
'Download Feature Correlation data',
id='download-link-correlation',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Eigen Analysis of the correlation matrix"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-eigenA',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-eigenA-container'),
]),
html.Div([html.A(
'Download Eigen Analysis data',
id='download-link-eigenA',
href="",
download='Eigen_Analysis_data.csv',
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Loadings (Feature and PC correlation) from PCA"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-loadings',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-loadings-container'),
]),
html.Div([html.A(
'Download Loadings data',
id='download-link-loadings',
download='Loadings_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Cos2 from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-cos2',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-cos2-container'),
]),
html.Div([html.A(
'Download Cos2 data',
id='download-link-cos2',
download='Cos2_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Contributions from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-contrib',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-contrib-container'),
]),
html.Div([html.A(
'Download Contributions data',
id='download-link-contrib',
download='Contributions_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
])])
])
], style={'font-family': 'Raleway'})])
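# Hedged illustration (a standalone sketch, not part of the app's callbacks below) of how the
# quantities described in the "Plots" and "Data tables" text are commonly derived from a fitted
# scikit-learn PCA: scores, loadings, squared cosines (cos2) and percentage contributions.
def example_pca_summary(x_scaled, n_components):
    # x_scaled: a 2-D numpy array that has already been standardised/centred as required.
    pca = PCA(n_components=n_components)
    scores = pca.fit_transform(x_scaled)                              # projected observations
    loadings = pca.components_.T * np.sqrt(pca.explained_variance_)  # variable/PC correlations
    cos2 = loadings ** 2                                              # quality of representation
    contrib = cos2 / cos2.sum(axis=0) * 100                          # contribution of each variable (%)
    return scores, loadings, cos2, contrib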
# READ FILE
def parse_contents(contents, filename):
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
if 'csv' in filename:
# Assume that the user uploaded a CSV file
df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
            df = df.fillna(0)
elif 'xls' in filename:
# Assume that the user uploaded an excel file
df = pd.read_excel(io.BytesIO(decoded))
            df = df.fillna(0)
        elif 'txt' in filename or 'tsv' in filename:
df = pd.read_csv(io.StringIO(decoded.decode('utf-8')), delimiter=r'\s+')
            df = df.fillna(0)
except Exception as e:
print(e)
return html.Div([
'There was an error processing this file.'
])
return df
@app.callback(Output('csv-data', 'data'),
[Input('data-table-upload', 'contents')],
[State('data-table-upload', 'filename')])
def parse_uploaded_file(contents, filename):
if not filename:
return dash.no_update
df = parse_contents(contents, filename)
    df = df.fillna(0)
return df.to_json(date_format='iso', orient='split')
@app.callback(Output('PC-Var-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
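        # Loadings: eigenvectors scaled by sqrt(eigenvalues); with standardised inputs these are
        # the correlations between the original variables and the principal components.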
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
data = Var_dff
elif outlier == 'Yes' and matrix_type == 'Correlation':
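        # Simple z-score outlier filter: keep only rows whose every feature lies within
        # 3 standard deviations of its column mean.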
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principle components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
data = Var_dff_outlier
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
data = Var_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
# combining principle components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
data = Var_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Cumulative Proportion of Explained Variance'],
mode='lines', line=dict(color='Red')))
return {'data': traces,
            'layout': go.Layout(title='<b>Scree Plot: Cumulative Proportion of Explained Variance</b>',
titlefont=dict(family='Helvetica', size=16),
xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True
}, yaxis={'title': 'Cumulative Explained Variance',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True,
'range': [0, 100]},
hovermode='closest', font=dict(family="Helvetica"), template="simple_white")
}
@app.callback(
Output('var-output-container-filter', 'children'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_output(outlier, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principle components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
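        # Interpolate the cumulative-variance curve to find the (fractional) number of PCs
        # needed to reach 70% explained variance, then round up to a whole component.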
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int)
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principle components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_covar)
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
# combining principle components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
@app.callback(Output('PC-Eigen-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')]
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principle components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
eigenvalues = pca.explained_variance_
Eigen_df = pd.DataFrame(data=eigenvalues, columns=['Eigenvalues'])
Eigen_dff = pd.concat([PC_df, Eigen_df], axis=1)
data = Eigen_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principle components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier = pca_outlier.explained_variance_
Eigen_df_outlier = pd.DataFrame(data=eigenvalues_outlier, columns=['Eigenvalues'])
Eigen_dff_outlier = pd.concat([PC_df_outlier, Eigen_df_outlier], axis=1)
data = Eigen_dff_outlier
elif outlier == 'No' and matrix_type == "Covariance":
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
eigenvalues_covar = pca_covar.explained_variance_
Eigen_df_covar = pd.DataFrame(data=eigenvalues_covar, columns=['Eigenvalues'])
Eigen_dff_covar = pd.concat([PC_df_covar, Eigen_df_covar], axis=1)
data = Eigen_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier_covar = pca_outlier_covar.explained_variance_
Eigen_df_outlier_covar = pd.DataFrame(data=eigenvalues_outlier_covar, columns=['Eigenvalues'])
Eigen_dff_outlier_covar = pd.concat([PC_df_outlier_covar, Eigen_df_outlier_covar], axis=1)
data = Eigen_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Eigenvalues'], mode='lines'))
return {'data': traces,
'layout': go.Layout(title='<b>Scree Plot Eigenvalues</b>', xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True},
titlefont=dict(family='Helvetica', size=16),
yaxis={'title': 'Eigenvalues', 'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True}, hovermode='closest',
font=dict(family="Helvetica"), template="simple_white", )
}
def round_up(n, decimals=0):
multiplier = 10 ** decimals
return math.ceil(n * multiplier) / multiplier
def round_down(n, decimals=0):
multiplier = 10 ** decimals
return math.floor(n * multiplier) / multiplier
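# e.g. round_up(0.123, 2) -> 0.13 and round_down(0.129, 2) -> 0.12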
@app.callback([Output('PC-feature-heatmap', 'figure'),
Output('color-range-container', 'children')],
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input("matrix-type-heatmap", "value"),
Input('csv-data', 'data')]
)
def update_graph_stat(outlier, colorscale, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
# INCLUDING OUTLIERS
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
    # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
    # explained variance of the two principal components
# print(pca.explained_variance_ratio_)
# Explained variance tells us how much information (variance) can be attributed to each of the principal components
    # loading of each feature in principal components
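    # loadings = eigenvectors scaled by sqrt(eigenvalues); for features standardized to unit variance,
    # each loading equals the Pearson correlation between a feature and a principal component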
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
# OUTLIERS REMOVED
z_scores_hm = scipy.stats.zscore(dff)
abs_z_scores_hm = np.abs(z_scores_hm)
filtered_entries_hm = (abs_z_scores_hm < 3).all(axis=1)
outlier_dff_hm = dff[filtered_entries_hm]
features1_outlier_hm = outlier_dff_hm.columns
features_outlier2 = list(features1_outlier_hm)
outlier_names1_hm = df[filtered_entries_hm]
outlier_names_hm = outlier_names1_hm.iloc[:, 0]
x_outlier_hm = outlier_dff_hm.loc[:, features_outlier2].values
# Separating out the target (if any)
# Standardizing the features
x_outlier_hm = StandardScaler().fit_transform(x_outlier_hm)
pca_outlier_hm = PCA(n_components=len(features_outlier2))
principalComponents_outlier_hm = pca_outlier_hm.fit_transform(x_outlier_hm)
principalDf_outlier_hm = pd.DataFrame(data=principalComponents_outlier_hm
, columns=['PC' + str(i + 1) for i in range(len(features_outlier2))])
    # combining principal components and target
finalDf_outlier_hm = pd.concat([outlier_names_hm, principalDf_outlier_hm], axis=1)
dfff_outlier_hm = finalDf_outlier_hm
# calculating loading
loading_outlier_hm = pca_outlier_hm.components_.T * np.sqrt(pca_outlier_hm.explained_variance_)
loading_df_outlier_hm = pd.DataFrame(data=loading_outlier_hm[0:, 0:], index=features_outlier2,
columns=['PC' + str(i + 1) for i in range(loading_outlier_hm.shape[1])])
loading_dff_outlier_hm = loading_df_outlier_hm.T
# COVAR MATRIX
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
loading_dff_covar = loading_df_covar.T
# COVAR MATRIX OUTLIERS REMOVED
if outlier == 'No' and matrix_type == "Correlation":
data = loading_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_dff_outlier_hm
elif outlier == 'No' and matrix_type == "Covariance":
data = loading_dff_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
loading_dff_outlier_covar = loading_df_outlier_covar.T
data = loading_dff_outlier_covar
    size_range = [round_up(data.values.min(), 2), round_down(data.values.max(), 2)]
traces.append(go.Heatmap(
z=data, x=features_outlier2, y=['PC' + str(i + 1) for i in range(loading_outlier_hm.shape[1])],
colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
        # each cell: the loading (correlation) between a feature and a principal component
colorbar={"title": "Loading",
# 'tickvals': [round_up(data.values.min(), 2),
# round_up((data.values.min() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down((data.values.max() + data.values.min())/2,2),
# round_down((data.values.max() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down(data.values.max(),2), ]
}
))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>PC and Feature Correlation Analysis</b>'),
xaxis=dict(title_text='Features', title_standoff=50),
titlefont=dict(family='Helvetica', size=16),
hovermode='closest', margin={'b': 110, 't': 50, 'l': 75},
font=dict(family="Helvetica", size=11),
annotations=[
dict(x=-0.16, y=0.5, showarrow=False, text="Principal Components",
xref='paper', yref='paper', textangle=-90,
font=dict(size=12))]
),
}, '{}'.format(size_range)
@app.callback(Output('feature-heatmap', 'figure'),
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input('csv-data', 'data')])
def update_graph_stat(outlier, colorscale, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
if outlier == 'No':
features1 = dff.columns
features = list(features1)
# correlation coefficient and coefficient of determination
correlation_dff = dff.corr(method='pearson', )
r2_dff = correlation_dff * correlation_dff
data = r2_dff
feat = features
elif outlier == 'Yes':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier = correlation_dff_outlier * correlation_dff_outlier
data = r2_dff_outlier
feat = features_outlier
traces.append(go.Heatmap(
z=data, x=feat, y=feat, colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
        # each cell: the squared Pearson correlation (R²) between a pair of features
colorbar={"title": "R²", 'tickvals': [0, 0.2, 0.4, 0.6, 0.8, 1]}))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>Feature Correlation Analysis</b>', y=0.97, x=0.6),
xaxis={},
titlefont=dict(family='Helvetica', size=16),
yaxis={},
hovermode='closest', margin={'b': 110, 't': 50, 'l': 180, 'r': 50},
font=dict(family="Helvetica", size=11)),
}
@app.callback(Output('feature-input', 'options'),
[Input('all-custom-choice', 'value'),
Input('csv-data', 'data')])
def activate_input(all_custom, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
options = []
elif all_custom == 'Custom':
options = [{'label': i, 'value': i} for i in dff.columns]
return options
@app.callback(Output('color-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')], )
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
@app.callback(Output('size-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item-second', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')])
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
# resume covar matrix...
@app.callback(Output('biplot', 'figure'),
[
Input('outlier-value-biplot', 'value'),
Input('feature-input', 'value'),
Input('customvar-graph-update', 'value'),
Input('color-scale-scores', 'value'),
Input('radio-target-item', 'value'),
Input('size-scale-scores', 'value'),
Input('radio-target-item-second', 'value'),
Input('all-custom-choice', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_graph_custom(outlier, input, graph_update, color, target, size, target2, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
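        # pair each loading with the origin so every feature is drawn as a vector from (0, 0)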
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_dff = pd.concat([zero_scale_df, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
# uses covariance matrix
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
dfff_outlier_scale = finalDf_outlier_scale.fillna(0)
# calculating loading
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
dfff_scale_covar = finalDf_scale_covar.fillna(0)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, line_group_scale_df_covar], axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
# COVARIANCE MATRIX OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
dfff_outlier_scale_covar = finalDf_outlier_scale_covar.fillna(0)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_outlier_scale_covar
trace2_all = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, showscale=False, size=12,
line=dict(width=0.5, color='DarkSlateGrey'),
),
)
####################################################################################################
# INCLUDE THIS
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_covar
variance = Var_outlier_scale_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], line=dict(color="#4f4f4f"),
name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text',
textposition='bottom right', textfont=dict(size=12)
)
lists[counter] = trace1_all
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2_all)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == 'Custom':
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_dff = pd.concat([zero_scale_input_df, line_group_scale_input_df], axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, line_group_scale_input_df_covar], axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
# COVARIANCE MATRIX OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale_input
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_scale_input_outlier
variance = Var_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_input_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_scale_input_outlier_covar
variance = Var_scale_input_outlier_covar
trace2 = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
marker_color=dat[color] if target == 'Yes' else None,
marker_size=dat[size] if target2 == 'Yes' else 12,
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, colorscale='Plasma',
sizeref=max(dat[size]) / (15 ** 2) if target2 == 'Yes' else None,
sizemode='area',
showscale=True if target == 'Yes' else False,
line=dict(width=0.5, color='DarkSlateGrey'),
colorbar=dict(title=dict(text=color if target == 'Yes' else None,
font=dict(family='Helvetica'),
side='right'), ypad=0),
),
)
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_scale_input_outlier_line_graph
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_scale_input_outlier_line_graph_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'],
line=dict(color="#666666" if target == 'Yes' else '#4f4f4f'), name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
)
lists[counter] = trace1
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(
Output('size-second-target-container', 'children'),
[Input('size-scale-scores', 'value'),
Input('outlier-value-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_output(size, outlier, data):
if not data:
return dash.no_update
if size is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
z_scores_dff_size = scipy.stats.zscore(dff)
abs_z_scores_dff_size = np.abs(z_scores_dff_size)
filtered_entries_dff_size = (abs_z_scores_dff_size < 3).all(axis=1)
dff_target_outlier_size = dff[filtered_entries_dff_size]
if outlier == 'Yes':
size_range = [round(dff_target_outlier_size[size].min(), 2), round(dff_target_outlier_size[size].max(), 2)]
elif outlier == 'No':
size_range = [round(dff[size].min(), 2), round(dff[size].max(), 2)]
return '{}'.format(size_range)
@app.callback(Output('cos2-plot', 'figure'),
[
Input('outlier-value-cos2', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-cos2", "value"),
Input('csv-data', 'data')
])
def update_cos2_plot(outlier, input, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
features1 = dff.columns
features = list(features1)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df['cos2'] = (loading_scale_df["PC1"] ** 2) + (loading_scale_df["PC2"] ** 2)
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_df_color = pd.DataFrame(data=loading_scale_df.iloc[:, 2], columns=['cos2'])
zero_scale_dff = pd.concat([zero_scale_df, zero_scale_df_color, line_group_scale_df], axis=1)
        loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
import pandas as pd
class CASTableBase(object):
source_sql = None
source_data = None
source_cas = None
source_caslib = None
cas_table_name = None
caslib = None
decision_source = None
decision = None
db_conn = None
clean_up = False
def __init__(self, viya_conn, db_conn=None):
self.viya_conn = viya_conn
self.register_db_connection(db_conn)
self.set_decision_source()
def __del__(self):
if self.clean_up:
self.remove_from_cas()
def register_db_connection(self, db_conn):
self.db_conn = db_conn
def set_decision_source(self):
if self.decision_source is None:
return
module_obj = __import__('CAS')
if hasattr(module_obj, self.decision_source):
decision_module = getattr(module_obj, self.decision_source)
self.decision = decision_module(self.db_conn, self.viya_conn)
def remove_from_cas(self):
try:
self.viya_conn.drop_cas_table(self.cas_table_name, self.caslib)
        except Exception:
pass
def update_from_records(self, records):
self.viya_conn.update_cas_table(records, self.cas_table_name, self.caslib)
def update_from_source(self):
self.update_from_records(self.get_source_data())
def get_source_data(self):
if self.source_data is not None:
return self.source_data
self.pre_process_source_data()
if self.source_cas and self.source_caslib:
self.source_data = self.viya_conn.get_cas_table(self.source_cas, self.source_caslib)
elif self.decision_source:
self.decision.exec()
self.source_data = self.viya_conn.get_cas_table(self.cas_table_name, self.caslib)
else:
if self.source_sql is not None:
self.source_data = self.read_sql(self.source_sql, True)
try:
self.source_data.drop(['index'], axis=1, inplace=True)
except KeyError:
pass
except IndexError:
pass
self.source_data = pd.DataFrame().from_records(self.source_data.to_records())
self.post_process_source_data()
return self.source_data
def pre_process_source_data(self):
pass
def post_process_source_data(self):
pass
def get_from_cas(self):
return self.viya_conn.get_cas_table(self.cas_table_name, self.caslib)
def read_sql(self, sql, clear_index=False):
self.__check_db_conn()
if clear_index:
            return pd.read_sql_query(sql, self.db_conn.conn, index_col=None)
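# Illustrative sketch only: a minimal subclass showing how CASTableBase is meant to be configured.
# The SQL, table name and caslib below are hypothetical placeholders, not values from this project.
class ExampleScoresTable(CASTableBase):
    source_sql = "SELECT customer_id, score FROM scores"  # hypothetical source query
    cas_table_name = "EXAMPLE_SCORES"                      # hypothetical CAS table name
    caslib = "PUBLIC"                                      # hypothetical caslib
    clean_up = True  # drop the CAS table when this object is garbage collected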
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import stats
def write_population(rabbit: list, fox: list, grassland: list, forest: list, pond: list, lake: list):
    f = open('population_graph', 'w')
    size = len(rabbit)
    for i in range(0, size):
        f.write(str(rabbit[i]) + " " + str(fox[i]) + " " + str(grassland[i]) + " " + str(forest[i]) + " " +
                str(pond[i]) + " " + str(lake[i]) + "\n")
    f.close()
def plot_animal_population(rabbit: list, fox: list):
x_coordinates = range(1, len(rabbit) + 1)
plt.plot(x_coordinates, rabbit, color='#3daeff', label='rabbit population')
plt.plot(x_coordinates, fox, color='#ff3679', label='fox population')
plt.legend(['rabbit', 'fox'])
plt.show()
def plot_resource_changes(grassland: list, forest: list, pond: list, lake: list):
x_coordinates = range(1, len(grassland) + 1)
plt.plot(x_coordinates, grassland, color='#21d9c3', label='grassland')
plt.plot(x_coordinates, forest, color='#460991', label='forest')
plt.plot(x_coordinates, pond, color='#088538', label='pond')
plt.plot(x_coordinates, lake, color='#d6d30b', label='lake')
plt.legend(['grassland', 'forest', 'pond', 'lake'])
plt.show()
def plot_all(grassland: list, forest: list, pond: list, lake: list, rabbit: list, fox: list):
x_coordinates = range(1, len(rabbit) + 1)
plt.plot(x_coordinates, rabbit, linestyle='dashed', color='#3daeff', label='rabbit population')
plt.plot(x_coordinates, fox, linestyle='dashed', color='#ff3679', label='fox population')
plt.plot(x_coordinates, grassland, color='#21d9c3', label='grassland')
plt.plot(x_coordinates, forest, color='#460991', label='forest')
plt.plot(x_coordinates, pond, color='#088538', label='pond')
plt.plot(x_coordinates, lake, color='#d6d30b', label='lake')
plt.legend(['rabbit', 'fox', 'grassland', 'forest', 'pond', 'lake'])
plt.show()
def write(grassland: list, forest: list, pond: list, lake: list, rabbit: list, fox: list):
f = open('population_graph', 'w')
size = len(rabbit)
for i in range(0, size):
f.write(str(rabbit[i]) + "," + str(fox[i]) + "," + str(grassland[i]) + "," + str(forest[i]) +
"," + str(lake[i]) + "," + str(pond[i]) + "\n")
f.close()
def find_genetic_variation_rabbit_std(rabbits: list) -> list:
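    # returns one standard deviation per gene, in order: mating_requirement, step_size,
    # thirst_resistance, hunger_resistance, predator_fear, vision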
variance = []
data = []
for rabbit in rabbits:
data.append(rabbit.genetics.mating_requirement)
variance.append(np.std(data))
data = []
for rabbit in rabbits:
data.append(rabbit.genetics.step_size)
variance.append(np.std(data))
data = []
for rabbit in rabbits:
data.append(rabbit.genetics.thirst_resistance)
variance.append(np.std(data))
data = []
for rabbit in rabbits:
data.append(rabbit.genetics.hunger_resistance)
variance.append(np.std(data))
data = []
for rabbit in rabbits:
data.append(rabbit.genetics.predator_fear)
variance.append(np.std(data))
data = []
for rabbit in rabbits:
data.append(rabbit.genetics.vision)
variance.append(np.std(data))
return variance
def find_genetic_variation_fox_std(foxes: list) -> list:
variance = []
data = []
for fox in foxes:
data.append(fox.genetics.mating_requirement)
variance.append(np.std(data))
data = []
for fox in foxes:
data.append(fox.genetics.step_size)
variance.append(np.std(data))
data = []
for fox in foxes:
data.append(fox.genetics.thirst_resistance)
variance.append(np.std(data))
data = []
for fox in foxes:
data.append(fox.genetics.hunger_resistance)
variance.append(np.std(data))
data = []
for fox in foxes:
data.append(fox.genetics.hunting_skill)
variance.append(np.std(data))
data = []
for fox in foxes:
data.append(fox.genetics.vision)
variance.append(np.std(data))
return variance
def find_genetic_variation_rabbit_mean_ad(rabbits: list) -> list:
variance = []
data = []
for rabbit in rabbits:
data.append(rabbit.genetics.mating_requirement)
variance.append(pd.Series(data).mad())
data = []
for rabbit in rabbits:
data.append(rabbit.genetics.step_size)
variance.append(pd.Series(data).mad())
data = []
for rabbit in rabbits:
data.append(rabbit.genetics.thirst_resistance)
variance.append(pd.Series(data).mad())
data = []
for rabbit in rabbits:
data.append(rabbit.genetics.hunger_resistance)
variance.append(pd.Series(data).mad())
data = []
for rabbit in rabbits:
data.append(rabbit.genetics.predator_fear)
    variance.append(pd.Series(data).mad())
# Import Packages
import pandas as pd
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
# ---- IMPORT DATA ----
# Read CSV
df = pd.read_csv("./RawData/Sample_Spreadsheet.csv")
# Create arrays to hold each column
ar_Sequence = df.iloc[:, 0].values
ar_NTerminal = df.iloc[:, 1].values
ar_CTerminal = df.iloc[:, 2].values
ar_Structure = df.iloc[:, 3].values
ar_CombinedX = df.iloc[:, 0:3].values
# Encode the categorical columns
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelEncoder = LabelEncoder()
encoded_Structure = labelEncoder.fit_transform(ar_Structure)
encoded_NTerminal = labelEncoder.fit_transform(ar_NTerminal)
encoded_CTerminal = labelEncoder.fit_transform(ar_CTerminal)
# Encoding peptide sequence
encoded_dfSequence = pd.DataFrame() # Not used yet
alphabet_Sequence = np.array([]) # Used
a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z = np.zeros((26, len(ar_Sequence)))
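# one zero-initialised array per letter of the alphabet, each with one entry per sequence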
count = 0
for Line in ar_Sequence:
# Save sequence in a dataframe
current_Sequence = []
for Letter in Line:
current_Sequence = np.append(current_Sequence, [ord(Letter)])
    # ignore_index=True is required to append an unnamed Series as a new row
    encoded_dfSequence = encoded_dfSequence.append(pd.Series(current_Sequence), ignore_index=True)
from pandas import read_csv, to_datetime
import os
import fiona
import hydrofunctions as hf
from pandas import date_range, DatetimeIndex, DataFrame
def get_station_daily_data(param, start, end, sid, freq='dv', out_dir=None):
try:
nwis = hf.NWIS(sid, freq, start_date=start, end_date=end)
df = nwis.df(param)
if freq == 'iv':
out_file = os.path.join(out_dir, '{}_{}.csv'.format(sid, start[:4]))
df.to_csv(out_file)
elif out_dir:
out_file = os.path.join(out_dir, '{}.csv'.format(sid))
df.to_csv(out_file)
else:
return df
except ValueError as e:
print(e)
except hf.exceptions.HydroNoDataError:
print('no data for {} to {}'.format(start, end))
pass
def get_station_daterange_data(year_start, daily_q_dir, aggregate_q_dir, start_month=None, end_month=None,
resample_freq='A', convert_to_mcube=True):
q_files = [os.path.join(daily_q_dir, x) for x in os.listdir(daily_q_dir)]
s, e = '{}-01-01'.format(year_start), '2020-12-31'
daterange = date_range(s, e, freq='D')
idx = DatetimeIndex(daterange, tz=None)
out_records, short_records = [], []
for c in q_files:
sid = os.path.basename(c).split('.')[0]
df = read_hydrograph(c)
if start_month or end_month:
idx_window = idx[idx.month.isin([x for x in range(start_month, end_month + 1)])]
df = df[df.index.month.isin([x for x in range(start_month, end_month + 1)])]
df = df[df.index.year.isin([x for x in range(year_start, 2021)])]
idx = idx_window
dflen, idxlen = df.shape[0], idx.shape[0]
if dflen < idxlen:
short_records.append(sid)
if float(dflen) / idxlen < 0.8:
print(sid, 'df: {}, idx: {}, q skipped'.format(df.shape[0], int(idx.shape[0])))
continue
df = df.reindex(idx)
# cfs to m ^3 d ^-1
if convert_to_mcube:
df = df * 2446.58
df = df.resample(resample_freq).agg(DataFrame.sum, skipna=False)
out_file = os.path.join(aggregate_q_dir, '{}.csv'.format(sid))
df.to_csv(out_file)
out_records.append(sid)
print(sid)
print('{} processed'.format(len(out_records)))
def read_hydrograph(c):
    # assumes the csv was written by get_station_daily_data above, with dates in the first column
    df = read_csv(c, index_col=0, parse_dates=True)
    return df
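# Illustrative usage sketch: the gauge id, dates and directories are hypothetical examples,
# and running it requires network access to the NWIS service.
if __name__ == '__main__':
    daily_dir, annual_dir = '/tmp/daily_q', '/tmp/annual_q'
    os.makedirs(daily_dir, exist_ok=True)
    os.makedirs(annual_dir, exist_ok=True)
    # '00060' is the NWIS parameter code for discharge in cfs
    get_station_daily_data('00060', '1990-01-01', '2020-12-31', '06192500', out_dir=daily_dir)
    get_station_daterange_data(1991, daily_dir, annual_dir, resample_freq='A')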
import pandas as pd
import numpy as np
from scipy.stats import bernoulli
from scipy.stats import uniform
def assign_bags(strategy='random_n_size', random_seed=None, **kwargs):
# Arguments:
# X: feature matrix, each feature vector should be represented as a row vector in the matrix
# num_bags: number of bags to make;
# will not effect the output if strategy==feature
# strategy: 'random': uniformly random with varying bag size, need arguments 'num_bags' and 'X'
# 'random_n_size': uniformly random with fixed bag size, need arguments 'num_bags' and 'X'
# 'feature': bag id is assigned based on the feature class, need arguments 'strategy_col' and 'X'
    #            'multisource': multi-source corruption, i.e. a given number of different bag proportions;
# need arguments 'distribution', 'y', 'pos_label';
# 'y' is the label vector
# 'distribution' is a dictionary mapping (pos_instances, neg_instances) to the
# number of bag under this distribution
# 'uniform_prop': for each bag, first generate a proportion with respect to a distribution,
# then generate the labels w.r.t Bernoulli distribution;
# need argument 'distribution', 'X', 'y', 'size', and 'pos_label';
# 'X' is the feature matrix
# 'y' is the label vector
# 'distribution' is a dictionary mapping [left_end, right_end] to the
# number of bag with this distribution
# 'bag_size' is the size of a bag
    # strategy_col: if strategy is 'feature', the name of the column in X used to assign bags
# random_seed:
#
# Functionality:
# assign bag id each instance; will NOT modify X
#
# Returns:
# (if the strategy is 'uniform_prop', returns X, y, bag_id)
# bag_id: a numpy ndarray of bag ids, corresponding to X by location;
# bag ids are integers from 0 to X.shape[0]
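    # Example (illustrative): ten equal-sized random bags
    #     bag_id = assign_bags(strategy='random_n_size', random_seed=0, X=X, num_bags=10)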
if random_seed is not None:
np.random.seed(random_seed) # fix a random seed if given
# assign random bag index to instances, bag size can vary
if strategy == 'random':
num_bags = kwargs['num_bags']
X = kwargs['X']
bag_id = np.random.randint(0, high=num_bags, size=X.shape[0])
# assign random bag index to instances, bag size is fixed
elif strategy == 'random_n_size':
num_bags = kwargs['num_bags']
X = kwargs['X']
# check if the number of instances is divisible by the number of bags
assert X.shape[0] % num_bags == 0, \
"number of instances %d is not divisible by number of bags %d" % (X.shape[0], num_bags)
n = X.shape[0] // num_bags # compute the size of each bag
# assign bag index by appending integers to a 1d DataFrame and shuffling it.
bag_id = pd.DataFrame(0, index=range(n), columns=['bag_id'])
for i in range(1, num_bags):
temp = pd.DataFrame(i, index=range(n), columns=['bag_id'])
bag_id = bag_id.append(temp, ignore_index=True)
np.random.shuffle(bag_id.values)
bag_id = bag_id.values.reshape(-1, )
# this is the method used in "no label no cry" code
elif strategy == 'feature':
strategy_col = kwargs['strategy_col']
X = kwargs['X']
bag_id = pd.Categorical(X[strategy_col]).codes
# assign bag ids with desired label proportions
elif strategy == 'multisource':
distr = kwargs['distribution']
y = kwargs['y']
pos_label = kwargs['pos_label']
bag_id = _multisource_helper(distr, y, pos_label)
elif strategy == 'uniform_prop':
distr = kwargs['distribution']
X = kwargs['X']
y = kwargs['y']
pos_label = kwargs['pos_label']
bag_size = kwargs['bag_size']
        distr_ = {}  # dictionary mapping (pos_instances, neg_instances) to the number of bags with this distribution
for interval, num in distr.items():
left, right = interval
for i in range(num):
prob = uniform.rvs(loc=left, scale=right - left)
pos_num = bernoulli.rvs(prob, size=bag_size).sum()
neg_num = bag_size - pos_num
if not ((pos_num, neg_num) in distr_.keys()):
distr_[(pos_num, neg_num)] = 0
distr_[(pos_num, neg_num)] += 1
pos_total = (y == pos_label).astype(int).sum()
neg_total = (y != pos_label).astype(int).sum()
pos_in_bag = 0
neg_in_bag = 0
for prop, num in distr_.items():
pos_in_bag += prop[0] * num
neg_in_bag += prop[1] * num
# check the number of labels
assert pos_in_bag <= pos_total, "insufficient positive labels, expect %d, have %d" % (pos_in_bag, pos_total)
assert neg_in_bag <= neg_total, "insufficient negative labels, expect %d, have %d" % (neg_in_bag, neg_total)
# done checking
# sample labels
X_pos = X[y == pos_label]
y_pos = y[y == pos_label]
X_neg = X[y != pos_label]
y_neg = y[y != pos_label]
random_perm_pos = np.random.permutation(X_pos.index)
X_pos_shuffled = X_pos.reindex(random_perm_pos)
y_pos_shuffled = y_pos.reindex(random_perm_pos)
X_pos_sample = X_pos_shuffled[:pos_in_bag]
y_pos_sample = y_pos_shuffled[:pos_in_bag]
random_perm_neg = np.random.permutation(X_neg.index)
X_neg_shuffled = X_neg.reindex(random_perm_neg)
y_neg_shuffled = y_neg.reindex(random_perm_neg)
X_neg_sample = X_neg_shuffled[:neg_in_bag]
y_neg_sample = y_neg_shuffled[:neg_in_bag]
new_X = pd.concat([X_pos_sample, X_neg_sample], ignore_index=True)
        new_y = pd.concat([y_pos_sample, y_neg_sample], ignore_index=True)
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from tqdm import tqdm
import itertools
class Example1:
def get(self):
df = pd.read_csv('https://raw.githubusercontent.com/jeffrichardchemistry/pyECLAT/master/data/base1.csv', header=None)
return df
class Example2:
def get(self):
df = pd.read_csv('https://raw.githubusercontent.com/jeffrichardchemistry/pyECLAT/master/data/base2.csv', header=None)
return df
class ECLAT():
"""
Arguments
---------------------
data
The `data` is a pandas dataframe format. The data should look like the example below.
In this case, each line represents the purchase of one person.
>>> Example of data format
0 1 2 3
0 milk beer bread butter
1 coffe bread butter NaN
2 coffe bread butter NaN
3 milk coffe bread butter
4 beer NaN NaN NaN
5 butter NaN NaN NaN
6 bread NaN NaN NaN
7 bean NaN NaN NaN
8 rice bean NaN NaN
9 rice NaN NaN NaN
    After creating an ECLAT instance, a binary dataframe is created,
in which the column names are the product names. 0 = 'No' 1 = 'Yes' for a transaction that occurred.
>>> eclat_instance = ECLAT(df=data)
eclat_instance.df_bin
bean beer bread butter milk rice coffe
0 0 1 1 1 1 0 0
1 0 0 1 1 0 0 1
2 0 0 1 1 0 0 1
3 0 0 1 1 1 0 1
4 0 1 0 0 0 0 0
5 0 0 0 1 0 0 0
6 0 0 1 0 0 0 0
7 1 0 0 0 0 0 0
8 1 0 0 0 0 1 0
9 0 0 0 0 0 1 0
verbose
Show a progress bar in three steps.
"""
def __init__(self, data, verbose=False):
self.data = data
self.uniq_ = []
ECLAT._getUnique(self)
if verbose:
self.df_bin = ECLAT._makeTable(self, verbose=True)
else:
self.df_bin = ECLAT._makeTable(self, verbose=False)
def _getUnique(self):
# Return a list with unique names of features
dif_atrib = []
n_columns = len(self.data.columns)
for column in range(n_columns):
dif_atrib.extend(list(self.data.iloc[:, column].unique()))
self.uniq_ = list(set(dif_atrib))
def _makeTable(self, verbose=False):
"""
Remove NaN values and return a binary table with the product names as column names. 0 = 'No', 1 = 'Yes'
"""
columns_table = self.uniq_
if np.nan in columns_table:
columns_table.remove(np.nan)
elif 'nan' in columns_table:
columns_table.remove('nan')
elif 'NaN' in columns_table:
columns_table.remove('NaN')
ECLAT._getUnique(self) #don't modify uniq_
dict_index = {}
lst_index = []
if verbose:
for column in tqdm(columns_table):
for i in range(len(self.data.columns)):
lst_index.extend(list(self.data.loc[self.data[i] == column].index.values))
if i == len(self.data.columns) - 1 :
dict_index[column] = list(set(lst_index))
lst_index = []
else:
for column in columns_table:
for i in range(len(self.data.columns)):
lst_index.extend(list(self.data.loc[self.data[i] == column].index.values))
if i == len(self.data.columns) - 1 :
dict_index[column] = list(set(lst_index))
lst_index = []
data_init = {}
if verbose:
for i in tqdm(columns_table):
data_init[i] = [0 for i in range(len(self.data))]
else:
for i in columns_table:
data_init[i] = [0 for i in range(len(self.data))]
df_table = | pd.DataFrame(data_init) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
import umap
import mne, numpy as np
import seaborn as sns
import glob
import pandas as pd
import os, os.path as op
from sklearn.preprocessing import StandardScaler
from scipy.stats import kurtosis
from scipy.signal import welch
import pylab
def get_subjid(filename):
return os.path.basename(filename).split('_')[0]
def get_raw_subj_data(subjid, topdir='/fast/ICA/*/'):
glob_cmd = os.path.join(topdir, subjid+'*_300srate.fif')
return glob.glob(glob_cmd)[0]
def get_distribution(filename):
#Datasets are in the following format: /topdir/Distribution/Dataset
return filename.split('/')[-2]
def assign_repo_EEG_labels(dframe):
'''
Add repo specific EEG names to data
Parameters
----------
dframe : pd.DataFrame
DESCRIPTION.
Returns
-------
dframe : pd.DataFrame
DataFrame with EOG and ECG labels marked as veog, heog, ekg.
'''
dframe['veog'] = None
dframe['heog'] = None
dframe['ekg'] = None
dframe.loc[dframe.distribution=='CAMCAN', ['veog','heog','ekg']] = \
'EOG061','EOG062','ECG063'
dframe.loc[dframe.distribution=='MOUS', ['veog','heog','ekg']] = \
'EEG058','EEG057','EEG059'
dframe.loc[dframe.distribution=='HCP', ['veog','heog','ekg']] = \
'VEOG','HEOG','ECG'
dframe.loc[dframe.distribution=='NIH_HV', ['veog','heog','ekg']] = \
None, None, None
return dframe
def populate_dframe(topdir='/fast/ICA/', load_ica=False):
dsets=glob.glob(op.join(topdir, '*/*0-ica.fif'))
dframe=pd.DataFrame(dsets, columns=['ica_filename'])
dframe['distribution']=dframe['ica_filename'].apply(get_distribution)
dframe['subjid']=dframe['ica_filename'].apply(get_subjid)
dframe['raw_fname'] = dframe['subjid'].apply(get_raw_subj_data)
dframe.distribution.value_counts()
dframe = assign_repo_EEG_labels(dframe)
if load_ica == False:
return dframe
# else:
# return dframe,
def get_consistent_ch_names(current_dframe):
'''Hack to get all the same topomap dimensions'''
ch_names=set()
for index,row in current_dframe.iterrows():
raw = mne.io.read_raw_fif(row['raw_fname'])
ch_names=set(raw.ch_names).union(ch_names)
if current_dframe.iloc[0]['distribution']=='MOUS':
ch_names = [i for i in ch_names if i[0]=='M']
# elif current_dframe.iloc[0]['distribution']=='CAMCAN':
# ch_names = [i for i in ch_names if i[0]=='M']
# tmp=ica.get_sources(raw, start=0, stop=100*raw.info['sfreq'])
# freqs, _ =welch(tmp._data, fs=raw.info['sfreq'])
return ch_names
def calc_hcp_bipolar(row):
'''Load info from the mne-hcp and return bipolar calculated ECG, VEOG, HEOG'''
subjid = row.subjid
#info read from mne-hcp not the same as the one tied to the raw dataset
info = mne.io.read_info(f'/fast/ICA/HCPinfo/{subjid}-info.fif')
raw=mne.io.read_raw_fif(row.raw_fname, preload=True)
ecgPos_idx=info.ch_names.index('ECG+')
ecgNeg_idx=info.ch_names.index('ECG-')
veogPos_idx=info.ch_names.index('VEOG+')
veogNeg_idx=info.ch_names.index('VEOG-')
heogPos_idx=info.ch_names.index('HEOG+')
heogNeg_idx=info.ch_names.index('HEOG-')
ecg=raw._data[ecgPos_idx,:]-raw._data[ecgNeg_idx,:]
veog=raw._data[veogPos_idx,:]-raw._data[veogNeg_idx,:]
heog=raw._data[heogPos_idx,:]-raw._data[heogNeg_idx,:]
raw._data[ecgPos_idx,:]=ecg
raw._data[veogPos_idx,:]=veog
raw._data[heogPos_idx,:]=heog
raw.rename_channels({raw.ch_names[ecgPos_idx]:'ECG'})
raw.rename_channels({raw.ch_names[veogPos_idx]:'VEOG'})
raw.rename_channels({raw.ch_names[heogPos_idx]:'HEOG'})
raw.drop_channels(raw.ch_names[ecgNeg_idx])
raw.drop_channels(raw.ch_names[veogNeg_idx])
raw.drop_channels(raw.ch_names[heogNeg_idx])
return raw
def assess_ICA_spectral_properties(current_dframe):
'''Loop over all datasets and return ICA metrics'''
current_dframe.reset_index(inplace=True)
#Load first dataset to allocate size to the dataframe
raw = mne.io.read_raw_fif(current_dframe.iloc[0]['raw_fname'])
ch_names = get_consistent_ch_names(current_dframe)
ica = mne.preprocessing.read_ica(current_dframe.iloc[0]['ica_filename'])
ica_timeseries = ica.get_sources(raw, start=0, stop=100*raw.info['sfreq'])
comp_num, samples = ica_timeseries._data.shape
freqs, _ = welch(ica_timeseries._data, fs=raw.info['sfreq'])
spectra_dframe = pd.DataFrame(np.zeros([comp_num*len(current_dframe),
len(freqs)]), columns = freqs)
spectra_dframe['kurtosis'] = 0
for index,row in current_dframe.iterrows():
print(index)
veog_ch, heog_ch, ekg_ch = row[['veog', 'heog', 'ekg']]
ica = mne.preprocessing.read_ica(row['ica_filename'])
component = ica.get_components()
if row.distribution == 'HCP':
raw = calc_hcp_bipolar(row)
else:
raw = mne.io.read_raw_fif(row['raw_fname'], preload=True)
ch_indexs = set(raw.ch_names).intersection(ch_names)
# raw = mne.io.read_raw_fif(row['raw_fname'], preload=True)
ica_timeseries = ica.get_sources(raw, start=0, stop=100*raw.info['sfreq'])
freqs, power = welch(ica_timeseries._data, fs=raw.info['sfreq'])
log_power = np.log(power)
spectra_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), freqs]=log_power
spectra_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'kurtosis'] = kurtosis(ica_timeseries._data, axis=1)
spectra_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'component_num']= range(comp_num)
spectra_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'subjid'] = row['subjid']
try :
bads_ecg=ica.find_bads_ecg(raw, ch_name=ekg_ch, method='correlation')[1]
spectra_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'ecg_bads_corr'] = bads_ecg
except:
spectra_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'ecg_bads_corr'] = np.NaN
try:
bads_ecg_ctps = ica.find_bads_ecg(raw, ch_name=ekg_ch, method='ctps')[1]
spectra_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'ecg_bads_ctps'] = bads_ecg_ctps
except:
spectra_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'ecg_bads_ctps'] = np.NaN
try:
bads_veog = ica.find_bads_eog(raw, ch_name=veog_ch)[1]
spectra_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'veog_bads_corr'] = bads_veog
except:
spectra_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'veog_bads_corr'] = np.NaN
try:
bads_heog = ica.find_bads_eog(raw, ch_name=heog_ch)[1]
spectra_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'heog_bads_corr'] = bads_heog
except:
spectra_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'heog_bads_corr'] = np.NaN
# veog_corr[index*comp_num:(index*comp_num + comp_num)] = ica.find_bads_eog(raw, ch_name=veog_ch)[1]
# heog_corr[index*comp_num:(index*comp_num + comp_num)] = ica.find_bads_eog(raw, ch_name=heog_ch)[1]
return spectra_dframe
def plot_topo_hack(normalized_topo):
ref_sens = mne.io.read_raw_fif('/fast/ICA/CAMCAN/sub-CC621184_ses-rest_task-rest_proc-sss_300srate.fif')
ref_sens.crop(0, 2)
ref_sens.pick_types(meg='mag')
ref_sens.load_data()
epochs = mne.make_fixed_length_epochs(ref_sens)
evoked = epochs.average()
if normalized_topo.shape.__len__() == 1:
evoked._data[:,0]=normalized_topo
evoked.plot_topomap(times=evoked.times[0], colorbar=False)
else:
evoked._data[:,:25]=normalized_topo
evoked.plot_topomap(times=evoked.times[0:25], ncols=5, nrows=5, colorbar=False)
def assess_ICA_topographic_properties(current_dframe):
'''Loop over all datasets and return ICA metrics'''
ref_sens = mne.io.read_raw_fif('/fast/ICA/CAMCAN/sub-CC621184_ses-rest_task-rest_proc-sss_300srate.fif', preload=True)
ref_sens.pick_types(meg='mag')
ref_ica_fname = '/fast/ICA/CAMCAN/sub-CC621184_ses-rest_task-rest_proc-sss_0-ica.fif'
ref_ica = mne.preprocessing.read_ica(ref_ica_fname)
current_dframe.reset_index(inplace=True, drop=True)
ica = mne.preprocessing.read_ica(current_dframe.iloc[0]['ica_filename'])
_, comp_num = ica.get_components().shape #_timeseries._data.shape
topo_dframe = pd.DataFrame(np.zeros([comp_num*len(current_dframe), 102]), columns = range(102))
for index,row in current_dframe.iterrows():
print(index)
veog_ch, heog_ch, ekg_ch = row[['veog', 'heog', 'ekg']]
ica = mne.preprocessing.read_ica(row['ica_filename'])
component = ica.get_components()
convert_to_ref=mne.forward._map_meg_or_eeg_channels(ica.info,
ref_sens.info,
# reference_sens.info,
'accurate',
(0., 0., 0.04))
normalized_topo = convert_to_ref @ component
mins_= normalized_topo.min(axis=0)
maxs_ = normalized_topo.max(axis=0)
standardized_topo = 2 * (normalized_topo - mins_ ) / (maxs_ - mins_) - 1
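# min-max rescale each component topography to [-1, 1] so amplitudes are comparable across components and subjects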
topo_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), range(102)]=standardized_topo.T
topo_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'component_num']= range(comp_num)
topo_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'subjid'] = row['subjid']
return topo_dframe
# Make an input dataframe of paths
dframe = populate_dframe()
# Loop over ICA filepaths to save out csv files
for dist in dframe.distribution.unique():
current_dframe = dframe[dframe.distribution==dist]
out_dframe = assess_ICA_spectral_properties(current_dframe)
out_dframe.to_csv(f'/fast/ICA/Spectra_{dist}.tsv', sep='\t', index=None)
# Compile csv files into larger dataframe again
dlist = []
for repo in ['CAMCAN', 'HCP','MOUS','NIH_HV']:
if 'tmp' in locals().keys() : del tmp
tmp = pd.read_csv(f'Spectra_{repo}.tsv', sep='\t')
tmp['distribution'] = repo
dlist.append(tmp)
combined = pd.concat(dlist)
combined.reset_index(inplace=True)
combined['ecg_bad']=combined['ecg_bads_ctps'] > 0.2
combined['eog_bad']= (np.abs(combined.heog_bads_corr) > .25) | (np.abs(combined.veog_bads_corr) > .25)
def merge_dframes_topo_spectral():
spectral_dframe =combined[combined.distribution.isin(['CAMCAN','MOUS'])].copy()
spectral_1_40 = spectral_dframe[spectral_dframe.columns[1:40]].copy()
mins_= spectral_1_40.min(axis=1).values #spectral_dframemalized_topo.min(axis=0)
maxs_ = spectral_1_40.max(axis=1).values # normalized_topo.max(axis=0)
# standardized_spetra = 2 * (spectral_1_40 - mins_ ) / (maxs_ - mins_) - 1
denom = maxs_ - mins_
numer = spectral_1_40.values - mins_[:,np.newaxis]
standardized_spectra = 2 * numer/denom[:,np.newaxis] - 1
spectral_dframe.iloc[:,1:40]=standardized_spectra
from scipy.stats import zscore
spectral_dframe.loc[spectral_dframe['kurtosis']>50,'kurtosis']=50
spectral_dframe.loc[:,'kurtosis']=zscore(spectral_dframe.loc[:,'kurtosis']).astype(np.float16)
bads_info = spectral_dframe[['subjid', 'ecg_bad','eog_bad','component_num','distribution','kurtosis']]
bads_info = spectral_dframe[['subjid', 'ecg_bad','eog_bad','component_num','distribution','kurtosis']\
+ list(spectral_dframe.columns[1:40])]
topo_dframe = pd.read_csv('Topo_Dframe_ELEK102_MOUS_CAM.tsv', sep='\t')
dframe=pd.merge(topo_dframe, bads_info, on=['subjid','component_num'])
dframe['bads'] = None# 'Good'
dframe.loc[dframe['ecg_bad'],'bads']='ECG'
dframe.loc[dframe['eog_bad'],'bads']='EOG'
dframe = dframe.sample(frac=1).reset_index(drop=True)
full_mat = pd.concat([dframe.iloc[:,range(102)],dframe.loc[:,spectral_dframe.columns[1:40]],
dframe.loc[:,'kurtosis']],axis=1)
reducer = umap.UMAP(n_components=3, n_neighbors=10, min_dist=0.05,
metric='manhattan')#'cosine')#'manhattan')#'cosine') #manhattan') #sine') #'manhattan')
embedding = reducer.fit_transform(full_mat.values) #normalized_data)
# umap.plot(reducer, labels=dframe['bads'])
fig, axes = matplotlib.pyplot.subplots(2,1, sharex=True, sharey=True,
figsize=(10,10))
### Up to the above - works
#fig.suptitle(dist)
sns.scatterplot(ax=axes[0], x=embedding[:,0], y=embedding[:,1],
hue=dframe['bads'])#, style=dframe['distribution']) #np.abs(combined_dframe['ecg_bad']))
sns.scatterplot(ax=axes[1], x=embedding[:,1], y=embedding[:,2],
hue=dframe['bads'])#, style=dframe['distribu
# sns.scatterplot(ax=axes[0,1], x=embedding[:,1], y=embedding[:,2],
# hue=dframe['ecg_bad']) #np.abs(combined_dframe['ecg_bad']))
sns.scatterplot(ax=axes[1], x=embedding[:,0], y=embedding[:,1],
hue=dframe['eog_bad'])#, style=dframe['distribution'])
# sns.scatterplot(ax=axes[1,1], x=embedding[:,1], y=embedding[:,2],
# hue=dframe['eog_bad'])
fig, axes = matplotlib.pyplot.subplots(2,2, sharex=True, sharey=True,
figsize=(20,20))
#fig.suptitle(dist)
sns.scatterplot(ax=axes[0,0], x=embedding[:,0], y=embedding[:,1],
hue=dframe['distribution'])#, style=dframe['distribution']) #np.abs(combined_dframe['ecg_bad']))
# sns.scatterplot(ax=axes[0,1], x=embedding[:,1], y=embedding[:,2],
# hue=dframe['ecg_bad']) #np.abs(combined_dframe['ecg_bad']))
sns.scatterplot(ax=axes[1,0], x=embedding[:,0], y=embedding[:,1],
hue=dframe['eog_bad'])#, style=dframe['distribution'])
# # Calculate the UMAP embedding
# ### Topographic Clustering
reducer = umap.UMAP(n_components=3, n_neighbors=50, min_dist=0.0,
metric='cosine') #manhattan') #sine') #'manhattan')
embedding = reducer.fit_transform(np.abs(full_mat.values))
# ### Frequency Clustering
reducer = umap.UMAP(n_components=3, n_neighbors=50, min_dist=0.0,
metric='cosine') #manhattan') #sine') #'manhattan')
embedding = reducer.fit_transform(spectra_dframe.values)
def plot_spectra_distr_ave(combined):
'''Plot average spectra for different open source repositories'''
freq_idxs = list(combined.columns[range(1,130)])
col_idxs = freq_idxs + ['subjid', 'distribution','ecg_bad', 'eog_bad']
tmp_dframe = combined.loc[:, col_idxs]
melted = pd.melt(tmp_dframe, id_vars=['distribution','subjid', 'ecg_bad', 'eog_bad'] ,
value_vars=freq_idxs)
sns.lineplot(x='variable', y='value', hue='distribution', data=melted)
def plot_ecg_std(combined):
combined=combined[combined.distribution != 'NIH_HV']
freq_idxs = list(combined.columns[range(1,40)])
# freq_idxs = list(combined.columns[range(1,130)])
col_idxs = freq_idxs + ['subjid', 'distribution','ecg_bad']
tmp_dframe = combined.loc[:, col_idxs]
melted = pd.melt(tmp_dframe, id_vars=['distribution','subjid', 'ecg_bad'] , value_vars=freq_idxs)
# sns.lineplot(x='variable', y='value', hue='distribution', style='ecg_bad', data=melted)
sns.lineplot(x='variable', y='value', hue='distribution', style='ecg_bad', data=melted, ci=np.var)
def topo_correlation_flip(reduced_dframe, ref_topo='first_val'):
if type(ref_topo)==str: #'first_val':
if ref_topo=='first_val':
ref_topo=reduced_dframe.iloc[0,range(102)]
elif ref_topo=='mean':
ref_topo=reduced_dframe.iloc[:,range(102)].mean(axis=0)
#If ref_topo is an array, this will be used as is
topos=reduced_dframe.copy().iloc[:,range(102)]
corrvals = [np.correlate(topos.iloc[i], ref_topo)[0] for i in range(len(topos))]
corrvals = np.array(corrvals)
corrvals[corrvals<0]=-1
corrvals[corrvals>0]=1
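# corrvals is now +/-1 per row: multiplying by it flips the sign of any topography that is
# anti-correlated with the reference, so all maps share the same polarity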
assert len(corrvals) == len(reduced_dframe)
reduced_dframe.iloc[:,range(102)] *= corrvals[:,np.newaxis]
return reduced_dframe
def plot_ecg_distribution():
topo_dframe = | pd.read_csv('Topo_Dframe_ELEK102_MOUS_CAM.tsv', sep='\t') | pandas.read_csv |
import math
import osmnx as ox
import matplotlib.pyplot as plt
import numpy as np
import geopy.distance
import imageio
from timeit import default_timer as timer
import pandas as pd
import seaborn as sns
import scipy
from scipy.stats import norm
import requests
import json
import os
from os.path import join, dirname, abspath
from glob import glob
import io
import pathlib
from pymongo import MongoClient
from datetime import datetime
from bson import ObjectId
from pyqtree import Index
from shapely import geometry
import random
import shapely.geometry as ge
import itertools
import networkx as nx
import shapely
import random
from shapely.geometry import LineString, Point
from sshtunnel import SSHTunnelForwarder
import os.path
class hostnameManager:
@staticmethod
def getHostName(hostType):
hostname='localhost'
if hostType in 'prod':
hostname='automotive.vizible.zone'
elif hostType in 'test':
hostname='dev.vizible.zone'
return hostname
@staticmethod
def getPemFileName(hostType):
pemFileName=''
if hostType in 'prod':
pemFileName='viziblezone-prod.pem'
elif hostType in 'test':
pemFileName='automotive-dev.pem'
return pemFileName
class mongoConnection:
def __init__(self):
self.client=None
self.server=None
self.db=None
def connectToDB(self,connectionType):
MONGO_HOST = hostnameManager.getHostName(connectionType)
MONGO_DB = "VizibleZone"
MONGO_USER = "ubuntu"
if (connectionType == 'prod'):
REMOTE_ADDRESS = ('docdb-2019-06-13-11-43-18.cluster-cybs9fpwjg54.eu-west-1.docdb.amazonaws.com', 27017)
else:
REMOTE_ADDRESS = ('127.0.0.1', 27017)
pem_ca_file = 'rds-combined-ca-bundle.pem'
pem_server_file = hostnameManager.getPemFileName(connectionType)
pem_path = '../pems/'
if not os.path.exists(pem_path + pem_server_file):
pem_path = pem_path[1:]
self.server = SSHTunnelForwarder(
MONGO_HOST,
ssh_pkey=pem_path + pem_server_file,
ssh_username=MONGO_USER,
remote_bind_address=REMOTE_ADDRESS
)
self.server.start()
if (connectionType == 'prod'):
self.client = MongoClient('127.0.0.1',
self.server.local_bind_port,
username='viziblezone',
password='<PASSWORD>',
ssl=True,
ssl_match_hostname=False,
ssl_ca_certs=(pem_path + pem_ca_file),
authMechanism='SCRAM-SHA-1') # server.local_bind_port is assigned local port
else:
self.client = MongoClient('127.0.0.1', self.server.local_bind_port) # server.local_bind_port is assigned local port
self.db = self.client[MONGO_DB]
print('db', self.db)
print('\nYou are connected to ' + connectionType + ' server\n')
return True
def dispose(self):
print("Closing connection to DB")
self.client.close()
self.server.stop()
def convert_str_to_datetime(row):
t = row['timestamp_local']
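# drop the ':' from the timezone offset (e.g. '+03:00' -> '+0300') so '%z' can parse it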
return datetime.strptime(t[:-3] + t[-2:], '%Y-%m-%dT%H:%M:%S.%f%z')
# In[77]:
import math
# {['latitude':1]},'gps_longitude':1 ,'gps_speed':1
def read_VZ_from_mongo(mc,_id):
dfjson = pd.DataFrame(mc.db.sensors.find({"_id": ObjectId(_id)}, {"_id": 1, 'gps': 1, 'user_id': 1, 'device_type': 1, "timestamp_local": 1}))
if len(dfjson) == 0:
print("_id {} is empty".format(_id))
return dfjson
# find number_of_samples
vecs = ['gps']
singles = ['_id', 'user_id', 'device_type', "timestamp_local"]
vecs_dfs = []
min_ts = np.inf
max_ts = 0
for column in vecs:
if column in dfjson.columns:
t = pd.DataFrame(dfjson[column][0])
if len(t) > 0:
t.columns = map(str.lower, t.columns)
min_ts = min(min_ts, t.timestamp.min())
max_ts = max(max_ts, t.timestamp.max())
merge_on = round(t.timestamp / 50) # time resolution 50ms
t = t.drop(["timestamp"], axis=1)
if "_id" in t.columns:
t = t.drop(["_id"], axis=1)
t = t.add_prefix(column + "_")
t["merge_on"] = merge_on
t = t.drop_duplicates(subset=["merge_on"])
vecs_dfs.append(t)
else:
print("{} is missing from _id {}".format(column, _id))
df_tmp = pd.DataFrame()
df_tmp["merge_on"] = np.arange(round(min_ts / 50), round(max_ts / 50))
df_tmp["timestamps_utc"] = pd.to_datetime(np.array(df_tmp.merge_on) * 50, unit='ms')
for df_i in vecs_dfs:
df_tmp = pd.merge(left=df_tmp, right=df_i, on="merge_on", how="left")
df_tmp = df_tmp.fillna(method="ffill")
df_tmp = df_tmp.iloc[np.arange(1, len(df_tmp), 2)] # take only 100ms
df_tmp = df_tmp.reset_index(drop=True)
for column in singles:
if column in dfjson.columns:
df_tmp[column] = dfjson[column][0]
else:
print("{} is missing from _id {}".format(column, _id))
df_tmp = df_tmp.rename(columns={"gps_bearing": "gps_azimuth",
"gps_bearing_accuracy": "gps_azimuth_accuracy", 'testing_mode_value': 'testing_mode'})
# correct and add columns
# create timestamps_value (local)
s = df_tmp.timestamp_local.iloc[0]
seconds_tz = int(s[-5:-3]) * 3600 + int(s[-2:]) * 60
df_tmp["timestamp"] = df_tmp.timestamps_utc.dt.tz_localize('UTC').dt.tz_convert(seconds_tz)
df_tmp["timestamps_value"] = df_tmp["timestamp"]
# clean zeros in the lat/long reading
df_tmp = df_tmp[df_tmp["gps_latitude"] < df_tmp["gps_latitude"].median() + 1]
df_tmp = df_tmp[df_tmp["gps_latitude"] > df_tmp["gps_latitude"].median() - 1]
# def calc_tot_acceleration(row):
# r = row['linear_acceleration_x_axis'] ** 2 + row['linear_acceleration_y_axis'] ** 2 + row[
# 'linear_acceleration_z_axis'] ** 2
# return r ** 0.5
#
#
# def calc_tot_gyro(row):
# r = row['gyroscope_x_axis'] ** 2 + row['gyroscope_y_axis'] ** 2 + row['gyroscope_z_axis'] ** 2
# return r ** 0.5
#
# df_tmp['linear_acceleration'] = df_tmp.apply(calc_tot_acceleration, axis=1)
# df_tmp['gyroscope_tot'] = df_tmp.apply(calc_tot_gyro, axis=1)
return df_tmp
def get_df_for_ids(mc,ids):
print(len(ids), ' ids')
print(ids)
# list_ids=list(df_walk._id)
df_vz = | pd.DataFrame() | pandas.DataFrame |
# coding: utf8
from __future__ import unicode_literals
from pathlib import Path
from spacy.util import load_model_from_init_py, get_model_meta
from spacy.language import Language
from spacy.tokens import Span
from spacy.matcher import PhraseMatcher
import os.path
from collections import defaultdict
import string
import re
import glob
import pandas as pd
import csv
import re
import sys
def read_in_files(labels: list, directory: str):
all_entities_dict = defaultdict(list)
#find all files in chosen directory
for file in os.listdir(directory):
encoding = 'utf-8'
if file.endswith(".csv"):
path = os.path.join(directory, file)
filename = re.sub('.csv', '', file)
if filename == 'land_loc':
encoding = 'latin-1'
label = find_label(filename, labels)
if label != None:
#get entity_list for that file and that label
entity_list = get_entity_list(path, encoding, label, filename)
for entity in entity_list:
all_entities_dict['label'].append(label)
all_entities_dict['name'].append(entity)
#create DataFrame from pairs of label and name
new_df = | pd.DataFrame.from_dict(all_entities_dict) | pandas.DataFrame.from_dict |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import re
import ipaddress
import codecs
import time
import pandas as pd
import urllib3
from urllib3 import util
from classifier4gyoithon.GyoiClassifier import DeepClassifier
from classifier4gyoithon.GyoiExploit import Metasploit
from classifier4gyoithon.GyoiReport import CreateReport
from util import Utilty
# Type of printing.
OK = 'ok' # [*]
NOTE = 'note' # [+]
FAIL = 'fail' # [-]
WARNING = 'warn' # [!]
NONE = 'none' # No label.
# Identify product name using signature.
def identify_product(category, target_url, response, utility):
product_list = []
reason_list = []
full_path = os.path.dirname(os.path.abspath(__file__))
file_name = 'signature_' + category + '.txt'
try:
with codecs.open(os.path.join(full_path + '/signatures/', file_name), 'r', 'utf-8') as fin:
matching_patterns = fin.readlines()
for pattern in matching_patterns:
items = pattern.replace('\r', '').replace('\n', '').split('@')
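# each signature line is expected to look like '<product>@<regex pattern>'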
keyword_list = []
product = items[0]
signature = items[1]
list_match = re.findall(signature, response, flags=re.IGNORECASE)
if len(list_match) != 0:
# Output result (header)
keyword_list.append(list_match)
utility.print_message(OK, 'category : {}'.format(category))
utility.print_message(OK, 'product : {}'.format(product))
utility.print_message(OK, 'reason : {}'.format(keyword_list))
utility.print_message(OK, 'target url : {}'.format(target_url))
utility.print_message(NONE, '-' * 42)
product_list.append(product)
reason_list.append(keyword_list)
except Exception as err:
utility.print_exception(err, '{}'.format(err))
return product_list, reason_list
# Classifier product name using signatures.
def classifier_signature(ip_addr, port, target_url, response, log_file, utility):
utility.print_message(NOTE, 'Analyzing gathered HTTP response using Signature.')
ip_list = []
port_list = []
vhost_list = []
judge_list = []
version_list = []
reason_list = []
scan_type_list = []
ua_list = []
http_ver_list = []
ssl_list = []
sni_list = []
url_list = []
log_list = []
product_list = []
for category in ['os', 'web', 'framework', 'cms']:
products, keywords = identify_product(category, target_url, response, utility)
for product, keyword in zip(products, keywords):
ip_list.append(ip_addr)
port_list.append(port)
vhost_list.append(ip_addr)
judge_list.append(category + ':' + str(product))
version_list.append('-')
reason_list.append(keyword)
scan_type_list.append('[ip]')
ua_list.append('-')
http_ver_list.append('HTTP/1.1')
ssl_list.append('-')
sni_list.append('-')
url_list.append(target_url)
log_list.append(log_file)
product_list.append(product)
if len(product_list) == 0:
utility.print_message(WARNING, 'Product Not Found.')
return []
# logging.
series_ip = pd.Series(ip_list)
series_port = pd.Series(port_list)
series_vhost = pd.Series(vhost_list)
series_judge = pd.Series(judge_list)
series_version = pd.Series(version_list)
series_reason = pd.Series(reason_list)
series_scan_type = pd.Series(scan_type_list)
series_ua = pd.Series(ua_list)
series_http_ver = | pd.Series(http_ver_list) | pandas.Series |
import unittest
import datetime
from mock import Mock, patch # , ANY
from records_mover.records.schema.field import RecordsSchemaField
import numpy as np
import pandas as pd
class TestField(unittest.TestCase):
maxDiff = None
@patch('records_mover.records.schema.field.pandas.refine_field_from_series')
def test_refine_from_series(self, mock_refine_field_from_series):
mock_name = Mock(name='name')
mock_field_type = Mock(name='field_type')
mock_constraints = Mock(name='constraints')
mock_statistics = Mock(name='statistics')
mock_representations = Mock(name='representations')
mock_series = Mock(name='series')
mock_total_rows = Mock(name='total_rows')
mock_rows_sampled = Mock(name='rows_sampled')
field = RecordsSchemaField(name=mock_name,
field_type=mock_field_type,
constraints=mock_constraints,
statistics=mock_statistics,
representations=mock_representations)
field.refine_from_series(mock_series, mock_total_rows, mock_rows_sampled)
mock_refine_field_from_series.assert_called_with(field, mock_series, mock_total_rows,
mock_rows_sampled)
def test_is_more_specific_type_true(self):
self.assertTrue(RecordsSchemaField.is_more_specific_type('integer', 'string'))
def test_is_more_specific_type_false_same(self):
self.assertFalse(RecordsSchemaField.is_more_specific_type('string', 'string'))
def test_is_more_specific_type_false(self):
self.assertFalse(RecordsSchemaField.is_more_specific_type('string', 'integer'))
@patch('records_mover.records.schema.field.pandas.field_from_index')
def test_from_index(self, mock_field_from_index):
mock_index = Mock(name='index')
mock_processing_instructions = Mock(name='processing_instructions')
out = RecordsSchemaField.from_index(mock_index, mock_processing_instructions)
mock_field_from_index.\
assert_called_with(index=mock_index,
processing_instructions=mock_processing_instructions)
self.assertEqual(out, mock_field_from_index.return_value)
@patch('records_mover.records.schema.field.sqlalchemy.field_from_sqlalchemy_column')
def test_from_sqlalchemy_column(self, mock_field_from_sqlalchemy_column):
mock_column = Mock(name='column')
mock_driver = Mock(name='driver')
mock_rep_type = Mock(name='rep_type')
out = RecordsSchemaField.from_sqlalchemy_column(column=mock_column,
driver=mock_driver,
rep_type=mock_rep_type)
mock_field_from_sqlalchemy_column.\
assert_called_with(column=mock_column,
driver=mock_driver,
rep_type=mock_rep_type)
self.assertEqual(out, mock_field_from_sqlalchemy_column.return_value)
@patch('records_mover.records.schema.field.sqlalchemy.field_to_sqlalchemy_type')
def test_to_sqlalchemy_type(self, mock_field_to_sqlalchemy_type):
mock_driver = Mock(name='driver')
mock_name = Mock(name='name')
mock_field_type = Mock(name='field_type')
mock_constraints = Mock(name='constraints')
mock_statistics = Mock(name='statistics')
mock_representations = Mock(name='representations')
field = RecordsSchemaField(name=mock_name,
field_type=mock_field_type,
constraints=mock_constraints,
statistics=mock_statistics,
representations=mock_representations)
out = field.to_sqlalchemy_type(mock_driver)
mock_field_to_sqlalchemy_type.assert_called_with(field, mock_driver)
self.assertEqual(out, mock_field_to_sqlalchemy_type.return_value)
@patch('records_mover.records.schema.field.sqlalchemy.field_to_sqlalchemy_column')
def test_to_sqlalchemy_column(self, mock_field_to_sqlalchemy_column):
mock_driver = Mock(name='driver')
mock_name = Mock(name='name')
mock_field_type = Mock(name='field_type')
mock_constraints = Mock(name='constraints')
mock_statistics = Mock(name='statistics')
mock_representations = Mock(name='representations')
field = RecordsSchemaField(name=mock_name,
field_type=mock_field_type,
constraints=mock_constraints,
statistics=mock_statistics,
representations=mock_representations)
out = field.to_sqlalchemy_column(mock_driver)
mock_field_to_sqlalchemy_column.assert_called_with(field, mock_driver)
self.assertEqual(out, mock_field_to_sqlalchemy_column.return_value)
def test_python_type_to_field_type(self):
mock_unknown_type = Mock(name='unknown_type')
out = RecordsSchemaField.python_type_to_field_type(mock_unknown_type)
self.assertIsNone(out)
def test_cast_series_type_time_empty(self):
mock_name = Mock(name='name')
mock_field_type = 'time'
mock_constraints = Mock(name='constraints')
mock_statistics = Mock(name='statistics')
mock_representations = Mock(name='representations')
field = RecordsSchemaField(name=mock_name,
field_type=mock_field_type,
constraints=mock_constraints,
statistics=mock_statistics,
representations=mock_representations)
data = np.array([])
series = pd.Series(data)
new_series = field.cast_series_type(series)
self.assertIsNotNone(new_series)
def test_cast_series_type_time_timedelta_entries(self):
mock_name = Mock(name='name')
mock_field_type = 'time'
mock_constraints = Mock(name='constraints')
mock_statistics = Mock(name='statistics')
mock_representations = Mock(name='representations')
field = RecordsSchemaField(name=mock_name,
field_type=mock_field_type,
constraints=mock_constraints,
statistics=mock_statistics,
representations=mock_representations)
data = np.array([ | pd.Timedelta(hours=1, minutes=23, seconds=45) | pandas.Timedelta |
from bs4 import BeautifulSoup
import pandas as pd
import requests
def get_soup(url):
page = requests.get(url)
soup = BeautifulSoup(page.text, 'html.parser')
return soup
def scrape_mat_names():
soup = get_soup('https://gamepress.gg/grandorder/materials')
tables = soup.find('div', attrs={'class': 'view-content'}).find_all('table')
skill_gem_table = tables[3]
mats_table = tables[0]
asc_mats_table = tables[2]
mat_names = []
mats_tables = (skill_gem_table, mats_table, asc_mats_table)
for table in mats_tables:
rows = table.find_all('tr')
for row in rows:
name = [text for text in row.stripped_strings][0]
mat_names.append(name.strip('\u200b'))
return mat_names
def set_mats_data(df, asc_level, skill_levels):
asc_col_clear = []
for i in range(2, asc_level+1):
if i == 5:
i = 'Max'
asc_col_clear.append(f'Asc{i}')
for col in asc_col_clear:
df[col] = [0] * df.shape[0]
for i in range(2, 11):
col_name = f'Skl{i}'
mult = 0
for level in skill_levels:
if i <= level:
mult += 1
for j in range(len(df)):
amount = df.loc[j, col_name]
subtract = (amount // 3) * mult
df.loc[j, col_name] = amount - subtract
return df
def make_servants_table(servants):
df = pd.read_csv('./csvs/all_servants.csv')
df_new = pd.DataFrame()
for servant in servants:
df2 = df[df['Name'] == servant['name']]
df2 = df2.reset_index(drop=True)
# if len(df2) == 0:
# print('ERROR:', servant['name'])
df2 = set_mats_data(df2, servant['ascension'], servant['skills'])
priority_column = [servant['priority']] * len(df2)
df2.insert(2, 'Priority', priority_column)
df_new = pd.concat([df_new, df2])
return df_new.reset_index(drop=True)
def calculate_mats(df, mats):
mat_cols = df.columns.tolist()
for col in ['Name', 'Material', 'Priority']:
mat_cols.remove(col)
priority_3_cols = ['Asc2', 'Asc3', 'Asc4', 'AscMax']
priority_2_cols = priority_3_cols.copy()
for i in range(2, 7):
priority_2_cols.append(f'Skl{i}')
mats_df = | pd.DataFrame(columns=['Have', 'Need', 'Want']) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # Recommender Systems - Introduction
#
# Today, recommender systems (RS) are all around us, directing, or inducing, us to make decisions on whether we buy that shirt, listen to Aerosmith or Coldplay, or even suggesting past disease diagnoses given a patient's condition.
#
# The main factor that brought due attention to this was probably the internet. Due to the flood of information we suffer today from media sources and advertisements, people struggle a lot to find, or even define, what they want. On the other hand, this amount of data allowed scientists to create platforms that analyse all of it and try to bring only the relevant information a user would like in a short span of time. This is only a basic definition of an RS. We can dig a little deeper and evaluate other possible ways we can recommend items to a person, and we will end up with the main existing fields:
#
# * **Non-personalised and Stereotyped**: The most basic system. It doesn't evaluate each person's individual opinion, but uses summary statistics from the overall population.
# * **Content Based**: Takes into consideration what a person likes and, given the characteristics of the existing items, recommends the items the user would most probably like.
# * **Collaborative**: Takes into consideration what a person likes and also what other similar people like. In this way, we can give recommendations such as: since you and person P like items A, B and C, and person P has also liked item D, you could like item D as well.
#
# This notebook is going to be about the first system, non-personalised and stereotyped recommendations.
#
# <img src="images/notebook1_image1.jpeg">
# # Non Personalised Recommendation
#
# The most basic way to provide recommendations is a non-personalised one. Non-personalised recommendations don't take a user's individual preferences or context into consideration.
#
# Take for instance a newly created client at Amazon. He wouldn't have bought any item on the marketplace, so Amazon doesn't know what the particular tastes of this new person are. The best way to start with a possible recommendation the new customer could like is therefore what other clients, regardless of their individual tastes, have also bought.
#
# ## Stereotyped Recommendation
#
# One little improvement we can still make within the domain of non-personalised recommendations is to do crude stereotype divisions on the metrics. Basic ratings per sex, city or economic status are some examples of categories we can easily create, and they can improve the recommendation quality if we believe there are really distinct products directed at each of these segments.
#
# <img src="images/notebook1_image2.jpg" width="400">
# # Small data analysis
# In order to properly understand this, let's work with a table from [Coursera's Recommender System Course 1](https://drive.google.com/file/d/0BxANCLmMqAyIeDJlYWU0SG5YREE/view?usp=sharing) and take a look at a movie matrix and its respective user ratings. Each row is a user and each column is a movie. Movies that a specific user didn't rate are shown as *NaN*.
# In[1]:
import pandas as pd
import numpy as np
from scipy.stats import pearsonr
# In[2]:
reviews = | pd.read_csv('data/non_personalised_stereotyped_rec.csv') | pandas.read_csv |
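# As a quick illustrative sketch (not from the original course material): a non-personalised
# recommendation can be produced from summary statistics alone, e.g. the mean rating and the
# number of ratings per movie, ignoring who gave them. This assumes `reviews` holds users as
# rows and movies as columns, with *NaN* for movies a user didn't rate.
movie_cols = reviews.select_dtypes(include=[np.number]).columns  # keep only the rating columns
mean_ratings = reviews[movie_cols].mean()    # average rating per movie (NaN ignored)
rating_counts = reviews[movie_cols].count()  # number of users who rated each movie
non_personalised = pd.DataFrame({'mean_rating': mean_ratings, 'n_ratings': rating_counts})
non_personalised.sort_values('mean_rating', ascending=False).head()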
import numpy as np
import pandas as pd
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
df_train = pd.read_csv('train.csv')
df_breed = | pd.read_csv('breed_labels.csv') | pandas.read_csv |
import numpy as np
np.random.seed(875431)
import pandas as pd
from scipy import signal
import os
import astron_common_functions as astronfuns
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.font_manager as font_manager
print("matplotlibrc loc: ",matplotlib.matplotlib_fname())
# plt.ion()
font_path = '/home/anup/.matplotlib/fonts/arial.ttf'
fontprop = font_manager.FontProperties(fname=font_path)
import h5py
# =====================================================================================
# load data: Provide the path and filename of the hdf5 file containing the analyzed data
dir1 = "/home/anup/data/dhpg100000nM2s/analysis/"
fnameh5py = "dhpg100000nM2s_cacyt_rel_event_features.hdf5"
figsavepath = dir1 # path to the folder where figures will be saved
# ------------
# dir1 = "/home/anup/goofy/data/suhitalab/astron/cooked/new_2020_python/dhpg100000nM2s_cacyt_rel_features"
# fnameh5py = "dhpg100000nM2s_cacyt_rel_event_features.hdf5"
# figsavepath = "/home/anup/goofy/data/suhitalab/astron/figures/new_2020_python/dhpg100000nM2s" # path to the folder where figures will be saved
# ------------
binscacytpk = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"binscacytpk")
binscacytrt = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"binscacytrt")
binscacytdk = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"binscacytdk")
binscacytfw = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"binscacytfw")
hcacytpk = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"hcacytpk")
hcacytrt = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"hcacytrt")
hcacytdk = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"hcacytdk")
hcacytfw = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"hcacytfw")
# ------------
dmaxtcacyt = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"dmaxtcacyt")
dmaxtkrrel = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"dmaxtkrrel")
dmaxtffrel = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"dmaxtffrel")
# ------------
binst = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"binst")
psthca = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"psthca")
psthrel = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"psthrel")
psthkrrel = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"psthkrrel")
psthffrel = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"psthffrel")
tc_bins = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"tc_bins")
tc_nkrrel = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"tc_nkrrel")
tc_nkrdoc = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"tc_nkrdoc")
tc_nkrend = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"tc_nkrend")
tc_nkracd = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"tc_nkracd")
tc_nffrel = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"tc_nffrel")
tc_nffmob = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"tc_nffmob")
tc_nffend = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"tc_nffend")
tc_nffacd = astronfuns.loadh5pydata(os.path.join(dir1,fnameh5py),"tc_nffacd")
# ------------
# figure legend labels and colors
grouplabels = ["Control",r"$A\beta$-mGluR",r"$A\beta$-PMCA",r"$A\beta$-mGluR & PMCA"]
plotcolors = np.array([
[0,0,0],
[1,0,0],
[0,0,1],
[0,1,0]
])
# =========================================================================================================================
# Figures 3D&E
# Timecourse plots of release event features: number of docked/mobile vesicle release-ready/released/endocytosed/reacidified
# ==========================================================================================================================
# datasets = [tc_nkrdoc,tc_nkrrel,tc_nkrend,tc_nkracd]
# datasets = [tc_nffmob,tc_nffrel,tc_nffend,tc_nffacd]
# ndataset = len(datasets)
# ngroup = 1
# nbatch = 1
# ntrial = 280
# lineavgs = np.array([np.mean(dataset,axis=1)*400 for dataset in datasets])
# linesems = np.array([np.std(dataset,axis=1)/np.sqrt(nbatch)*400 for dataset in datasets])
# # downsample data to reduce plot image filesize
# itc_bins_ds = np.linspace(0,len(tc_bins)-1,150,dtype=int)
# tc_bins_ds = tc_bins[itc_bins_ds]
# lineavgs_ds = lineavgs[:,:,itc_bins_ds]
# linesems_ds = linesems[:,:,itc_bins_ds]
# print(tc_bins_ds.shape,tc_bins[0],tc_bins[-1],lineavgs_ds.shape,linesems.shape)
# # --------------
# fh1,ah11 = plt.subplots(figsize=(2,2),dpi=600,frameon=False,ncols=1,gridspec_kw={"width_ratios":[1]})
# plotcolors = np.array([
# [0,0,0],
# [1,0,0],
# [1,0,1],
# [0,1,1]
# ])
# for iset in range(0,ndataset):
# for igroup in range(0,1):
# ph1 = ah11.plot(tc_bins_ds-200,lineavgs_ds[iset][igroup,:],marker='',linestyle="-",color=plotcolors[iset,:],markersize=-0.5)
# for itc in range(0,len(tc_bins_ds)):
# # ph1 = ah11.plot(tc_bins[itc]-200,lineavgs[iset][igroup,itc],marker='o',linestyle="-",color=plotcolors[iset,:],markersize=1)
# error1 = lineavgs_ds[iset][igroup,itc] - linesems_ds[iset][igroup,itc]
# error2 = lineavgs_ds[iset][igroup,itc] + linesems_ds[iset][igroup,itc]
# ah11.plot([tc_bins_ds[itc]-200,tc_bins_ds[itc]-200],[error1,error2],linestyle="-",color="grey",linewidth=1)
# # }
# # }
# # }
# # ah11.plot([0,2],[-25,-25],linewidth=1,color="black") # kr
# ah11.plot([0,2],[-5,-5],linewidth=1,color="black") # ff
# # formatting
# # ah11.set_xlim([-4,20]) # kr
# # xticks = [0,5,10,15,20] # kr
# ah11.set_xlim([-5,30]) # ff
# xticks = [0,10,20,30] # ff
# ah11.set_xticks(xticks)
# ah11.set_xticklabels(xticks,fontsize=8,font=fontprop)
# # ah11.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
# ah11.set_xlabel("Time (s)",fontsize=8,font=fontprop)
# # yticks = [0,200,400] # kr
# # ah11.set_ylim([-100,440]) # kr
# yticks = [0,30,60] # ff
# ah11.set_ylim([-20,50]) # ff
# ah11.set_yticks(yticks)
# ah11.set_yticklabels(yticks,fontsize=8,font=fontprop)
# ah11.spines["right"].set_visible(False)
# ah11.spines["top"].set_visible(False)
# # matplotlib.rcParams["mathtext.sf"]
# # yaxislabel = r'#$Ca^{2+}$ events' # cacyt
# # yaxislabel = r'# Full fusion events'
# yaxislabel = r'# Number of mobile vesicles'
# ah11.set_ylabel(yaxislabel,fontsize=8,font=fontprop)
# # saving figures
# # fh1_name = "dhpg100000nM2s_ff_ves_features.svg"
# # fh1.savefig(os.path.join(figsavepath,fh1_name))
# # plt.show()
# =====================================================================================
# Figures 1H & 3F
# Plot calcium/release psth with experimental data
# =====================================================================================
# Load experimental data for DHPG-mediated ca2+ events
folder1 = "/home/anup/goofy/codes/astron/validation_data"
# fname1 = "marchaland2008_dhpg_cacyt_histogram.csv" # validation data psth cacyt
fname1 = "marchaland2008_dhpg_release_histogram.csv" # validation data psth release
dfexp = pd.read_csv(os.path.join(folder1,fname1)); # load validation data set
# interpolate the experimental data onto 50 ms time bins
tbinsexp = np.arange(-1,5,50e-3)
# expy = np.interp(tbinsexp,dfexp["time"],dfexp["ca_cyt_events"]) # validation psth cacyt
expy = np.interp(tbinsexp,dfexp["time"],dfexp["release"]) # validation psth releases
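# clamp interpolation artefacts: zero out negative values and bins beyond the digitised trace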
expy[expy<0] = 0
expy[tbinsexp>max(dfexp["time"])] = 0
dfexp2 = | pd.DataFrame({"tbins":tbinsexp,"psth":expy}) | pandas.DataFrame |
import pandas as pd
from dataset import *
from models import *
from utils import *
from sklearn.metrics import mean_squared_error
if __name__ == '__main__':
def test_model(model, dl, scaler: StandardScaler, metric = root_mean_squared_error()):
list_test_rmse = []
list_ys = []
for x, y in dl:
y_hat_proper = scaler.inverse_transform(model(x).detach())
y_proper = scaler.inverse_transform(y)
list_test_rmse.append( np.sqrt(mean_squared_error(y_hat_proper, y_proper))
/ len(y_proper) if len(y_proper) != 0 else 1
)
list_ys.append( (y_proper, y_hat_proper) )
return float(np.mean(np.stack(list_test_rmse))), list_ys
def train_model(df: pd.DataFrame,
model: nn.Module,
memory: int,
batch_size: int,
error_bound: int,
flatten_xs: bool,
output_parent_folder: str,
output_name: str,
):
# df = df.head(100000) # TODO: REMOVE!
dm = DataModule(df,
memory=memory,
horizon=horizon,
batch_size=batch_size,
flatten_xs=flatten_xs,
error_bound=error_bound)
raw_dm = DataModule(df,
memory=memory,
horizon=horizon,
batch_size=batch_size,
flatten_xs=flatten_xs,
error_bound=None)
train_dataloader = dm.train_dataloader()
device = torch.device('cuda')
cpu_device = torch.device('cpu')
model = model.to(device)
loss_foo = root_mean_squared_error()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
scheduler_lr = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_gamma)
###
# Training loop
before_training = DateUtils.now()
for epoch in range(0, epochs):
loss_list = []
for x, y in train_dataloader:
x = x.to(device)
y = y.to(device)
optimizer.zero_grad()
y_hat = model(x)
loss = loss_foo(y, y_hat)
loss_list.append(loss.cpu().detach().numpy())
loss.backward()
optimizer.step()
scheduler_lr.step()
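# ExponentialLR multiplies the optimizer's learning rate by lr_gamma on every scheduler step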
epoch_loss = np.mean(np.stack(loss_list))
print(f"Loss at epoch={epoch+1}: {float(epoch_loss)}, took: {DateUtils.now() - before_training}")
model = model.to(cpu_device)
model.eval()
test_rmse, raw_test_rmse = -1.0, -1.0
for y_type in ["raw", "compressed"]:
if (y_type == "raw"):
test_rmse, list_ys = test_model(model, dm.test_dataloader(), scaler=dm.scaler)
else:
raw_test_rmse, list_ys = test_model(model, raw_dm.test_dataloader(), scaler=dm.scaler)
total_test_ys = np.concatenate(list(map(lambda array: np.stack(array).reshape(-1, 60), list_ys)), axis=0)
columns = [f"y_{h_i}" for h_i in range(horizon)] + [f"y_hat_{h_i}" for h_i in range(horizon)]
ys_df = pd.DataFrame(total_test_ys, columns=columns)
ys_output_file_path = f"{os.path.join(output_parent_folder, f'{output_name}_y_outputs_{y_type}.csv')}"
ys_df.to_csv(ys_output_file_path)
# print(f"Test RMSE: {test_rmse}")
return model, float(epoch_loss), float(test_rmse), float(raw_test_rmse)
horizon = 30
memory = 60
batch_size = 512
hidden_size = 16
epochs = 15
learning_rate = 0.005
lr_gamma = 0.9
before_everything = DateUtils.now()
output_super_parent_folder = f"{os.path.join(FileUtils.project_root_dir(), 'results', 'forecasting_results', f'{before_everything.month}{before_everything.day}')}"
output_parent_folder = f"{os.path.join(output_super_parent_folder, f'{before_everything.hour}-{before_everything.minute}')}"
output_csv_path = f"{os.path.join(output_parent_folder, f'output_{before_everything.month}-{before_everything.day}_{before_everything.hour}-{before_everything.minute}.csv')}"
FileUtils.create_dir(output_parent_folder)
# TODO: fill dynamically
parquet_file_paths = [
# f"{os.path.join(FileUtils.project_root_dir(), 'data', 'REDD-Cleaned-f32', 'lg_only','house_1-channel_1_output_data_points.parquet')}",
# f"{os.path.join(FileUtils.project_root_dir(), 'data', 'REDD-Cleaned-f32', 'lg_v3_d5', 'house_1-channel_1_output_data_points.parquet')}",
# f"{os.path.join(FileUtils.project_root_dir(), 'data', 'REDD-Cleaned-f32', 'lg_v3_d10', 'house_1-channel_1_output_data_points.parquet')}",
# f"{os.path.join(FileUtils.project_root_dir(), 'data', 'REDD-Cleaned-f32', 'lg_v3_d25', 'house_1-channel_1_output_data_points.parquet')}",
# f"{os.path.join(FileUtils.project_root_dir(), 'data', 'REDD-Cleaned-f32', 'pmc_only', 'house_1-channel_1_output_data_points.parquet')}",
f"{os.path.join(FileUtils.project_root_dir(), 'data', 'REDD-Cleaned-f32', 'swing', 'house_1-channel_1_output_data_points.parquet')}",
]
error_bound_list = [0, 1, 2, 5, 10, 25, 50, None]
model_type_list = ['turbo_lstm']
hidden_size_list = [hidden_size]
# hidden_size_list = [4, 8, 16, 32, 48, 80] # TODO: remove for LR
total_run_count = len(error_bound_list) * len(model_type_list) * len(parquet_file_paths) * len(hidden_size_list)
current_run = 0
for parquet_path in parquet_file_paths:
df = pd.read_parquet(parquet_path)
for error_bound in error_bound_list:
for model_type in model_type_list:
for hidden_size in hidden_size_list:
current_run+=1
print(f"Current run: {current_run} / {total_run_count} | Date: {DateUtils.now()}")
# if (current_run <= 21):
# print("Skipping...")
# continue
if (model_type == 'lstm'):
model = BasicLSTM_simple(hidden_size=hidden_size,
output_length=horizon)
elif(model_type == 'lr'):
model = LinearRegression(memory, horizon)
elif(model_type == 'turbo_lstm'):
model = LSTM_with_skip(memory_length=memory,
hidden_size=hidden_size,
output_length=horizon)
else:
raise ValueError(f"Model type: '{model_type}' is unsupported!")
flatten_xs = True if model_type in ['lr'] else False
dataset_name = str(Path(parquet_path).parent.name)
before_training = DateUtils.now()
trained_model, train_rmse, rmse, raw_rmse = train_model(
df,
model=model,
memory=memory,
batch_size=batch_size,
error_bound=error_bound,
flatten_xs=flatten_xs,
output_parent_folder=output_parent_folder,
output_name=f"{model_type}_{dataset_name}_E{error_bound if not None else 'RAW'}"
)
output_dict = {
'dataset_name': dataset_name,
'model_type': model_type,
'error_bound': error_bound if (error_bound is not None) else -1,
'epochs': epochs,
'memory': memory,
'horizon': horizon,
'batch_size': batch_size,
'hidden_size': hidden_size if (model_type != 'lr') else -1,
'train_rmse': train_rmse,
'rmse': rmse,
'rmse_on_raw': raw_rmse,
'train_start': before_training,
'lr': learning_rate,
'lr_gamma': lr_gamma,
}
output_csv_df = None
try:
output_csv_df = pd.read_csv(output_csv_path)
current_output_df = | pd.DataFrame(output_dict, index=[1]) | pandas.DataFrame |
"""
@Author: <NAME>
For more analysis go to .ipynb
This file only contains the final solution code
"""
import numpy as np
import pandas as pd
from lightgbm import LGBMClassifier
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.model_selection import train_test_split
# Simply to read the data from csv file.
data = pd.read_csv("train.csv")
# To check the shape of the dataframe we have
print("Shape of data is: ",data.shape)
print("Number of rows we have: ",data.shape[0])
print("Number of columns we have: ",data.shape[1])
# Lets drop duplicates
data.drop_duplicates(subset=['Gender','Age','Region_Code','Occupation', 'Channel_Code','Vintage','Credit_Product','Avg_Account_Balance','Is_Active','Is_Lead'],keep="first",inplace=True)
# This is like onehot encoding
# Apply one hot encoding based on the occupation
data = pd.concat([data,pd.get_dummies(data.Occupation)],axis=1)
# Then we have to drop the occupation column from the main data
data.drop(["Occupation"],axis=1,inplace=True)
# Apply one hot encoding based on the region code
data = pd.concat([data,pd.get_dummies(data.Region_Code)],axis=1)
data.drop(["Region_Code"],axis=1,inplace=True)
data = pd.concat([data, | pd.get_dummies(data.Channel_Code) | pandas.get_dummies |
# import Ipynb_importer
import pandas as pd
from .public_fun import *
# Global variables
class glv:
def _init():
global _global_dict
_global_dict = {}
def set_value(key,value):
_global_dict[key] = value
def get_value(key,defValue=None):
try:
return _global_dict[key]
except KeyError:
return defValue
## fun_01to06
class fun_01to06(object):
def __init__(self, data):
self.cf = [2, 1, 1, 17, 1, 2]
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"起始符",
"命令标识",
"应答标志",
"唯一识别码",
"数据单元加密方式",
"数据单元长度"
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.ol = | pd.DataFrame([self.oj]) | pandas.DataFrame |
# To add data (bills, elevator, etc.) as input, type append(root_out) in the Python console, where root_out is the path, including the file name, of the CSV file your inputs will be saved to
# To divide expenses, type execute_all_division_funcs(root_in, root_out, root_info) in the Python console
# To get a balance for specific units over a specified time period, type balance(root_in) in the Python console
# To obtain a CSV file containing the transaction history for a specific time period, type transaction_history(root_in) in the Python console
# To get the percentage share of total expenses held by categories or subcategories, type portion_by_category(root_in) or portion_by_subcategory(root_in) in the Python console
# To get the percentage share of total expenses held by units, type portion_by_unit(root_in) in the Python console
# To get a cumulative sum based on units type cumulative_sum_for_units(root_in), and for a cumulative sum based on subcategories type cumulative_sum_for_subcategories(root_in), in the Python console
# To check the status of the building's total balance, type negative_balance_error(root_in) in the Python console
# To get an estimate of next year's monthly expenses for each unit, type next_year_expenditure_estimation(root_in, root_info) in the Python console
def append(root_out: str):
""" This function accepts inputs from the user. The root_out variable is
the path and the name of the csv file that you want to save your inputs into. """
import pandas as pd
import datetime as dt
d = {'amount': [], 'time':[], 'category': [] , 'subcategory': [],
'responsible unit': [], 'related unit': [[]],
'div': [], 'description': []}
amount = int(input('amount:'))
d['amount'].append(amount)
time = input('time( Example: 1399/09/21 ) : ')
d['time'].append(dt.date(int(time[0:4]),int(time[5:7]), int(time[8:])))
category = input("category: 1) bill 2) cleaning 3) elevator 4) parking 5) repairs 6) charge 7) other [1/2/3/4/5/6/7] :")
if category == '1':
d['category'].append('bill')
elif category == '2':
d['category'].append('cleaning')
elif category == '3':
d['category'].append('elevator')
elif category == '4':
d['category'].append('parking')
elif category == '5':
d['category'].append('repairs')
elif category == '6':
d['category'].append('charge')
elif category == '7':
d['category'].append('other')
if category == '1':
subcategory = input('subcategory: 1) water 2) gas 3) electricity 4) tax [1/2/3/4] :')
if subcategory == '1':
subcategory = 'water'
elif subcategory == '2':
subcategory = 'gas'
elif subcategory == '3':
subcategory = 'electricity'
elif subcategory == '4':
subcategory = 'tax'
else:
subcategory = 'undefind'
d['subcategory'].append(subcategory)
responsible_unit = input('responsible unit:')
d['responsible unit'].append(responsible_unit)
related_unit = input('related unit:(please enter the related units as the form first unit number, second unit number,....Note that if you want to include all units you should enter the number of all units)').split(',')
for e in related_unit:
d['related unit'][0].append(eval(e))
div = input('div: 1) -e 2) -r 3) -d 4) -a 5) -p [1/2/3/4/5] :(Note that if you have selected charge as a category, -d must be chosen as the division type.)')
if div == '1':
div = 'equal'
d['div'].append(div)
elif div == '2':
div = 'number'
d['div'].append(div)
elif div == '3':
div = 'default'
d['div'].append(div)
elif div == '4':
div = 'area'
d['div'].append(div)
elif div == '5':
div = 'parking'
d['div'].append(div)
description = input('description:')
d['description'].append(description)
i = input('Is there anything left? A)yes B)no [A/B] :')
if i == 'B':
pd.DataFrame(d).to_csv(root_out, mode = 'a', header= False, index = False)
return
else:
| pd.DataFrame(d) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Bootstrap - Top Gun Stochastic Modelling Class
Created on Tue Sep 8 08:17:30 2020
@author: <NAME>
"""
# %% IMPORTs CELL
# Default Imports
import numpy as np
import pandas as pd
import scipy.linalg as LA
# Plotly for charting
import plotly.express as px
import plotly.graph_objs as go
import plotly.io as pio
# %% CLASS MODULE
class Bootstrap(object):
""" Portfolio Stochastic Modelling Class Modules
Currently offers ONLY empirical stochastic modelling for individual ports
as well as across a range of ports (called frontiers), plus a range
of charts, analysis and markdown-report outputs (you need to go run the
markdown elsewhere).
INPUTS:
wgts - dataframe where indices are asset classes & cols are portfolios
mu - vector of expected returns (as pd.Series)
vol - vector of expected volatilities (as pd.Series)
hist - dataframe of historic returns (NOT index px/levels)
cor - dataframe of correlation matrix
nsims - number of Monte Carlo simulations
psims - no of periods to run simulation over (default = 260w)
f - annualisation factor (default = 52 for weekly returns data)
MAIN FUNCTIONS:
empirical() - runs empirical sims for 1 vector of weights
sim_stats() - calc descriptive stats for 1 simulation output
port_stats() - rtn, vol & marginal-contribution given inputs
empirical_frontier() - runs empirical analysis across all ports in wgts
will do stochastic sims, sim_stats, port_stats & return
a dictionary with all the outputs (stored as self.results)
correl_rmt_filtered() - allows us to build RMT filtered correl matrix
for other correl work look at correls module
CHARTING FUNCTIONS:
plot_collection_all(): runs default plots for frontier & ports
plot_collection_frontier(): runs plots to analyse across portfolios
plot_collection_port(): runs plots to analyse timeseries of simulations
NB/ for details of individual charts go look below, or run collection
then load each plotly figures from the collection to see what it is
REPORTING:
In all cases we produce a templated markdown script with Plotly plots
already embedded as HTML - these can be fed to report_writer or anything
which turns markdown to a static html/pdf.
markdown_master(): combines frontier report with all ports; options avail
markdown_frontier_report(): frontier analysis
markdown_port_report(): individual portfolio report
DEVELOPMENT:
- check correlation matrix PSD in class properties
Author: <NAME>
"""
## Initialise class
def __init__(self, wgts, mu, vol, # these aren't optional
alpha=None, te=None, tgts=None, # optional
hist=None, cor=None, # Need something
nsims=1000, f=52, psims=260, # standard params
**kwargs):
### ORDER OF INITIALISATION IS IMPORTANT ###
### Non-optional class inputs
self.wgts = wgts
self.mu = mu # [has @property]
self.vol = vol # [has @property]
# From required inputs we set these
self.universe = mu.index # list of asset classes [has @property]
self.port_names = wgts.columns # useful to have names of portfolios
### Optional class inputs
# alpha - set to vector of zeros of None passed [has @property]
if alpha is None:
alpha = pd.Series(np.zeros(len(mu)), index=mu.index, name='alpha')
self.alpha = alpha
# tracking error - set to vector of zeros if None passed [has @property]
if te is None:
te = pd.Series(np.zeros(len(mu)), index=mu.index, name='te')
self.te = te
# tgts set to vector of zeros of length equal to the number of portfolios
if tgts is None:
tgts = pd.Series(np.zeros(len(wgts.columns)),
index=wgts.columns,
name='tgts')
self.tgts = tgts
# Historical Timeseries Data & Correlation
# ORDER IMPORTANT HERE
# if hist provided set a default correlation matrix as RMT
# if cor also provided we then override the default
# this is a little inefficient, but meh... hardly matters
if hist is not None:
self.cor = self.correl_rmt_filtered(hist.corr())
self.hist = hist
# Override default correl (from hist) if cor specifically passed
if cor is not None:
self.cor = cor # check symmetrical in properties
### STANDARD SETTINGS
self.nsims = nsims # number of simulations
self.f = f # annualisation factor
self.psims = psims # no of periods in MC simulation
self.plots = dict() # initialise dict for plotly plots (useful later)
## Update Plotly template
colourmap = ['grey', 'teal', 'purple', 'black', 'deeppink', 'skyblue', 'lime', 'green','darkorange', 'gold', 'navy', 'darkred',]
fig = go.Figure(layout=dict(
font={'family':'Garamond', 'size':14},
plot_bgcolor= 'white',
colorway=colourmap,
showlegend=True,
legend={'orientation':'v'},
margin = {'l':75, 'r':50, 'b':25, 't':50},
xaxis= {'anchor': 'y1', 'title': '', 'hoverformat':'.1%', 'tickformat':'.0%',
'showline':True, 'linecolor': 'gray',
'zeroline':True, 'zerolinewidth':1 , 'zerolinecolor':'whitesmoke',
'showgrid': True, 'gridcolor': 'whitesmoke',
},
yaxis= {'anchor': 'x1', 'title': '', 'hoverformat':'.1%', 'tickformat':'.0%',
'showline':True, 'linecolor':'gray',
'zeroline':True, 'zerolinewidth':1 , 'zerolinecolor':'whitesmoke',
'showgrid': True, 'gridcolor': 'whitesmoke'
},
updatemenus= [dict(type='buttons',
active=-1, showactive = True,
direction='down',
y=0.5, x=1.1,
pad = {'l':0, 'r':0, 't':0, 'b':0},
buttons=[])],
annotations=[{'text': 'Source: STANLIB Multi-Strategy',
'xref': 'paper', 'x': 0.5, 'ax': 0,
'yref': 'paper', 'y': 0.5, 'ay': 0,
'align':'right'}],))
# Save template
pio.templates['multi_strat'] = pio.to_templated(fig).layout.template
return
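# Minimal construction sketch (illustrative only - 'wgts_df', 'mu_s', 'vol_s' and 'hist_df'
# are assumed names, not defined in this module): wgts_df has asset classes in the index and
# portfolio names as columns; mu_s/vol_s are pd.Series on the same index; hist_df is weekly returns.
# >>> bs = Bootstrap(wgts=wgts_df, mu=mu_s, vol=vol_s, hist=hist_df, nsims=1000, f=52, psims=260)
# >>> bs.cor # RMT-filtered correlation matrix derived from hist_df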
# %% CLASS PROPERTIES
# Expected Returns (mu) - Ideally pd.Series but MUST be a (1xN) vector
@property
def mu(self): return self.__mu
@mu.getter
def mu(self): return self.__mu
@mu.setter
def mu(self, x):
if isinstance(x, pd.Series):
x.name = 'mu'
elif len(np.shape(x)) != 1:
raise ValueError('mu input is non-vector: {} given'.format(x))
self.__mu = x
# Alpha (alpha) - Ideally pd.Series but MUST be a (1xN) vector
@property
def alpha(self): return self.__alpha
@alpha.getter
def alpha(self): return self.__alpha
@alpha.setter
def alpha(self, x):
if isinstance(x, pd.Series):
x.name = 'alpha'
elif len(np.shape(x)) != 1:
raise ValueError('alpha input is non-vector: {} given'.format(x))
self.__alpha = x
# Volatility (vol) - Ideally pd.Series but MUST be a (1xN) vector
@property
def vol(self): return self.__vol
@vol.getter
def vol(self): return self.__vol
@vol.setter
def vol(self, x):
if isinstance(x, pd.Series):
x.name = 'vol'
elif len(np.shape(x)) != 1:
raise ValueError('vol input is non-vector: {} given'.format(x))
self.__vol = x
# Tracking Error (te) - Ideally pd.Series but MUST be a (1xN) vector
@property
def te(self): return self.__te
@te.getter
def te(self): return self.__te
@te.setter
def te(self, x):
if isinstance(x, pd.Series):
x.name = 'te'
elif len(np.shape(x)) != 1:
raise ValueError('te input is non-vector: {} given'.format(x))
self.__te = x
# Correlation Matrix
# Currently just check if symmetrical
# Add test for positive semi-definite
@property
def cor(self): return self.__cor
@cor.getter
def cor(self): return self.__cor
@cor.setter
def cor(self, x):
if x.shape[0] != x.shape[1]:
raise ValueError('Correl Matrix non-symmetrical: {} given'.format(x))
self.__cor = x
# nsims - number of simulations to run - needs to be an integer
@property
def nsims(self): return self.__nsims
@nsims.getter
def nsims(self): return self.__nsims
@nsims.setter
def nsims(self, x):
if not isinstance(x, int):
raise ValueError('nsims needs to be an integer: {} given'.format(x))
self.__nsims = int(x)
# psims - number of periods per MC Sim - needs to be an integer
@property
def psims(self): return self.__psims
@psims.getter
def psims(self): return self.__psims
@psims.setter
def psims(self, x):
if not isinstance(x, int):
raise ValueError('psims needs to be an integer: {} given'.format(x))
self.__psims = int(x)
# f - annualisation factor needs to be an integer
@property
def f(self): return self.__f
@f.getter
def f(self): return self.__f
@f.setter
def f(self, x):
if not isinstance(x, int):
raise ValueError('annualisation factor needs to be an integer: {} given'.format(x))
self.__f = int(x)
# %% Emperical Bootstrap
def empirical(self, **kwargs):
""" Monte-Carlo Simulation using Scaled Empirical Data
Jaco's idea is to take the historical timeseries and standardise, then once
standardised we can input our own means and volatility estimates. This
will maintain higher moments (skew & kurtosis) from the original ts
but allow us to use our own forward estimates.
Note a serious problem of this approach is the length of the historical
data. Correlation is essentially taken by picking x random periods from
this data - as a result we are literally just recycling the same periods
over and over in a new order making this analysis less useful for longer
simulations or sims where this historical period is short.
OUTPUT:
pd.DataFrame with each simulation being a row (starting at 0) and
each column being a period along the sim. Column[0] representing
time-0 is set at a portfolio value of 1
INPUTS:
w = vector of port wgts ideally pd.Series()
mu = vector of exp rtns idieally pd.Series()
alpha (OPTIONAL) = vector of asset class alpha expectations
vol = vector of annualised volatilities
te (OPTIONAL) tracking error of alpha sources to asset class beta
f = int() annualisation factor (default=52)
nsims = int() number of simulations
psims = int() no of observation periods simulation
DEVELOPMENTS:
* Correlation of Alpha sources == 0; could incorporate alpha correl matrix
* Converting hist_rtns to np.array may speed things up; rest is already np
Author: Jaco's brainpower; adapted by David
"""
## INPUTS
w = kwargs['w'] if 'w' in kwargs else self.w
mu = kwargs['mu'] if 'mu' in kwargs else self.mu
vol = kwargs['vol'] if 'vol' in kwargs else self.vol
hist = kwargs['hist'] if 'hist' in kwargs else self.hist
nsims = kwargs['nsims'] if 'nsims' in kwargs else self.nsims
f = kwargs['f'] if 'f' in kwargs else self.f
psims = kwargs['psims'] if 'psims' in kwargs else self.psims
## OPTIONAL INPUTS
alpha = np.zeros(len(w)) if 'alpha' not in kwargs else kwargs['alpha']
te = np.zeros(len(w)) if 'te' not in kwargs else kwargs['te']
# De-Annualise Returns & Vols
mu, alpha = (mu / f), (alpha / f)
vol, te = (vol / np.sqrt(f)), (te / np.sqrt(f))
# Re-scale historical return series
std_rtn = (hist - hist.mean()) / hist.std() # standardise
std_rtn = std_rtn.mul(vol, axis=1).add(mu, axis=1) # re-scale
for i in range(0, nsims):
#irtn = std_rtn.iloc[:simlength]
irtn = std_rtn.sample(psims)
ialpha = np.random.normal(alpha, te, (psims, len(w)))
irtn = irtn + ialpha
# Build simulated path & add to simulations array
path = (1 + (irtn @ w)).cumprod(axis=0)
# create sims array on 1st iteration
# add to sims stack on further iterations
if i == 0:
sims = path
else:
sims = np.vstack((sims, path))
# Convert to DataFrame - adjust columns to start at 1 not 0
# insert vec PortValue==1 at col.0; concat because pd.insert is crap
df = pd.DataFrame(sims, columns=range(1, psims+1))
v1 = pd.DataFrame(np.ones((nsims, 1)), columns=[0])
# round on the output to save space in chart memory later
return pd.concat([v1, df], axis=1).round(5)
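# Toy sketch of the standardise-and-rescale step used above (all numbers are assumed,
# weekly basis; this is not part of the class):
# >>> import numpy as np, pandas as pd
# >>> hist = pd.DataFrame(np.random.standard_normal((520, 2)) * 0.02, columns=['EQ', 'CR'])
# >>> mu_w = pd.Series([0.10, 0.05], index=['EQ', 'CR']) / 52
# >>> vol_w = pd.Series([0.15, 0.08], index=['EQ', 'CR']) / np.sqrt(52)
# >>> std = (hist - hist.mean()) / hist.std()
# >>> rescaled = std.mul(vol_w, axis=1).add(mu_w, axis=1) # our means/vols, original higher moments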
def sim_stats(self, sims, tgt=0, method='annualise', **kwargs):
""" Descriptive Statistics for dataframe of Monte Carlo Sims
INPUTS:
sims - df with rows as sims; columns as periods along sim path
tgt - numerical return bogie (default = 0)
periods - list periods on sim path to calc (default = all > 1yr)
note column varnames must be numerical as used in annualisation
method annualise (default) - annualises periodic return
relative - subtracts annualised return by target
terminal - looks at terminal value
Author: <NAME>
"""
# percentiles we want to see
pc = [0.01, 0.05, 0.1, 0.25, 0.4, 0.5, 0.6, 0.75, 0.9, 0.95, 0.99]
# periods from simulations to analyse
if 'periods' in kwargs:
periods = kwargs['periods']
else:
periods = sims.columns[self.f:]
## SUBSET & ANNUALISE
# subset sims to required dates; if len(periods) is 1 this will return
# a pandas series so need to convert back to df (to access cols later)
sims = sims.loc[:, periods]
sims = sims.to_frame() if isinstance(sims, pd.Series) else sims
anns = sims ** (self.f / sims.columns) - 1 # annualised rtns from paths
# Alternative calc methods available
# 0. 'annualise' (default) annualised returns
# 1. relative reduces returns by target (& tgt == 0)
# 2. terminal looks at terminal portfolio values rather than returns (& tgt == 1)
if method == 'relative':
anns, tgt = (anns - tgt), 0
elif method == 'terminal':
anns, tgt = sims, 1
# Stats (not computed by pd.describe)
stats = pd.DataFrame(index=anns.columns)
stats['median'] = anns.median()
stats['skew'] = anns.skew()
stats['kurtosis'] = anns.kurtosis()
stats['target'] = tgt
stats['prob'] = anns[anns > tgt].count() / anns.count()
return pd.concat([stats.T, anns.describe(percentiles=pc)], axis=0)
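# Worked example of the annualisation above (assumed numbers): a simulated path worth 1.20
# after 104 weekly periods with f=52 annualises to 1.20 ** (52 / 104) - 1, roughly 9.5% p.a.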
def port_stats(self, w=None, mu=None, vol=None, cor=None, **kwargs):
""" Portfolio Risk & Return Stats including MCR
NB/ This function ought to be elsewhere in the package; it may therefore
get replicated and then removed in the fullness of time.
INPUT:
w - wgts df with assets in index & ports as cols
mu - pd.Series of expected returns
vol - pd.Series of volatilities
cor - np.array or pd.Dataframe correlation matrix
OUTPUT: dictionary with keys risk, rtn, mcr, tcr & pcr
REFERENCES:
[1] http://webuser.bus.umich.edu/ppasquar/shortpaper6.pdf
Author: David (whilst loitering in the Scottish sunshine)
"""
## INPUTS - from self if None provided
if w is None: w = self.wgts
if mu is None: mu = self.mu
if vol is None: vol = self.vol
if cor is None: cor = self.cor
## CALCULATIONS
rtn = w.multiply(mu, axis=0).sum() # expected return
# convert w to dataframe if series passed
# this is a problem with the change in dimensions
if isinstance(w, pd.Series):
w = w.to_frame()
wa = np.array(w) # wgts to arrays for matrix algebra
vcv = np.diag(vol) @ cor @ np.diag(vol) # covariance matrix
v = np.sqrt(np.diag(wa.T @ vcv @ wa)) # portfolio volatility
# Marginal Contribution to Risk
# where MCR = (w.T * VCV) / vol
mcr = np.transpose((wa.T @ vcv) / v.reshape((w.shape[1],1)))
mcr.columns, mcr.index = w.columns, mu.index
tcr = mcr * wa # total contribution to risk
pcr = tcr / v # percentage TCR (sums to 100%)
# convert vol back to pandas series
v = pd.Series(data=v, index=rtn.index)
# Ingest to class
self.port_rtn = rtn
self.port_vol = v
self.mcr = mcr
self.tcr = tcr
self.pcr = pcr
return dict(risk=v, rtn=rtn, mcr=mcr, tcr=tcr, pcr=pcr)
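# Quick sanity sketch for the risk decomposition above ('bs' is an assumed Bootstrap instance):
# >>> out = bs.port_stats()
# >>> out['tcr'].sum() # total contributions to risk should sum to...
# >>> out['risk'] # ...the portfolio vols, column by column
# >>> out['pcr'].sum() # and percentage contributions should sum to 100% per portfolio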
def empirical_frontier(self, wgts=None, tgts=None, alpha=True, **kwargs):
""" Runs Stochastic Modelling on whole Frontier
Frontier here refers to any set of portfolio weights - original use case
was to run analysis on each port on an MVO efficient frontier
"""
## INPUTS
# pull wgts dataframe from self if None provided
wgts = self.wgts if wgts is None else wgts
# Return Targets can be provided, pulled from object or zeros
if tgts is None:
# if None provided grab tgts from self
tgts = self.tgts
elif tgts == 0:
# array of zeros otherwise
tgts = np.zeros(wgts.shape[1])
# Alpha
# Remember that alpha & te are set ONLY via kwargs in empirical bootstrap
# For frontier if alpha is false create 2x series of zeros
if alpha:
alpha, te = self.alpha, self.te
else:
alpha = pd.Series(name='alpha', index=wgts.index,
data=np.zeros(wgts.shape[0]))
te = pd.Series(name='te', index=wgts.index,
data=np.zeros(wgts.shape[0]))
# Output storage dictionary
# keys as names of ports being tested, values will be dicts themselves
data = dict.fromkeys(wgts.columns)
# port_stats() works on whole frontier anyway to do before iteration
portstats = self.port_stats(w=wgts) # NB/ not part of MC sim
## iterate across frontier (columns of wgts df)
for i, port in enumerate(wgts):
# Pull inputs from self - for the bootstrap
# Not technically required; useful to store so we know exactly
# which inputs went into a given model
# also append some portstats stuff (MCR, TCR, PCR) which is useful
# although not used at all in the stochastic modelling
df = pd.concat([wgts[port], self.mu, self.vol, alpha, te,
pd.Series(portstats['mcr'][port], name='mcr'),
pd.Series(portstats['tcr'][port], name='tcr'),
pd.Series(portstats['pcr'][port], name='pcr'),
], axis=1)
# rename wgt vector to w (rather than the port name)
df.rename(columns={port:'w'}, inplace=True)
# Run simulation. Pass wgts but take f, nsims, psims from self
# For empirical() alpha is set via kwarg, send zeros if alpha False
sims = self.empirical(w=wgts[port], alpha=alpha, te=te)
# annualised returns from sim paths
# and descriptive stats
annsims= sims ** (self.f / sims.columns) - 1
simstats= self.sim_stats(sims, tgt=tgts[i])
irisk = pd.Series([df.w.T @ df.mu, # portfolio return
df.w.T @ df.alpha, # alpha rtn
df.tcr.sum()], # vol
index=['port_rtn', 'alpha_rtn', 'port_vol'],
name=port)
# Dictionary of all useful stuff for this simulation
port_data = dict(inputs=df,
sims=sims,
annsims=annsims,
stats=simstats,
tgt=tgts[i],
risk_rtn=irisk)
# merge ith port dictionary with output dict
data[port] = port_data
self.results = data # save results to self
return data
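# Sketch of navigating the results dictionary returned above ('bs' and the port name 'RP1'
# are assumptions for illustration):
# >>> res = bs.empirical_frontier()
# >>> res['RP1']['stats'] # descriptive stats by simulation period
# >>> res['RP1']['annsims'].iloc[:, -1] # terminal annualised returns across all sims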
# %% Correlation Functions
# This may be duplicated elsewhere in the package
# Ideally we'd put it in our correlation functions group
def correl_rmt_filtered(self, c=None, from_returns=False):
""" Create Random Matrix Theory Filtered CoVariance
Props to Jaco who showed me how to do this. We de-noise our input
correlation matrix by comparing the eigns from a PCA to the eigns
or a randomly generated matrix. Then we scale the original matrix
by removing and eigen vectors where the eignenval < random matrix.
INPUTS:
c - correlation matrix as dataframe (ideally)
None (default) reverts to self.cor
Author: <NAME> (sort of) but credit goes to Jaco; Sept 2020
"""
if c is None:
c = self.cor
# Use as a flag to determine if input is a dataframe or not
# if so we convert back to dataframe later
pandapower = True if isinstance(c, pd.DataFrame) else False
# find ordered eigenvalues of input corr matrix
w0, v0 = self._ordered_eig(c)
# Generate multivariate gaussian of the same size
# then find eigens of random returns matrix
RANDRETS = np.random.standard_normal(size = c.shape)
rand_cor = np.corrcoef(RANDRETS, rowvar=False)
wR, vR = self._ordered_eig(rand_cor)
# If the eigenvalue is larger than the corresponding random-matrix eigenvalue keep it, else set it to zero
w = []
for e0, eR in np.c_[w0, wR]:
if e0 > eR:
w.append(e0)
else:
w.append(0)
D = np.diag(np.array(w))
# Recover correlation matrix from filtered eigen values and original vectors
# Set diagonals to one
c1 = v0 @ D @ v0.T
c1 = np.eye(c.shape[0]) - np.diag(np.diag(c1)) + c1
if pandapower:
c1 = pd.DataFrame(data=c1, index=c.index, columns=c.index)
return c1
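# Illustrative use of the RMT filter (names are assumed; any symmetric correlation df works):
# >>> noisy = pd.DataFrame(np.random.standard_normal((260, 10))).corr()
# >>> clean = bs.correl_rmt_filtered(noisy) # diagonal stays at 1, weak eigenmodes are stripped out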
def _ordered_eig(self, x):
""" Find Real ordered eigenvalues & vectors correlation matrix """
w, v = LA.eig(np.array(x)) # convert to array & find eig values/vectors
w = np.real(w) # convert complex numbers to real
idx = w.argsort()[::-1] # eigenvalues aren't ordered automatically
return w[idx].reshape(w.shape[0], 1), v[:,idx]
# %% BOOTSTRAP CHARTS
# Again the base for these charts may be replicated elsewhere, but seeing
# as we want these for reporting purposes we're storing them as distinct
def _px_addsource(self, fig, x=1, y=-0.095, align='right'):
return fig.add_annotation(
text="Source: STANLIB Multi-Strategy".format(),
xref='paper', yref='paper', x=x, y=y, ax=0, ay=0, align=align)
# Monte Carlo Simulation paths
def plot_paths(self, sims, tgt=0, maxpaths=2500,
xtitle='Periods', ytitle='Portfolio Value',
template='multi_strat',):
""" Plots Simulation Paths
Fairly generic MC sims path chart, with an optional target return line - there is
also an option to cap the max number of paths to stop the chart bloating.
INPUTS:
sims - sims dataframe OR str name of port in self.results
tgt - (default tgt = 0) for return bogie
f - annualisation factor; default is None which uses self.f
maxpaths - cap on paths
xtitle & ytitle - fairly obvious
template - (default multi_strat)
DEVELOPMENT:
- add colourscale based on rank or terminal value
"""
# check sims
# if sims is a string we assume it's a port name acessable via results
# othereise assume a sims dataframe has been passed
if isinstance(sims, str):
sims = self.results[sims]['sims']
else:
sims = sims
## BASIC ADMIN
l, n = sims.shape
sims = sims - 1
colour_rgb = 'rgba(180,180,180,{})'.format(0.2) # grey with light opacity
## Full Paths Chart
fig = px.line(title='Stochastic Return Paths; {}-simulations'.format(l),
template=template)
fig.update_layout(showlegend=False)
# Append sims
for i, v in enumerate(sims.index):
# plot gets mad (& takes a lot of memory) when nsims is large
# we can constrain paths to a max number with maxpaths
if i >= maxpaths:
continue
# iteration line
fig.add_scatter(x=sims.columns,
y=sims.iloc[i, :],
name="",
line={'color':colour_rgb, 'width': 0.5})
fig.update_layout(
yaxis={'anchor':'x1', 'title':ytitle, 'hoverformat':'.1%', 'tickformat':'.0%',},
xaxis={'anchor':'y1', 'title':xtitle, 'hoverformat':'.0f', 'tickformat':'.0f',})
# target return path
if tgt != 0:
tgt_rtn = [1] + [(1 + tgt) ** (1 / self.f)] * (n + 1)
fig.add_scatter(x=sims.columns,
y=(np.cumprod(tgt_rtn)-1),
line={'color':'teal'})
return fig
def plot_histogram(self, annsims=None, portrange=False, tgt=0,
periods=[52], nbins=100,
opacity=0.5,
title='Probability Return Distributions',
template='multi_strat',
**kwargs):
""" Histogram of Return Distributions with Boxpot Overlay
INPUTS:
annsims: None implies entire frontier defined by self.port_names
dataframe of simulations with annualised returns OR
str() name of port in self.results
tgt: (default tgt = 0) for return bogie; will plot vertical line
portrange:
False - (default) annsims is single port_sim & we are plotting
hist for 1 or more periods
True - annsims is a df with multiple portfolio & single period
periods: [52] (default) but multiple periods available in list
if going across frontier will set to MAX value in periods
nbins: number of bins for histogram
title: obvious
opacity: 0-1 (default = 0.5) go lower with more histos on plot
template: (default multi_strat)
**kwargs: will feed directly into px.histogram()
"""
# annsims can be the actual sims to be plotted on the histogram or
# a string with the name of a port in self.results or
# None in which case we assume a single period, but the whole frontier
# frontier is defined by self.port_names which is normally the same
# as self.results.keys() but allows a subset if we have lots of ports
# and only want a smaller number of those results on the frontier
if annsims is None:
# if going across frontier we can't have more than 1 period
# then we iterate through the frontier
# grab the period vector from the ith port & concat to a dataframe
periods = [max(periods)] if len(periods) > 0 else periods
for i, k in enumerate(self.port_names):
x = self.results[k]['annsims'].iloc[:, periods]
if i == 0:
df = pd.DataFrame(x)
else:
df = pd.concat([df, x], axis=1)
df.columns = self.port_names
annsims = df # set annsims as the dataframe of sims now
# using frontier also means portrange must be true
portrange = True
elif isinstance(annsims, str):
# If input was a str assume its a portfolio from self.results
annsims = self.results[annsims]['annsims'].iloc[:, periods]
else:
annsims = annsims.loc[:, periods] #subset data
# reshape for plotly express (stacked format)
df = annsims.stack().reset_index()
if portrange:
# assumes we were passed multiple portfolios at the same time period
# rather than 1-portfolio at multiple periods along a simulation
df.columns = ['sim', 'port', 'returns']
colour='port'
else:
# converting period to string stops the box thing having a shit fit
df.columns = ['sim', 'period', 'returns']
df['period'] = 'p-' + df['period'].astype(str)
colour='period'
# Actual Histogram
fig = px.histogram(df, x='returns', color=colour,
nbins=nbins,
marginal="box",
histnorm='probability',
histfunc='avg',
title=title,
template=template,
opacity=opacity,
**kwargs)
# overlay rather than stacked historgram
fig.update_layout(barmode='overlay')
# Update Axis
fig.update_layout(yaxis= {'title':'Probability', 'hoverformat':'.1%', 'tickformat':'.0%',},
xaxis= {'title':'Annualised Return', 'hoverformat':'.1%', 'tickformat':'.1%',})
# Add Return Target Vertical Line annotation
if tgt != 0:
fig.update_layout(shapes=[dict(type='line',
line={'color':'teal', 'dash':'solid'},
yref='paper', y0=0, y1=0.98, xref='x', x0=tgt, x1=tgt)])
fig.add_annotation(text="Return Target {:.1%}".format(tgt),
xref='x', x=tgt, yref='paper', y=1 , ax=0, ay=0)
fig = self._px_addsource(fig)
return fig
def plot_box(self, annsims=None, periods=[52, 156, 260],
points=False, boxmean='sd',
title='Return Distribution Box Plot',
template='multi_strat'):
""" Returns Box PLot
INPUTS:
annsims: None implies entire frontier defined by self.port_names
dataframe of simulations with annualised returns OR
str() name of port in self.results
portrange:
False - (default) annsims is single port_sim & we are plotting
hist for 1 or more periods
True - annsims is a df with multiple portfolio & single period
periods: [52] (default) but multiple periods available in list
if going across frontier will set to MAX value in periods
points: False(default)|'outliers'|True
Shows side plot with datum; looks silly with large nsims
boxmean: 'sd'(default)|True|False
Includes dashed mean line in box & diamond for 'sd'
Turns notched median off because it looks stupid
title: obvious
template: (default multi_strat)
"""
# annsims can be the actual sims to be plotted on the histogram or
# a string with the name of a port in self.results or
# None in which case we assume a single period, but the whole frontier
# frontier is defined by self.port_names which is normally the same
# as self.results.keys() but allows a subset if we have lots of ports
# and only want a smaller number of those results on the frontier
if annsims is None:
# if going across frontier we can't have more than 1 period
# then we iterate through the frontier
# grab the period vector from the ith port & concat to a dataframe
periods = [max(periods)] if len(periods) > 0 else periods
for i, k in enumerate(self.port_names):
x = self.results[k]['annsims'].iloc[:, periods]
if i == 0:
df = pd.DataFrame(x)
else:
df = pd.concat([df, x], axis=1)
df.columns = self.port_names
annsims = df # set annsims as the dataframe of sims now
# using frontier also means portrange must be true
portrange = True
elif isinstance(annsims, str):
# If input was a str assume its a portfolio from self.results
annsims = self.results[annsims]['annsims'].iloc[:, periods]
else:
annsims = annsims.loc[:, periods] #subset data
# reshape for plotly express (stacked format)
df = annsims.stack().reset_index()
if portrange:
# assumes we were passed multiple portfolios at the same time period
# rather than 1-portfolio at multiple periods along a simulation
df.columns = ['sim', 'port', 'returns']
colour='port'
else:
# converting period to string stops the box thing having a shit fit
df.columns = ['sim', 'period', 'returns']
df['period'] = 'p-' + df['period'].astype(str)
colour='period'
# Actual Histogram
fig = px.box(df, x=colour , y='returns', color=colour,
points=points, notched=True,
title=title,
template=template)
if boxmean is not None:
fig.update_traces(boxmean=boxmean, notched=False)
# Update Axis
fig.update_layout(yaxis= {'title':'Annualised Return', 'hoverformat':'.1%', 'tickformat':'.0%',},
xaxis= {'title':'Portfolio', 'hoverformat':'.1%', 'tickformat':'.1%',})
fig = self._px_addsource(fig)
return fig
def plot_ridgeline(self, annsims=None, traces=[52, 104, 156, 208, 260],
side='positive', meanline=True, box=False, width=3,
title='Ridgeline KDE Distributions',
template='multi_strat', **kwargs):
""" Ridgeline Plot
Each specified column (via traces) is converted to a KDE distribution and
plotted as a separate trace going up the y-axis
INPUTS:
annsims: None implies single period across all self.results
dataframe of simulations with annualised returns OR
str() name of port in self.results
traces: columns from annsims to turn into ridgelines
for whole frontier we can only have a single period - if
traces list len() > 1 we'll pick the MAX len period
width: (default = 3) is the height
meanline: True(default)|False - pops a vertical mean in ridge
box: True|False(default) - box-plot within ridge
side: 'postive'(default)|'negative' - ridges going up or down
**kwargs: will feed directly into px.histogram()
REFERENCES:
https://mathisonian.github.io/kde/
https://jakevdp.github.io/PythonDataScienceHandbook/05.13-kernel-density-estimation.html
DEVELOPMENT:
- look at the hoverdata, probs aren't what I'd like
"""
# number of colours on ridge line - silly point
ncolours = len(traces)
# annsims can be the actual sims to be plotted on the ridgeline or
# a string with the name of a port in self.results or
# None in which case we assume a single period, but the whole frontier
# frontier is defined by self.port_names which is normally the same
# as self.results.keys() but allows a subset if we have lots of ports
# and only want a smaller number of those results on the frontier
if annsims is None:
# if going across frontier we can't have more than 1 period
# then we iterate through the frontier
# grab the period vector from the ith port & concat to a dataframe
traces = [max(traces)] if len(traces) > 0 else traces
for i, k in enumerate(self.port_names):
x = self.results[k]['annsims'].iloc[:, traces]
if i == 0:
df = pd.DataFrame(x)
else:
df = pd.concat([df, x], axis=1)
df.columns = self.port_names
annsims = df # set annsims as the dataframe of sims now
ncolours = len(annsims.columns) # update ncolours
elif isinstance(annsims, str):
# grab from self if a string input provided for annualised simulations
annsims = self.results[annsims]['annsims'].iloc[:, traces]
else:
# subset the data; there is a subtlety here if the trace list is numerical
# we first try an iloc and then do a loc if the iloc fails
try:
annsims = annsims.iloc[:, traces] # iloc for numerical
except:
annsims = annsims.loc[:, traces] # loc for string list
# create a blended colours list- here is teal to purple
from plotly.colors import n_colors
colors = n_colors('rgb(0, 128, 128)',
'rgb(128, 0, 128)',
ncolours,
colortype='rgb')
# blank plotly express template
fig = px.scatter(title=title, template=template)
for i, v in enumerate(annsims): # add violin plots as traces
vn = "p-{:.0f}".format(v) if type(v) == int else v
fig.add_trace(go.Violin(x=annsims.iloc[:,i],
line_color=colors[i],
line_width=1,
name=vn,
spanmode='soft',))
# convert from violins to horizontal kde charts
fig.update_traces(orientation='h',
side=side,
meanline_visible=meanline,
width=width,
box_visible=box)
# update layouts
fig.update_layout(
yaxis= {'anchor':'x1', 'title':'Simulation', 'hoverformat':'.1%', 'tickformat':'.0%',},
xaxis= {'anchor':'y1', 'title':'Annualised Return'})
fig = self._px_addsource(fig)
return fig
# Density Heatmap showing simulated returns through time
def plot_densitymap(self, sims, f=None,
title='Density Heatmap',
xtitle='Simulation Period',
ytitle='Annualised Return'):
""" Density Heatmap of Simulations through time
WARNING THIS THING CAN BE MASSIVE - NEED TO SORT OUT WHY
x-axis is shows periods of simulation from 1-year (f) to the end
y-axis is set to either annualised return or excess return
colourscale is the probability
INPUTS:
sims - sims dataframe OR str name of port in self.results
f - annualisation factor; default is None which uses self.f
NB/ THIS IS STILL A WORK IN PROGRESS
- need to format colourscale to percentage
- think we could generalise and make more useful
"""
## INPUTS
sims = self.results[sims]['sims'] if isinstance(sims, str) else sims
f = self.f if f is None else f
sims = sims ** (f/sims.columns) - 1 # annualise returns
# set x axis bins from 1-year to end of simulation
# drop the first year of periods because the chart size gets quite big (~5mb)
nbinsx=sims.shape[1] - f
# stack for Plotly Express
sims = sims.iloc[:,f:].stack().reset_index()
sims.columns = ['sim', 'period', 'return']
# Heatmap
fig = px.density_heatmap(sims, x='period', y='return', nbinsx=nbinsx,
histnorm='percent', histfunc='avg',
labels={'period':xtitle,
'return':ytitle,},
title=title, template='multi_strat')
# Good options are 'Tealrose', 'Greys', 'Purples'
# https://plotly.com/python/builtin-colorscales/
fig.update_traces(dict(colorscale ='Tealrose',
reversescale = False,
showscale=True,
coloraxis=None),)
# formatting
fig.update_layout(yaxis={'hoverformat':'.1%', 'tickformat':'.0%',},
xaxis={'hoverformat':'.0f', 'tickformat':'.0f',},)
# Update hoverdata - NB X and Y are HARDCODED
fig['data'][0]['hovertemplate'] = 'Sim Period=%{x}<br>Annualised Return=%{y}<br>Prob=%{z}<extra></extra>'
fig = self._px_addsource(fig)
return fig
def _colourway_rgba(self, c1= 'rgb(0,128,128)', c2= 'rgba(128,0,128)',
n = 10, opacity=1):
""" Creates list of colours between two RGB inputs - with opactity
Use plotlys gradient thing to get colours between teal and purple
go.Scatter won't let us change opacity directly but we can via rgba
plotly colours gives rgb NOT rgba - append an opacity alpha to the output
Used in Funnel Plots!
"""
from plotly.colors import n_colors
# Use plotlys gradient thing to get colours between teal and purple
colours = n_colors(c1, c2, n, colortype='rgb')
# Update string to change to rgba then append alpha for opacity
for i, c in enumerate(colours):
c = c[:3] + 'a' + c[3:] # convert from rgb to rgba
# insert opacity alpha into the string
idx = c.find(')')
colours[i] = c[:idx] + ", {}".format(opacity) + c[idx:]
return colours
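# Example of the shape of the strings produced above (values indicative only):
# >>> self._colourway_rgba(n=2, opacity=0.5)
# something like ['rgba(0.0, 128.0, 128.0, 0.5)', 'rgba(128.0, 0.0, 128.0, 0.5)']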
def plot_convergence(self, frontier=True, port=None, opacity = 0.2,
title='Simulated Confidence Funnel',
template='multi_strat', **kwargs):
""" Area fill showing confidence intervals over time
INPUTS:
frontier: True(default)|False
when True we use self.port_names & the 25%-75% confidence
when False we need port to be a string with a portfolio name
port: str() portfolio name only used if frontier == False
"""
# In collecting data we need to know if this is a frontier or single port
# frontier - 25%-75% range for multiple portfolios
# port - more pairs but only 1 port
if frontier:
# create 2 dataframes for the upper & lower quartiles of data
# iterate through results
for i, port in enumerate(self.port_names):
u0 = self.results[port]['stats'].T.loc[:,'25%']
l0 = self.results[port]['stats'].T.loc[:,'75%']
if i == 0:
u = pd.DataFrame(u0)
l = pd.DataFrame(l0)
else:
u = pd.concat([u, u0], axis=1)
l = pd.concat([l, l0], axis=1)
# update column headers (which will say eg. 25% ) to port_names
u.columns = self.port_names
l.columns = self.port_names
# when we build the chart we iterate to add traces
# zip to ensure tuples for upper and lower
pairs = zip(self.port_names[::-1], self.port_names[::-1])
ncolours = len(u.columns) # number of colours we need
else:
# on a single port this isn't required, but to have a single function we do
u = self.results[port]['stats'].T
l = self.results[port]['stats'].T
pairs = [('5%', '95%'), ('10%', '90%'), ('25%', '75%'), ('40%', '60%'), ('50%', '50%')]
ncolours = len(pairs)
# create colourway between teal and purple
colors = self._colourway_rgba('rgb(0, 128, 128)', 'rgba(128, 0, 128)',
ncolours, opacity)
### BUILD THE PLOT
# Set up dummy figure
fig = px.line(title=title, template='multi_strat', **kwargs)
fig.update_layout(
yaxis= {'anchor':'x1','title':'Annualised Return', 'hoverformat':'.1%', 'tickformat':'.1%',},
xaxis= {'anchor':'y1','title':'Simulation Period', 'hoverformat':'.0f', 'tickformat':'.0f',}, )
for i, v in enumerate(pairs):
# Add upper trace
fig.add_trace(go.Scatter(x=l.index, y=l.loc[:,v[0]],
line={'width':0}, fill=None,
showlegend=False,
name="{}".format(str(v[0])),))
fig.add_trace(go.Scatter(x=u.index, y=u.loc[:,v[1]],
line={'width':0},
fill='tonexty',
fillcolor=colors[i],
name="{}".format(str(v[1])),))
fig.update_layout(
yaxis= {'anchor':'x1','title':'Annualised Return', 'hoverformat':'.1%', 'tickformat':'.0%',},
xaxis= {'anchor':'y1','title':'Simulation Period', 'hoverformat':'.0f', 'tickformat':'.0f',}, )
fig = self._px_addsource(fig)
return fig
def plot_cone(self, port=None, tgt=0, period=260, opacity=0.15,
title='Simulated Portfolio Returns',
template='multi_strat', **kwargs):
""" Mash up between a paths plot & a convergence funnel
Takes simulated terminal annualised returns for different percentiles
then plots a funnel area chart showing stylised return paths given those
terminal values.
Important to note this is the smoothed return NOT any simulated path
INPUTS:
port: name from self.port_names
tgt: OPTIONAL (default=0) is the return bogie
period: point along simulation to extract annualised returns
opacity: how deep the colour is on the funnel.
"""
# percentile stats to pull put from stats table
pctle = ['5%', '10%','25%','40%','50%','60%','75%','90%','95%']
s = self.results[port]['stats'].loc[pctle, period]
# create smoothed port path for terminal percentile annualised returns
# NOTE this is NOT the actual simulation that hit that percentile
df = pd.DataFrame([s] * (period+1)) # repmat for no of periods
df.index = range(0, period+1) # reindex to show 0-260 for eg
df = (1 + df) ** (1 / self.f) # de-annualise returns using the class annualisation factor
df.iloc[0,:] = 1 # set port value
df = df.cumprod() # calculate return paths
pairs = [('5%', '95%'), ('10%', '90%'), ('25%', '75%'), ('40%', '60%')]
# create colourway between teal and purple
colors = self._colourway_rgba('rgb(0, 128, 128)', 'rgba(128, 0, 128)',
len(pairs), opacity)
# Dummy plot - note this only works with plotly-express >= 4.10
fig = px.line(title=title, template=template)
# Add Median Line & Historical backtest if including
fig.add_scatter(x=df.index, y=df.loc[:,'50%'], name='MEDIAN')
# target return path
if tgt != 0:
tgt_rtn = [1] + [(1 + tgt) ** (1 / self.f)] * (period + 1)
fig.add_scatter(x=df.index,
y=(np.cumprod(tgt_rtn)),
name='Target',
line={'dash':'dot', 'color':'black'})
for i, v in enumerate(pairs):
# Add upper trace
fig.add_trace(go.Scatter(x=df.index, y=df.loc[:,v[0]],
line={'width':0}, fill=None,
showlegend=False,
name="{}".format(v),))
fig.add_trace(go.Scatter(x=df.index, y=df.loc[:,v[1]],
line={'width':0},
fill='tonexty',
fillcolor=colors[i],
name="{}".format(v),))
fig.update_layout(
yaxis= {'anchor':'x1','title':'Portfolio Value',
'hoverformat':'.2f', 'tickformat':'.1f',},
xaxis= {'anchor':'y1','title':'Simulation Period',
'hoverformat':'.0f', 'tickformat':'.0f',}, )
fig = self._px_addsource(fig)
return fig
# Risk Return of Assets & Plotted Efficient Frontier
def plot_frontier(self, w=None, mu=None, vol=None, cor=None, template='multi_strat'):
""" Risk Return Scatter Plot with Efficient Frontier of Portfolios """
## IMPORTS
w = self.wgts if w is None else w
mu = self.mu if mu is None else mu
vol = self.vol if vol is None else vol
cor = self.cor if cor is None else cor
# Basics
idx = w.sum(axis=1) != 0 # assets with portfolio positions
vcv = np.diag(vol) @ cor @ np.diag(vol) # covariance matrix
# Risk Return Scatter
fig = px.scatter(x=vol[idx], y=mu[idx], text=w[idx].index,
#hover_data=[w[idx].index],
labels={'x': 'Volatility',
'y': 'Expected Return',
'text': 'Asset Class'},
title='Risk Return Chart',
template=template)
# Update Asset Class Labels
fig.update_traces(textposition='top center', textfont_size=10)
fig.update_traces(marker=dict(size=8, line=dict(width=0.5, color='DarkSlateGrey')),)
# Portfolio Stats Dataframe
port_rtn = w.multiply(mu, axis=0).sum().values
port_vol = [np.sqrt(w[i].values @ vcv @ w[i].values.T) for i in w]
df = pd.DataFrame([port_rtn, port_vol], columns=w.columns, index=['port_rtn', 'port_vol']).T
# Add Efficient Frontier
fig.add_scatter(x=df.loc[:,'port_vol'], y=df.loc[:,'port_rtn'],
hovertext=df.index,
marker=dict(size=10, line=dict(width=0.5), symbol='diamond'),
name='Portfolios')
fig = self._px_addsource(fig)
return fig
# Stacked bar chart for portfolio weights
def plot_wgts_bar_stacked(self, wgts=None,
title='Frontier Portfolio Weights',
ytitle='Port Weight',
template='multi_strat'):
""" Stacked Bar Chart with Portfolio Weights of Multiple Portfolios
Originally designed for weights - but works for any stacked bar.
We also use this for MCR, TCR, PCR charts by setting wgts = self.pcr
"""
if wgts is None:
wgts = self.wgts
# Stack end on end for plotly & rename columns (for colours)
df = wgts[wgts.sum(axis=1) != 0].stack().reset_index()
df.columns = ['Asset', 'port', 'w']
# plotly bar chart with Plotly Express
fig = px.bar(df, x='port', y='w', color='Asset',
title=title,
labels={'port':'Portfolio',
'w':ytitle,
'Asset':'Asset Class'},
template=template,)
fig = self._px_addsource(fig)
return fig
# Correlation Heatmap
def plot_correl(self, cor=None, title='Correlation Matrix', aspect='auto',
colorscale='Tealrose', reversescale=False, **kwargs):
""" Plotly Heatmap with Overlay annotations """
# Pull correlation matrix from Bootsrap class
if cor is None:
cor = self.cor
## Basic plotly express imshow heatmap
fig = px.imshow(cor,
x=cor.index, y=cor.index,
labels={'color':'Correlation'},
title=title,
color_continuous_midpoint=0, # change for VCV
aspect=aspect,
**kwargs)
## Formatting
fig.update_layout(margin = {'l':25, 'r':50, 'b':0, 't':50},)
# format to 2dp by editing the z axis data
fig['data'][0]['z'] = np.round(fig['data'][0]['z'], 2)
# Heatmap colour - I rather like Tealrose
fig.update_traces(dict(colorscale=colorscale, reversescale=reversescale,
showscale=False, coloraxis=None),)
# By default plotly imshow doesn't give values, so we append
# Each value is a unique annotation
# iterate through columns & rows (which is a bit silly)
N = cor.shape[0]
for i in range(N):
for j in range(N):
fig.add_annotation(text="{:.2f}".format(cor.iloc[i,j]),
font={'color':'black', 'size':9},
xref='x',yref='y',x=i,y=j,ax=0,ay=0)
return fig
# %% PLOTLY TABLES
### I REALLY HATE THESE AND WE NEED A BETTER METHOD OF SHOWING TABLES
def plot_table(self, method='wgts', port=None, title=""):
""" Table with Headers, Index & Totals Row
INPUT:
method: None|'risk'|'wgts'(default)
None will assume port is dataframe and use that
'risk' takes the inputs from self.results for a given port
'wgts' takes the inputs from self.wgts
port: should be a string with the name of a port for 'risk'
otherwise it's a dataframe if no method provided
"""
# where no method provided assume port is a dataframe
if method is None:
df = port
# useful methods for reporting
# here we build a table with w, mu, risk etc.. inc. totals
elif method == 'risk':
# grab modellling inputs table from self.results
df = self.results[port]['inputs']
df = df[df.w != 0]
# need to add a totals row
tot = df.sum(axis=0)
tot.name = 'TOTAL'
# portfolio expected return & alpha
tot.loc['mu'] = df.w.multiply(df.mu, axis=0).sum()
tot.loc['alpha'] = df.w.multiply(df.alpha, axis=0).sum()
# set volatility and mcr to zero - the sum is meaningless
tot.loc[['vol', 'mcr']] = 0
df = df.append(tot) # append to dataframe
elif method == 'wgts':
df = self.wgts # pull weights from class
df = df[df.sum(axis=1) != 0] # remove assets with zero wgt
df = df.append(pd.Series(df.sum(axis=0), name='TOTAL'))
### MAKE PLOTLY TABLE
# Alternating Row Colours
# create repeating pattern of desired length
# make a grey total row as well
if df.shape[0] % 2 == 0: # even
rc = ['white', 'whitesmoke'] * int((df.shape[0]+1)/2)
rc[-1] = 'grey'
else: # odds
rc = ['white', 'whitesmoke'] * int(df.shape[0]/2)
rc = rc + ['grey']
# Form table
fig = go.Figure(data=[go.Table(
columnwidth = [100, 50],
header=dict(values=list(df.reset_index().columns),
fill_color='black',
line_color='darkslategray',
font={'color':'white', 'size':11},
align=['left', 'center']),
cells=dict(values=df.reset_index().T,
format=[[], ['.2%']], # column text formatting
fill_color = ['teal', rc,],
line_color='darkslategray',
align=['left', 'center'],
font={'color':['white', 'black'], 'size':11},))])
# formatting updates
fig.update_layout(title=title,
height=((df.shape[0] + 1) * 30), # change height
margin = {'l':50, 'r':50, 'b':5, 't':50})
return fig
def plot_stats_table(self, port, periods=[52, 156, 260], title=''):
""" Simulation Descriptive Stats Table
Plotly table with simulation period in the index and stats of columns
REFERENCES:
https://plotly.com/python/table/
AUTHOR: David - but don't ask him to fix it
"""
stats = self.results[port]['stats']
stats = stats.loc[:, periods]
# Index & Formatting for Stats Table
z = [('mean', '.1%'), ('median', '.1%'),
('prob', '.0%'), ('std', '.1%'),
('skew', '.2f'), ('kurtosis', '.2f'),
('5%', '.1%'), ('10%', '.1%'), ('25%', '.1%'), ('40%', '.1%'), ('50%', '.1%'),
('60%', '.1%'),('75%', '.1%'), ('90%', '.1%'), ('95%', '.1%'),]
idx, fmt = zip(*z) # unzip
stats = stats.loc[idx, :].T # susbset stats table & flip horizontal
# plotly table
fig = go.Figure(data=[go.Table(
columnwidth = [100, 60],
header=dict(values=['Sim Period'] + list(stats.columns),
fill_color='black',
line_color='darkslategray',
font={'color':'white', 'size':11},
align=['left', 'center']),
cells=dict(values=stats.reset_index().T,
format=[[], [list(fmt)]], # column text formatting
fill_color = ['teal', ['white','whitesmoke']*50],
line_color='darkslategray',
align=['left', 'center'],
font={'color':['white', 'black'], 'size':11},))])
fig.update_layout(title=title,
width=825,#((stats.shape[1] + 1) * 60),
height=((stats.shape[0] + 1) * 40),
margin = {'l':5, 'r':5, 'b':5, 't':50}) # change width
return fig
# %% Plot Collections - saves lots of plots to self.plots()
def plot_collection_all(self, plot_height=450, plot_width=850):
""" Run port_collection functions for frontier & all portfolios
In each case plots will be returned to self.plots which is a dictionary
frontier will be at self.plots['frontier'] while the rest will have the
port_name as the key.
look at guide for plot_collection_frontier() & plot_collection_port()
for details of exactly which plots are run... this function is just
for the default settings.
INPUTS:
plot_height & plot_width - go into fig.to_html() as inputs
NB/ these only feed in where a plot hasn't been sized already, so it
won't override an existing explicit size input.
"""
# python seems to prefer grabbing one's self, manipulating & stuffing back
# create dict of plots from self
plots = self.plots
# iterate through all plots in the self.results dictionary
for port in self.results.keys():
p = self.plot_collection_port(port=port,
plotly2html=True,
digest=True,
plot_height=plot_height,
plot_width=plot_width)
plots[port] = p
# Run frontier & digest
f = self.plot_collection_frontier(plotly2html=True, digest=True)
plots['frontier'] = f
# ingest plots back to self
self.plots = plots
return "Mega plot run smashed - look in self.plots"
def plot_collection_frontier(self, showplots=False,
plotly2html=True, plotlyjs='cdn',
digest=True,
plot_height=450,
plot_width=850):
""" Create Dictionary of Frontier Plots for use in Reporting
NB/ This includes tables (which require some hacks) remember to remove
the plotly table if we find a better way of presenting tabular data
REFERENCES:
https://stackoverflow.com/questions/59868987/plotly-saving-multiple-plots-into-a-single-html-python
https://plotly.com/python-api-reference/generated/plotly.io.to_html.html
"""
plots=dict() # create empty dictionary
plots['frontier']= self.plot_frontier()
plots['wgts']= self.plot_table(method='wgts') # table of frontier wgts
plots['wgts_bar']= self.plot_wgts_bar_stacked() # stacked wgts bar chart
plots['pcr']= self.plot_wgts_bar_stacked(
wgts=self.pcr, ytitle='Contribution-to-Risk',
title='Asset Class Percentage Contribution to Risk') # stacked PCR
#plots['tcr']= self.plot_wgts_bar_stacked(wgts=self.tcr, ytitle='Contribution to Risk', title='Asset Class Contribution to Total Risk') # stacked PCR
plots['correl']= self.plot_correl() # correlation matrix
# iterate adding stats tables for each portfolio
for p in self.port_names:
plots['stats_' + p] = self.plot_stats_table(
port=p,
periods=[52, 156, 260],
title="Simulation Stats: {}".format(p))
plots['ridgeline'] = self.plot_ridgeline() # ridgeline frontier
plots['hist'] = self.plot_histogram() # TV histogram of frontier
plots['box'] = self.plot_box() # TV boxplot
# convergence plot of inter-quartile range through time for each port
plots['convergence'] = self.plot_convergence(frontier=True)
# useful in Jupyter Notebooks - just an option to show the plots
if showplots:
for p in plots:
p.show()
# option to convert to html
# very useful as it allows us to strip the javascript from plotly plots
# see reference for more info
for k, v in plots.items():
plots[k] = v.to_html(full_html=False,
include_plotlyjs=plotlyjs,
default_height=plot_height,
default_width=plot_width,
)
# Multiple keys is a bit of a pain in markdown later
# Create a single 'stats' plot which merges the plotly stats tables
# look for string 'stats_' in keys & append to dummy list
# then convert to long html str with double line break between each
stats = []
for k, v in plots.items():
if k[:6] == 'stats_':
stats.append(v)
stats = '\n \n'.join(stats) # make long str with line-breaks
plots['stats'] = stats
# save to self.plots() dictionary by default
if digest:
self.plots['frontier'] = plots
return plots
def plot_collection_port(self, port,
showplots=False,
plotly2html=True, plotlyjs='cdn',
digest=True,
plot_height=450,
plot_width=850):
""" Create Dictionary of Single Portfolio Plots for use in Reporting
REFERENCES:
https://stackoverflow.com/questions/59868987/plotly-saving-multiple-plots-into-a-single-html-python
https://plotly.com/python-api-reference/generated/plotly.io.to_html.html
"""
plots=dict()
# Port Risk Table with MCR, TCR etc..
plots['risk_table'] = self.plot_table(method='risk', port=port, title="{}".format(port))
# Simulation paths - these make file sizes large
plots['paths'] = self.plot_paths(port)
plots['cone'] = self.plot_cone(port) # more useful with smaller size
plots['stats'] = self.plot_stats_table(
port=port,
periods=[52, 156, 260],
title="Simulation Stats: {}".format(port))
plots['hist_multi'] = self.plot_histogram(port, periods=[52, 156, 260])
plots['ridgeline'] = self.plot_ridgeline(port)
# convergence shows port and 5%, 10%, 25%, 40% & 50% bands
plots['convergence'] = self.plot_convergence(frontier=False, port=port)
plots['density'] = self.plot_densitymap(sims=port)
# useful in Jupyter Notebooks - just an option to show the plots
if showplots:
for p in plots:
p.show()
# option to convert to html
# very useful as it allows us to strip the javascript from plotly plots
# see reference for more info
for k, v in plots.items():
plots[k] = v.to_html(full_html=False,
include_plotlyjs=plotlyjs,
default_height=plot_height,
default_width=plot_width,
)
# save to self.plots() dictionary by default
if digest:
self.plots[port] = plots  # store under the portfolio name (not 'frontier')
return plots
# %% Bootstrap Reporting
# VERY MUCH WORK IN PROGRESS
def markdown_master(self, title="TEST", density=False):
""" Markdown combined report for Frontier & Portfolios
INPUTS:
density: add density plot to port report section, looks very cool
but has a tendency to really bloat report sizes
"""
md = []
md.append(self.markdown_frontier_report(title=title))
# Individual Portfolio Intro
md.append(" \n \n ")
md.append("## Portfolio Timeseries Modelling")
md.append("Our focus through Frontier analysis is on comparing \
terminal outcome distributions at the {psims}-week point. \
Here we consider the potential experience of individual \
portfolios with respect to time. \n \n "\
.format(psims=self.psims))
for port in self.port_names:
md.append(self.markdown_port_report(port=port,
header=False,
density=density))
return "\n \n".join(md)
def markdown_frontier_report(self, plots=None, title='TEST'):
""" Markdown report created by appending lines """
# grab oneself
if plots is None:
plots = self.plots['frontier']
md = [] # dummy list container - convert to strings later
md.append("# STANLIB Multi-Strategy Stochastic Modelling")
md.append("## Frontier Report: {}".format(title))
md.append("We use an adjusted empirical copula to generate {nsims} \
simulated {psims}-week portfolio return paths. \
\
An empirical approach was selected to maintain higher-moments \
and historical returns are scaled to our forward estimates \
of prospective returns and volatility. Historical sample size \
was {weeks}-weeks; multi-factor regression models may be \
applied to extend timeseries of assets with short histories."\
.format(nsims=self.nsims,
psims=self.psims,
weeks=self.hist.shape[0]))
md.append("### Portfolio Weights & Ex-Ante Risk & Return Information")
md.append("{}".format(plots['frontier']))
md.append("{}".format(plots['wgts']))
md.append("{}".format(plots['wgts_bar']))
#md.append("{}".format(plots['tcr']))
md.append("{}".format(plots['pcr']))
md.append("{}".format(plots['correl']))
md.append("### Bootstrapped Simulations")
md.append("{}".format(plots['stats']))
md.append("Note: Std within descriptive statistics refers to the \
standard deviation of the simulated returns at period-X \
which is not the expected volatility of the portfolio.")
md.append("{}".format(plots['ridgeline']))
md.append("{}".format(plots['hist']))
md.append("{}".format(plots['box']))
md.append("{}".format(plots['convergence']))
md.append("Note: Funnel chart shows the inter-quartile range of \
simulated returns with respect to time.")
return "\n \n".join(md) # NEEDS double line-break to render plots
def markdown_port_report(self, port, header=True, density=False):
""" Markdown report created by appending lines """
# grab plots (requires self.plot_collection_port() to have been run)
plots = self.plots[port]
# dummy list container - convert to strings later
md = []
## Append markdown
# header is optional and my not be wanted if creating combined reports
# assumption is we will always want the frontier report
if header:
md.append("# STANLIB Multi-Strategy Bootstrap Report")
md.append("## Portfolio Report: {}".format(port))
md.append("{}".format(plots['risk_table']))
md.append("{}".format(plots['cone']))
md.append("{}".format(plots['stats']))
md.append("{}".format(plots['hist_multi']))
md.append("{}".format(plots['ridgeline']))
md.append("{}".format(plots['convergence']))
# Density plot is massive so we make it optional
if density:
md.append("{}".format(plots['density']))
return "\n \n".join(md) # NEEDS double line-break to render plots
# %% TESTING
def unit_test(write_report=True, plots_individual=False):
""" Not proper unit testing
Create:
range of dummy 3-asset portfolios (named RP1-RP4)
vectors of expected returns, volatility, alpha & TE
pseudo-random 20-year weekly returns with means & std from mu/vol
Set up a Bootstrap class and initialise with dummy data then run all
the main functions and test output/plots.
Will annotate with guidance on what answers ought to look like but haven't
bothered actually providing output figures.
"""
from topgun.reporting import Reporting
### Setup a Range of 4 Dummy Portfolios (RP1-4) & Dummy Returns
# Returns are a random normal distribution with 20-years of weekly data
## Returns & Vols
# Bootstrap is designed to take pd.Series() as main vector inputs
universe = ['EQUITY', 'CREDIT', 'RATES']
mu= pd.Series(data=[0.1, 0.05, 0.01], index=universe, name='ExRtn')
vol= pd.Series(data=[0.15, 0.08, 0.01], index=universe, name='Std')
alpha= pd.Series(data=[0.02, 0.01, 0.01], index=universe, name='active')
te = | pd.Series(data=[0.03, 0.03, 0.01], index=universe, name='tracking') | pandas.Series |
"""
This module can perform enrichment analyses on a given set of genomic features and visualize their intersections. \
These include gene ontology/tissue/phenotype enrichment, enrichment for user-defined attributes, \
set visualization, etc. \
Results of enrichment analyses can be saved to .csv files.
"""
import random
import numpy as np
import pandas as pd
from rnalysis import general, filtering
import tissue_enrichment_analysis as tea
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.cm import ScalarMappable
from pathlib import Path
import statsmodels.stats.multitest as multitest
from ipyparallel import Client
from itertools import repeat, compress
import upsetplot as upset
import matplotlib_venn as vn
import warnings
from typing import Union, List, Set, Dict, Tuple, Iterable, Type, Callable
class FeatureSet:
""" receives a filtered gene set and preforms various enrichment analyses"""
__slots__ = {'gene_set': 'set of feature names/indices', 'set_name': 'name of the FeatureSet'}
_go_dicts = {}
def __init__(self, gene_set: Union[List[str], Set[
str], filtering.Filter, filtering.CountFilter, filtering.DESeqFilter, filtering.FoldChangeFilter] = None,
set_name: str = ''):
"""
:param gene_set: the set of genomic features to be used in downstream analyses
:type gene_set: filtering.Filter object, set of strings or list of strings
:param set_name: name of the FeatureSet
:type set_name: str
:Examples:
>>> from rnalysis import enrichment, filtering
>>> my_set = enrichment.FeatureSet({'gene1','gene2','gene2'}, 'name of my set')
>>> filter_obj = filtering.CountFilter('tests/counted.csv')
>>> my_other_set = enrichment.FeatureSet(filter_obj, 'name of my other set')
"""
if gene_set is None:
self.gene_set = general.parse_wbgene_string(input(
"Please insert genomic features/indices separated by newline "
"(example: \n'WBGene00000001\nWBGene00000002\nWBGene00000003')"))
elif isinstance(gene_set, set):
pass
elif isinstance(gene_set, list):
gene_set = set(gene_set)
elif issubclass(gene_set.__class__, filtering.Filter):
gene_set = gene_set.index_set
else:
raise TypeError(f"Error: 'gene_set' must be a set, list or tuple! Is a {type(gene_set)} instead. ")
self.gene_set = gene_set
self.set_name = set_name
def __repr__(self):
return f"FeatureSet: {self.set_name}\n" + self.gene_set.__str__()
@staticmethod
def _from_string(msg: str = '', del_spaces: bool = False, delimiter: str = '\n'):
"""
Takes a manual string input from the user, and then splits it using the given delimiter into a list of values. \
Called when an FeatureSet instance is created without input, \
or when FeatureSet.enrich_randomization is called without input.
:param msg: a prompt to be printed to the user
:param del_spaces: if True, will delete all spaces in each delimited value.
:param delimiter: the delimiter used to separate the values. Default is '\n'
:return: A list of the delimiter-separated values the user inserted.
"""
string = input(msg)
split = string.split(sep=delimiter)
if del_spaces:
for i in range(len(split)):
split[i] = split[i].replace(' ', '')
if split[-1] == '':
split = split[:-1]
return split
def _inplace(self, gene_set: set, inplace: bool):
"""
Executes the user's choice whether to perform set operations in-place \
or create a new instance of the FeatureSet object.
:param gene_set: The set of features resulting from the set operations
        :param inplace: bool. If True, gene_set will be saved to the current FeatureSet object. \
        If False, gene_set will be used to create a new instance of FeatureSet.
        :return: If inplace is False, returns a new instance of FeatureSet.
"""
if inplace:
self.gene_set = gene_set
else:
return FeatureSet(gene_set)
def save_txt(self, fname: Union[str, Path]):
"""
Save the list of features in the FeatureSet object under the specified filename and path.
:type fname: str or pathlib.Path
:param fname: full filename/path for the output file. Can include the '.txt' suffix but doesn't have to.
"""
assert isinstance(fname, (str, Path)), "fname must be str or pathlib.Path!"
if isinstance(fname, str):
if not fname.endswith('.txt'):
fname = fname + '.txt'
elif isinstance(fname, Path):
if not fname.suffix == '.txt':
fname = Path(f"{str(fname.parent)}{fname.name}.txt")
with open(fname, 'w') as f:
for gene in self.gene_set:
f.write(gene + '\n')
def _set_ops(self, others, op: Callable):
"""
        Performs a given set operation on self and on one or more other objects (FeatureSet, set or str).
        :type others: FeatureSet, set or str
        :param others: The other object(s) to perform the set operation with.
        :type op: Callable (set.union, set.intersection, set.difference or set.symmetric_difference)
        :param op: The set operation to be performed.
        :return: A set resulting from the set operation.
"""
others = list(others)
for i, other in enumerate(others):
if isinstance(other, set):
pass
elif isinstance(other, FeatureSet):
others[i] = other.gene_set
elif isinstance(other, str):
others[i] = general.parse_wbgene_string(other)
else:
raise TypeError("'other' must be an FeatureSet object or a set!")
try:
return op(self.gene_set, *others)
except TypeError as e:
if op == set.symmetric_difference:
raise TypeError(
f"Symmetric difference can only be calculated for two objects, {len(others) + 1} were given!")
else:
raise e
def union(self, *others, inplace: bool = True):
"""
        Calculates the set union of the indices from multiple FeatureSet objects \
(the indices that exist in at least one of the FeatureSet objects).
:type others: FeatureSet, set or str
:param others: The objects against which the current object will be compared.
:type inplace: bool
:param inplace: If True (default), modifies the current instance of FeatureSet. \
If False, returns a new instance of FeatureSet.
:return: if inplace is False, returns a new instance of FeatureSet.
:Examples:
>>> from rnalysis import enrichment
>>> en = enrichment.FeatureSet({'WBGene00000004','WBGene00000005','WBGene00000006'}, 'set name')
>>> en2 = enrichment.FeatureSet({'WBGene00000004','WBGene00000001'})
>>> s = {'WBGene00000001','WBGene00000002','WBGene00000003'}
>>> en.union(s, en2)
>>> print(en)
FeatureSet: set name
{'WBGene00000003', 'WBGene00000004', 'WBGene00000001', 'WBGene00000002', 'WBGene00000006', 'WBGene00000005'}
"""
return self._inplace(self._set_ops(others, set.union), inplace)
def intersection(self, *others, inplace: bool = True):
"""
Calculates the set intersection of the indices from multiple FeatureSet objects \
(the indices that exist in ALL of the FeatureSet objects).
:type others: FeatureSet, set or str
:param others: The objects against which the current object will be compared.
:type inplace: bool
:param inplace: If True (default), modifies the current instance of FeatureSet. \
If False, returns a new instance of FeatureSet.
:return: if inplace is False, returns a new instance of FeatureSet.
:Examples:
>>> from rnalysis import enrichment
>>> en = enrichment.FeatureSet({'WBGene00000001','WBGene00000002','WBGene00000006'}, 'set name')
>>> s = {'WBGene00000001','WBGene00000002','WBGene00000003'}
>>> en2 = enrichment.FeatureSet({'WBGene00000004','WBGene00000001'})
>>> en.intersection(s, en2)
>>> print(en)
FeatureSet: set name
{'WBGene00000001'}
"""
return self._inplace(self._set_ops(others, set.intersection), inplace)
def difference(self, *others, inplace: bool = True):
"""
Calculates the set difference of the indices from multiple FeatureSet objects \
(the indices that appear in the first FeatureSet object but NOT in the other objects).
:type others: FeatureSet, set or str
:param others: The objects against which the current object will be compared.
:type inplace: bool
:param inplace: If True (default), modifies the current instance of FeatureSet. \
If False, returns a new instance of FeatureSet.
:return: if inplace is False, returns a new instance of FeatureSet.
:Examples:
>>> from rnalysis import enrichment
>>> en = enrichment.FeatureSet({'WBGene00000001','WBGene00000002','WBGene00000006'}, 'set name')
>>> s = {'WBGene00000001','WBGene00000002','WBGene00000003'}
>>> en2 = enrichment.FeatureSet({'WBGene00000004','WBGene00000001'})
>>> en.difference(s, en2)
>>> print(en)
FeatureSet: set name
{'WBGene00000006'}
"""
return self._inplace(self._set_ops(others, set.difference), inplace)
def symmetric_difference(self, other, inplace: bool = True):
"""
Calculates the set symmetric difference of the indices from two FeatureSet objects \
(the indices that appear in EXACTLY ONE of the FeatureSet objects, and not both/neither). \
A-symmetric difference-B is equivalent to (A-difference-B)-union-(B-difference-A).
:type other: FeatureSet, set or str
:param other: A second object against which the current object will be compared.
:type inplace: bool
:param inplace: If True (default), modifies the current instance of FeatureSet. \
If False, returns a new instance of FeatureSet.
:return: if inplace is False, returns a new instance of FeatureSet.
:Examples:
>>> from rnalysis import enrichment
>>> en = enrichment.FeatureSet({'WBGene00000001','WBGene00000002','WBGene00000006'}, 'set name')
>>> en2 = enrichment.FeatureSet({'WBGene00000004','WBGene00000001'})
>>> en.symmetric_difference(en2)
>>> print(en)
FeatureSet: set name
{'WBGene00000002', 'WBGene00000006', 'WBGene00000004'}
"""
return self._inplace(self._set_ops([other], set.symmetric_difference), inplace)
@staticmethod
def _enrichment_save_csv(df: pd.DataFrame, fname: str):
"""
Internal method, used to save enrichment results to .csv files. Static class method.
:param df: pandas DataFrame to be saved.
:param fname: Name and full path under which the DataFrame will be saved
"""
if fname is None:
fname = input("Please insert the full name and path to save the file to")
else:
assert isinstance(fname, (str, Path))
if isinstance(fname, Path):
                fname = str(fname)
general.save_to_csv(df, filename=fname + '.csv')
def go_enrichment(self, mode: str = 'all', alpha: float = 0.05, save_csv: bool = False, fname: str = None):
"""
Analyzes GO, Tissue and/or Phenotype enrichment for the given group of genomic features. \
        Uses the Anatomy, Phenotype and Gene Ontology annotations for C. elegans. \
Corrected p-values are calculated using hypergeometric statistics. \
For more details see GitHub page of the developers: https://github.com/dangeles/TissueEnrichmentAnalysis
:type mode: 'go', 'tissue', 'phenotype' or 'all' (default 'all')
:param mode: the enrichment you wish to perform. 'go' for gene ontology enrichment, \
'tissue' for tissue enrichment, 'phenotype' for phenotype enrichment, or 'all' for all three.
:type alpha: float between 0 and 1 (default 0.05)
:param alpha: Significance threshold.
:type save_csv: bool (default False)
:param save_csv: If True, save the result to a csv.
:type fname: str or pathlib.Path
:param fname: Name and path in which to save the results. Must be specified if save_csv is True.
        :return: a DataFrame which contains the significant enrichment terms
.. figure:: go_en.png
:align: center
:scale: 40 %
Example plot of GO enrichment
.. figure:: tissue_en.png
:align: center
:scale: 40 %
Example plot of Tissue enrichment
"""
assert isinstance(alpha, float), "alpha must be a float!"
assert isinstance(mode, str), "'mode' must be a string!"
plt.style.use('seaborn-white')
if mode == 'all':
d = []
df_comb = pd.DataFrame()
for k, arg in enumerate(('go', 'tissue', 'phenotype')):
print(f'Calculating... {100 * k / 3 :.2f}% done')
if arg in FeatureSet._go_dicts:
d.append(FeatureSet._go_dicts[arg])
else:
d.append(tea.fetch_dictionary(arg))
FeatureSet._go_dicts[arg] = d[-1]
df = tea.enrichment_analysis(self.gene_set, d[-1], alpha=alpha)
if not df.empty:
df_comb = df_comb.append(df)
plt.figure()
tea.plot_enrichment_results(df, title=f'{arg.capitalize()} Enrichment Analysis', analysis=arg)
plt.title(f'{arg.capitalize()} Enrichment Analysis for sample {self.set_name}', fontsize=20)
else:
assert (mode == 'go' or mode == 'tissue' or mode == 'phenotype'), "Invalid mode!"
d = tea.fetch_dictionary(mode)
            df_comb = tea.enrichment_analysis(self.gene_set, d, alpha=alpha, show=True)
if not df_comb.empty:
tea.plot_enrichment_results(df_comb, title=f'{mode.capitalize()} Enrichment Analysis', analysis=mode)
plt.title(f'{mode.capitalize()} Enrichment Analysis', fontsize=20)
if save_csv:
self._enrichment_save_csv(df_comb, fname)
plt.show()
return df_comb
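    # Illustrative usage of go_enrichment (an assumption for demonstration -- the feature IDs below are hypothetical):
    #   >>> en = FeatureSet({'WBGene00000001', 'WBGene00000002'}, 'my set')
    #   >>> go_results = en.go_enrichment(mode='go', alpha=0.05)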
@staticmethod
def _single_enrichment(gene_set, attributes, attr_ref_df: pd.DataFrame, fraction: Callable, reps: int):
attributes = [attributes] if not isinstance(attributes, list) else attributes
for attribute in attributes:
assert isinstance(attribute, str), f"Error in attribute {attribute}: attributes must be strings!"
df = attr_ref_df[[attribute, 'int_index']]
srs = df[attribute]
srs_int = (df.set_index('int_index', inplace=False))[attribute]
obs_srs = srs.loc[gene_set]
n = obs_srs.shape[0]
expected_fraction = fraction(srs)
observed_fraction = fraction(obs_srs)
log2_fold_enrichment = np.log2(observed_fraction / expected_fraction) if observed_fraction > 0 else -np.inf
ind = srs_int.index
if log2_fold_enrichment >= 0:
success = sum(
(fraction(srs_int.loc[np.random.choice(ind, n, replace=False)]) >= observed_fraction
for _ in repeat(None, reps)))
else:
success = sum(
(fraction(srs_int.loc[np.random.choice(ind, n, replace=False)]) <= observed_fraction
for _ in repeat(None, reps)))
pval = (success + 1) / (reps + 1)
return [attribute, n, int(n * observed_fraction), n * expected_fraction, log2_fold_enrichment, pval]
@staticmethod
def _enrichment_get_attrs(attributes, attr_ref_path):
if attributes is None:
attributes = FeatureSet._from_string(
"Please insert attributes separated by newline "
"(for example: \n'epigenetic_related_genes\nnrde-3 targets\nALG-3/4 class small RNAs')")
elif isinstance(attributes, (str, int)):
attributes = [attributes]
else:
assert isinstance(attributes, (list, tuple, set)), "'attributes' must be a list, tuple or set!"
for attr in attributes:
if isinstance(attr, int):
assert attr >= 0, f"Error in attribute number {attr}: index must be non-negative!"
else:
assert isinstance(attr, str), f"Invalid type of attribute {attr}: {type(attr)}"
try:
with open(attr_ref_path) as f:
all_attrs = f.readline().split(',')[1::]
        except Exception:
raise ValueError(f"Invalid or nonexistent Attribute Reference Table path! path:'{attr_ref_path}'")
if all_attrs[-1].endswith('\n'):
all_attrs[-1] = all_attrs[-1][:-1]
if attributes == ['all']:
attributes = all_attrs
        elif all(isinstance(i, int) for i in attributes):
            return [all_attrs[i] for i in attributes]
return attributes
def _enrichment_get_reference(self, biotype, background_genes, attr_ref_path, biotype_ref_path):
gene_set = self.gene_set
attr_ref_df = general.load_csv(attr_ref_path)
general._attr_table_assertions(attr_ref_df)
attr_ref_df.set_index('gene', inplace=True)
assert (isinstance(biotype, (str, list, set, tuple)))
if background_genes is None:
pass
else:
assert isinstance(background_genes,
(set, FeatureSet)) or issubclass(background_genes.__class__,
filtering.Filter), f"background_genes must be a set, " \
f"enrichment.FeatureSet or filtering.Filter;" \
f" instead is {type(background_genes)}"
if isinstance(background_genes, FeatureSet):
background_genes = background_genes.gene_set
elif issubclass(background_genes.__class__, filtering.Filter):
background_genes = background_genes.index_set
if biotype != 'all':
warnings.warn(
"both 'biotype' and 'background_genes' were specified. Therefore 'biotype' is ignored. ")
biotype = 'all'
attr_ref_df = attr_ref_df.loc[background_genes.intersection(set(attr_ref_df.index))]
if len(attr_ref_df.index) < len(background_genes):
warnings.warn(
f"{len(background_genes) - len(attr_ref_df.index)} indices from the requested "
f"background genes do not appear in the Attribute Reference Table, and are therefore ignored. \n"
f"This leaves a total of {len(attr_ref_df.index)} background genes. ")
if biotype == 'all':
pass
else:
biotype_ref_df = general.load_csv(biotype_ref_path)
general._biotype_table_assertions(biotype_ref_df)
biotype_ref_df.set_index('gene', inplace=True)
biotype_ref_df.columns = biotype_ref_df.columns.str.lower()
if isinstance(biotype, (list, tuple, set)):
mask = pd.Series(np.zeros_like(biotype_ref_df['biotype'].values, dtype=bool),
biotype_ref_df['biotype'].index,
name='biotype')
for bio in biotype:
mask = mask | (biotype_ref_df['biotype'] == bio)
else:
biotype_ref_df = biotype_ref_df.loc[biotype_ref_df.index.intersection(attr_ref_df.index)]
mask = biotype_ref_df['biotype'] == biotype
attr_ref_df = attr_ref_df.loc[biotype_ref_df[mask].index]
attr_ref_df.sort_index(inplace=True)
attr_ref_df['int_index'] = [i for i in range(len(attr_ref_df.index))]
print(f"{len(attr_ref_df.index)} background genes are used. ")
not_in_bg = gene_set.difference(set(attr_ref_df.index))
if len(not_in_bg) > 0:
gene_set = gene_set.difference(not_in_bg)
warnings.warn(f"{len(not_in_bg)} genes in the enrichment set do not appear in the background genes. \n"
f"Enrichment will be run on the remaining {len(gene_set)}.")
return attr_ref_df, gene_set
def enrich_randomization_parallel(self, attributes: Union[Iterable[str], str, Iterable[int], int] = None,
fdr: float = 0.05, reps: int = 10000, biotype: str = 'protein_coding',
background_genes=None, attr_ref_path: str = 'predefined',
biotype_ref_path: str = 'predefined', save_csv: bool = False, fname=None,
return_fig: bool = False, random_seed: int = None):
"""
Calculates enrichment scores, p-values and adjusted p-values \
for enrichment and depletion of selected attributes from an Attribute Reference Table using parallel processing. \
Background set is determined by either the input variable 'background_genes', \
or by the input variable 'biotype' and a Biotype Reference Table. \
Parallel processing makes this function generally faster than FeatureSet.enrich_randomization. \
To use it you must first start a parallel session, using rnalysis.general.start_parallel_session(). \
P-values are calculated using a randomization test with the formula p = (successes + 1)/(repeats + 1). \
P-values are corrected for multiple comparisons using \
the Benjamini–Hochberg step-up procedure (original FDR method). \
Enrichment/depletion is determined automatically by the calculated enrichment score: \
if log2(enrichment score) is positive then enrichment is assumed, \
and if log2(enrichment score) is negative then depletion is assumed. \
In plots, for the clarity of display, complete depletion (linear enrichment = 0) \
appears with the smallest value in the scale.
:type attributes: str, int, iterable (list, tuple, set, etc) of str/int, or 'all'
:param attributes: An iterable of attribute names or attribute numbers \
(according to their order in the Attribute Reference Table). \
If 'all', all of the attributes in the Attribute Reference Table will be used. \
If None, a manual input prompt will be raised.
:type fdr: float between 0 and 1
:param fdr: Indicates the FDR threshold for significance.
:type reps: int larger than 0
:param reps: How many repetitions to run the randomization for. \
10,000 is the default. Recommended 10,000 or higher.
:type attr_ref_path: str or pathlib.Path (default 'predefined')
:param attr_ref_path: the path of the Attribute Reference Table from which user-defined attributes will be drawn.
:type biotype_ref_path: str or pathlib.Path (default 'predefined')
:param biotype_ref_path: the path of the Biotype Reference Table. \
Will be used to generate background set if 'biotype' is specified.
:type biotype: str specifying a specific biotype, or 'all'. Default 'protein_coding'.
:param biotype: determines the background genes by their biotype. Requires specifying a Biotype Reference Table. \
'all' will include all genomic features in the reference table, \
'protein_coding' will include only protein-coding genes from the reference table, etc. \
Cannot be specified together with 'background_genes'.
:type background_genes: set of feature indices, filtering.Filter object, or enrichment.FeatureSet object
:param background_genes: a set of specific feature indices to be used as background genes. \
Cannot be specified together with 'biotype'.
:type save_csv: bool, default False
:param save_csv: If True, will save the results to a .csv file, under the name specified in 'fname'.
:type fname: str or pathlib.Path
:param fname: The full path and name of the file to which to save the results. For example: \
r'C:\dir\file'. No '.csv' suffix is required. If None (default), fname will be requested in a manual prompt.
:type return_fig: bool (default False)
:param return_fig: if True, returns a matplotlib Figure object in addition to the results DataFrame.
:rtype: pd.DataFrame (default) or Tuple[pd.DataFrame, matplotlib.figure.Figure]
:return:
a pandas DataFrame with the indicated attribute names as rows/index, and the columns 'log2_fold_enrichment'
and 'pvalue'; and a matplotlib Figure, if 'return_figure' is set to True.
.. figure:: enrichment_randomization.png
:align: center
:scale: 40 %
Example plot of enrich_randomization_parallel()
"""
attr_ref_path = general._get_attr_ref_path(attr_ref_path)
biotype_ref_path = general._get_biotype_ref_path(biotype_ref_path)
attr_ref_df, gene_set = self._enrichment_get_reference(biotype=biotype, background_genes=background_genes,
attr_ref_path=attr_ref_path,
biotype_ref_path=biotype_ref_path)
attributes = self._enrichment_get_attrs(attributes=attributes, attr_ref_path=attr_ref_path)
fraction = lambda mysrs: (mysrs.shape[0] - mysrs.isna().sum()) / mysrs.shape[0]
client = Client()
dview = client[:]
dview.execute("""import numpy as np
import pandas as pd""")
if random_seed is not None:
assert isinstance(random_seed, int) and random_seed >= 0, f"random_seed must be a non-negative integer. " \
f"Value {random_seed} invalid."
dview.execute(f"np.random.seed({random_seed})")
k = len(attributes)
gene_set_rep = list(repeat(gene_set, k))
attr_ref_df_rep = list(repeat(attr_ref_df, k))
fraction_rep = list(repeat(fraction, k))
reps_rep = list(repeat(reps, k))
res = dview.map(FeatureSet._single_enrichment, gene_set_rep, attributes, attr_ref_df_rep, fraction_rep,
reps_rep)
enriched_list = res.result()
res_df = pd.DataFrame(enriched_list,
columns=['name', 'samples', 'n obs', 'n exp', 'log2_fold_enrichment',
'pval'])
        # replace -inf scores (complete depletion) with the most negative finite score, as described in the docstring
        res_df['log2_fold_enrichment'] = res_df['log2_fold_enrichment'].replace(-np.inf, -np.abs(res_df['log2_fold_enrichment']).replace(np.inf, np.nan).max())
significant, padj = multitest.fdrcorrection(res_df['pval'].values, alpha=fdr)
res_df['padj'] = padj
res_df['significant'] = significant
res_df.set_index('name', inplace=True)
fig = self._plot_enrich_randomization(res_df, title=self.set_name)
if save_csv:
self._enrichment_save_csv(res_df, fname)
if return_fig:
return res_df, fig
return res_df
def enrich_randomization(self, attributes: Union[Iterable[str], str, Iterable[int], int] = None, fdr: float = 0.05,
reps: int = 10000, biotype: str = 'protein_coding', background_genes=None,
attr_ref_path: str = 'predefined', biotype_ref_path: str = 'predefined',
save_csv: bool = False, fname=None, return_fig: bool = False, random_seed: int = None):
"""
Calculates enrichment scores, p-values and adjusted p-values \
for enrichment and depletion of selected attributes from an Attribute Reference Table. \
Background set is determined by either the input variable 'background_genes', \
or by the input variable 'biotype' and a Biotype Reference Table. \
P-values are calculated using a randomization test with the formula p = (successes + 1)/(repeats + 1). \
P-values are corrected for multiple comparisons using \
the Benjamini–Hochberg step-up procedure (original FDR method). \
Enrichment/depletion is determined automatically by the calculated enrichment score: \
if log2(enrichment score) is positive then enrichment is assumed, \
and if log2(enrichment score) is negative then depletion is assumed. \
In plots, for the clarity of display, complete depletion (linear enrichment = 0) \
appears with the smallest value in the scale.
:type attributes: str, int, iterable (list, tuple, set, etc) of str/int, or 'all'.
:param attributes: An iterable of attribute names or attribute numbers \
(according to their order in the Attribute Reference Table). \
If 'all', all of the attributes in the Attribute Reference Table will be used. \
If None, a manual input prompt will be raised.
:type fdr: float between 0 and 1
:param fdr: Indicates the FDR threshold for significance.
:type reps: int larger than 0
:param reps: How many repetitions to run the randomization for. \
10,000 is the default. Recommended 10,000 or higher.
:type attr_ref_path: str or pathlib.Path (default 'predefined')
:param attr_ref_path: the path of the Attribute Reference Table from which user-defined attributes will be drawn.
:type biotype_ref_path: str or pathlib.Path (default 'predefined')
:param biotype_ref_path: the path of the Biotype Reference Table. \
Will be used to generate background set if 'biotype' is specified.
:type biotype: str specifying a specific biotype, list/set of strings each specifying a biotype, or 'all'. \
Default 'protein_coding'.
:param biotype: determines the background genes by their biotype. Requires specifying a Biotype Reference Table. \
'all' will include all genomic features in the reference table, \
'protein_coding' will include only protein-coding genes from the reference table, etc. \
Cannot be specified together with 'background_genes'.
:type background_genes: set of feature indices, filtering.Filter object, or enrichment.FeatureSet object
:param background_genes: a set of specific feature indices to be used as background genes. \
Cannot be specified together with 'biotype'.
:type save_csv: bool, default False
:param save_csv: If True, will save the results to a .csv file, under the name specified in 'fname'.
:type fname: str or pathlib.Path
:param fname: The full path and name of the file to which to save the results. For example: \
r'C:\dir\file'. No '.csv' suffix is required. If None (default), fname will be requested in a manual prompt.
:type return_fig: bool (default False)
:param return_fig: if True, returns a matplotlib Figure object in addition to the results DataFrame.
:rtype: pd.DataFrame (default) or Tuple[pd.DataFrame, matplotlib.figure.Figure]
:return: a pandas DataFrame with the indicated attribute names as rows/index, and the columns 'log2_fold_enrichment'
and 'pvalue'; and a matplotlib Figure, if 'return_figure' is set to True.
.. figure:: enrichment_randomization.png
:align: center
:scale: 40 %
Example plot of enrich_randomization()
"""
attr_ref_path = general._get_attr_ref_path(attr_ref_path)
biotype_ref_path = general._get_biotype_ref_path(biotype_ref_path)
attr_ref_df, gene_set = self._enrichment_get_reference(biotype=biotype, background_genes=background_genes,
attr_ref_path=attr_ref_path,
biotype_ref_path=biotype_ref_path)
attributes = self._enrichment_get_attrs(attributes=attributes, attr_ref_path=attr_ref_path)
fraction = lambda mysrs: (mysrs.shape[0] - mysrs.isna().sum()) / mysrs.shape[0]
enriched_list = []
if random_seed is not None:
assert isinstance(random_seed, int) and random_seed >= 0, f"random_seed must be a non-negative integer. " \
f"Value {random_seed} invalid."
random.seed(random_seed)
for k, attribute in enumerate(attributes):
assert isinstance(attribute, str), f"Error in attribute {attribute}: attributes must be strings!"
print(f"Finished {k} attributes out of {len(attributes)}")
df = attr_ref_df[[attribute, 'int_index']]
srs = df[attribute]
srs_int = (df.set_index('int_index', inplace=False))[attribute]
obs_srs = srs.loc[gene_set]
n = obs_srs.shape[0]
expected_fraction = fraction(srs)
observed_fraction = fraction(obs_srs)
log2_fold_enrichment = np.log2(observed_fraction / expected_fraction) if observed_fraction > 0 else -np.inf
            ind = list(srs_int.index)  # a list (not a set) so that random.sample keeps working on newer Python versions
if log2_fold_enrichment >= 0:
success = sum(
(fraction(srs_int.loc[random.sample(ind, n)]) >= observed_fraction
for _ in repeat(None, reps)))
else:
success = sum(
(fraction(srs_int.loc[random.sample(ind, n)]) <= observed_fraction
for _ in repeat(None, reps)))
pval = (success + 1) / (reps + 1)
enriched_list.append(
(attribute, n, int(n * observed_fraction), n * expected_fraction, log2_fold_enrichment, pval))
res_df = pd.DataFrame(enriched_list,
columns=['name', 'samples', 'n obs', 'n exp', 'log2_fold_enrichment',
'pval'])
        # replace -inf scores (complete depletion) with the most negative finite score, as described in the docstring
        res_df['log2_fold_enrichment'] = res_df['log2_fold_enrichment'].replace(-np.inf, -np.abs(res_df['log2_fold_enrichment']).replace(np.inf, np.nan).max())
significant, padj = multitest.fdrcorrection(res_df['pval'].values, alpha=fdr)
res_df['padj'] = padj
res_df['significant'] = significant
res_df.set_index('name', inplace=True)
fig = self._plot_enrich_randomization(res_df, title=self.set_name)
if save_csv:
self._enrichment_save_csv(res_df, fname)
if return_fig:
return res_df, fig
return res_df
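    # Illustrative usage of enrich_randomization (an assumption for demonstration -- the paths, feature IDs and
    # attribute names below are hypothetical):
    #   >>> en = FeatureSet({'WBGene00000001', 'WBGene00000002'}, 'my set')
    #   >>> results = en.enrich_randomization(attributes=['attribute1', 'attribute2'],
    #   ...                                   attr_ref_path='attr_ref_table.csv',
    #   ...                                   biotype_ref_path='biotype_ref_table.csv')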
def enrich_hypergeometric(self, attributes: Union[Iterable[str], str, Iterable[int], int] = None, fdr: float = 0.05,
biotype: str = 'protein_coding', background_genes=None,
attr_ref_path: str = 'predefined', biotype_ref_path: str = 'predefined',
save_csv: bool = False, fname=None, return_fig: bool = False):
"""
Calculates enrichment scores, p-values and adjusted p-values \
for enrichment and depletion of selected attributes from an Attribute Reference Table, \
based on a hypergeometric test. \
Background set is determined by either the input variable 'background_genes', \
or by the input variable 'biotype' and a Biotype Reference Table. \
P-values are calculated using a hypergeometric test: \
Given M genes in the background set, n genes in the test set, \
with N genes from the background set belonging to a specific attribute (or 'success') \
and X genes from the test set belonging to that attribute. \
If we were to randomly draw n genes from the background set (without replacement), \
what is the probability of drawing X or more (in case of enrichment)/X or less (in case of depletion) \
genes belonging to the given attribute? \
P-values are corrected for multiple comparisons using \
the Benjamini–Hochberg step-up procedure (original FDR method). \
Enrichment/depletion is determined automatically by the calculated enrichment score: \
if log2(enrichment score) is positive then enrichment is assumed, \
and if log2(enrichment score) is negative then depletion is assumed. \
In plots, for the clarity of display, complete depletion (linear enrichment = 0) \
appears with the smallest value in the scale.
:type attributes: str, int, iterable (list, tuple, set, etc) of str/int, or 'all'.
:param attributes: An iterable of attribute names or attribute numbers \
(according to their order in the Attribute Reference Table). \
If 'all', all of the attributes in the Attribute Reference Table will be used. \
If None, a manual input prompt will be raised.
:type fdr: float between 0 and 1
:param fdr: Indicates the FDR threshold for significance.
:type attr_ref_path: str or pathlib.Path (default 'predefined')
:param attr_ref_path: the path of the Attribute Reference Table from which user-defined attributes will be drawn.
:type biotype_ref_path: str or pathlib.Path (default 'predefined')
:param biotype_ref_path: the path of the Biotype Reference Table. \
Will be used to generate background set if 'biotype' is specified.
:type biotype: str specifying a specific biotype, list/set of strings each specifying a biotype, or 'all'. \
Default 'protein_coding'.
:param biotype: determines the background genes by their biotype. Requires specifying a Biotype Reference Table. \
'all' will include all genomic features in the reference table, \
'protein_coding' will include only protein-coding genes from the reference table, etc. \
Cannot be specified together with 'background_genes'.
:type background_genes: set of feature indices, filtering.Filter object, or enrichment.FeatureSet object
:param background_genes: a set of specific feature indices to be used as background genes. \
Cannot be specified together with 'biotype'.
:type save_csv: bool, default False
:param save_csv: If True, will save the results to a .csv file, under the name specified in 'fname'.
:type fname: str or pathlib.Path
:param fname: The full path and name of the file to which to save the results. For example: \
r'C:\dir\file'. No '.csv' suffix is required. If None (default), fname will be requested in a manual prompt.
:type return_fig: bool (default False)
:param return_fig: if True, returns a matplotlib Figure object in addition to the results DataFrame.
:rtype: pd.DataFrame (default) or Tuple[pd.DataFrame, matplotlib.figure.Figure]
:return: a pandas DataFrame with the indicated attribute names as rows/index, and the columns 'log2_fold_enrichment'
and 'pvalue'; and a matplotlib Figure, if 'return_figure' is set to True.
.. figure:: enrichment_randomization.png
:align: center
:scale: 40 %
Example plot of enrich_hypergeometric()
"""
attr_ref_path = general._get_attr_ref_path(attr_ref_path)
biotype_ref_path = general._get_biotype_ref_path(biotype_ref_path)
attr_ref_df, gene_set = self._enrichment_get_reference(biotype=biotype, background_genes=background_genes,
attr_ref_path=attr_ref_path,
biotype_ref_path=biotype_ref_path)
attributes = self._enrichment_get_attrs(attributes=attributes, attr_ref_path=attr_ref_path)
fraction = lambda mysrs: (mysrs.shape[0] - mysrs.isna().sum()) / mysrs.shape[0]
enriched_list = []
for k, attribute in enumerate(attributes):
assert isinstance(attribute, str), f"Error in attribute {attribute}: attributes must be strings!"
print(f"Finished {k} attributes out of {len(attributes)}")
df = attr_ref_df[[attribute, 'int_index']]
srs = df[attribute]
obs_srs = srs.loc[gene_set]
n = obs_srs.shape[0]
expected_fraction = fraction(srs)
observed_fraction = fraction(obs_srs)
log2_fold_enrichment = np.log2(observed_fraction / expected_fraction) if observed_fraction > 0 else -np.inf
pval = self._calc_hypergeometric_pval(bg_size=srs.shape[0], go_size=srs.notna().sum(),
de_size=obs_srs.shape[0], go_de_size=obs_srs.notna().sum())
enriched_list.append(
(attribute, n, int(n * observed_fraction), n * expected_fraction, log2_fold_enrichment, pval))
res_df = pd.DataFrame(enriched_list,
columns=['name', 'samples', 'n obs', 'n exp', 'log2_fold_enrichment',
'pval'])
        # replace -inf scores (complete depletion) with the most negative finite score, as described in the docstring
        res_df['log2_fold_enrichment'] = res_df['log2_fold_enrichment'].replace(-np.inf, -np.abs(res_df['log2_fold_enrichment']).replace(np.inf, np.nan).max())
significant, padj = multitest.fdrcorrection(res_df['pval'].values, alpha=fdr)
res_df['padj'] = padj
res_df['significant'] = significant
res_df.set_index('name', inplace=True)
fig = self._plot_enrich_randomization(res_df, title=self.set_name)
if save_csv:
self._enrichment_save_csv(res_df, fname)
if return_fig:
return res_df, fig
return res_df
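    # NOTE: _calc_hypergeometric_pval is defined elsewhere in this module. The sketch below is an
    # illustrative assumption of the test described in the enrich_hypergeometric docstring, using
    # scipy.stats.hypergeom; it is not the original implementation.
    @staticmethod
    def _calc_hypergeometric_pval_sketch(bg_size: int, go_size: int, de_size: int, go_de_size: int):
        from scipy.stats import hypergeom
        # enrichment: P(X >= go_de_size); depletion: P(X <= go_de_size),
        # with X ~ Hypergeometric(M=bg_size, n=go_size, N=de_size)
        if go_de_size / de_size >= go_size / bg_size:
            return hypergeom.sf(go_de_size - 1, bg_size, go_size, de_size)
        return hypergeom.cdf(go_de_size, bg_size, go_size, de_size)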
@staticmethod
def _plot_enrich_randomization(df: pd.DataFrame, title: str = ''):
"""
        Receives a DataFrame output from FeatureSet.enrich_randomization, and plots it as a bar plot. \
Static class method. \
For the clarity of display, complete depletion (linear enrichment = 0) \
appears with the smallest value in the scale.
:param df: a pandas DataFrame created by FeatureSet.enrich_randomization.
:param title: plot title.
        :return: a matplotlib Figure instance
"""
plt.style.use('seaborn-white')
enrichment_names = df.index.values.tolist()
enrichment_pvalue = df['padj']
# set enrichment scores which are 'inf' or '-inf' to be the second highest/lowest enrichment score in the list
enrichment_scores = df['log2_fold_enrichment'].values.copy()
scores_no_inf = [i for i in enrichment_scores if i != np.inf and i != -np.inf and i < 0]
if len(scores_no_inf) == 0:
scores_no_inf.append(-1)
for i in range(len(enrichment_scores)):
if enrichment_scores[i] == -np.inf:
enrichment_scores[i] = min(scores_no_inf)
# get color values for bars
data_color = [(i / 3) * 127.5 for i in enrichment_scores]
data_color_norm = [i + 127.5 for i in data_color]
data_color_norm_256 = [int(i) if i != np.inf and i != -np.inf else np.sign(i) * max(np.abs(scores_no_inf)) for i
in data_color_norm]
my_cmap = plt.cm.get_cmap('coolwarm')
colors = my_cmap(data_color_norm_256)
# generate bar plot
fig, ax = plt.subplots(constrained_layout=True, figsize=[6.4 * 0.5 + 0.5 * df.shape[0], 5.6])
bar = ax.bar(x=range(len(enrichment_names)), height=enrichment_scores, color=colors, edgecolor='black',
linewidth=1)
bar.tick_labels = enrichment_names
# add horizontal line
ax.axhline(color='black', linewidth=1)
# add colorbar
sm = ScalarMappable(cmap=my_cmap, norm=plt.Normalize(3, -3))
sm.set_array([])
cbar = fig.colorbar(sm)
        cbar.set_label(r"$\log_2$(Fold Enrichment)", fontsize=12)
# apply xticks
ax.set_xticks(range(len(enrichment_names)))
ax.set_xticklabels(enrichment_names, fontsize=13, rotation=45)
# ylabel and title
ax.set_ylabel(r"$\log_2$(Fold Enrichment)", fontsize=14)
ax.set_title(title, fontsize=16)
# add significance asterisks
for col, sig in zip(bar, enrichment_pvalue):
fontweight = 'bold'
if sig < 0.0001:
asterisks = u'\u2217' * 4
elif sig < 0.001:
asterisks = u'\u2217' * 3
elif sig < 0.01:
asterisks = u'\u2217' * 2
elif sig < 0.05:
asterisks = u'\u2217'
else:
asterisks = 'ns'
fontweight = 'normal'
            valign = 'bottom' if col.get_height() > 0 else 'top'
            ax.text(x=col.get_x() + 0.5 * col.get_width(),
                    y=col.get_height(), s=asterisks, fontname='DejaVu Sans', fontweight=fontweight,
                    fontsize=12, horizontalalignment='center', verticalalignment=valign)
sns.despine()
plt.show()
return fig
def biotypes(self, ref: str = 'predefined'):
"""
Returns a DataFrame of the biotypes in the gene set and their count.
:type ref: str or pathlib.Path (default 'predefined')
:param ref: Path of the reference file used to determine biotype. \
Default is the path predefined in the settings file.
:Examples:
>>> from rnalysis import enrichment, filtering
>>> d = filtering.Filter("tests/test_deseq.csv")
>>> en = enrichment.FeatureSet(d)
>>> en.biotypes(ref='tests/biotype_ref_table_for_tests.csv')
gene
biotype
protein_coding 26
pseudogene 1
unknown 1
"""
ref = general._get_biotype_ref_path(ref)
ref_df = general.load_csv(ref)
general._biotype_table_assertions(ref_df)
ref_df.columns = ref_df.columns.str.lower()
not_in_ref = pd.Index(self.gene_set).difference(set(ref_df['gene']))
if len(not_in_ref) > 0:
warnings.warn(
                f'{len(not_in_ref)} of the features in the FeatureSet do not appear in the Biotype Reference Table. ')
ref_df = ref_df.append( | pd.DataFrame({'gene': not_in_ref, 'biotype': 'not_in_biotype_reference'}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
@file:compete_pctr_yes_test.py
@time:2019/6/12 21:10
@author:Tangj
@software:Pycharm
@Desc
"""
import pandas as pd
import numpy as np
import time
tt = time.time()
fea = pd.DataFrame()
test_bid = | pd.read_csv('../usingData/test/test_bid.csv') | pandas.read_csv |
#%%
import numpy as np
import pandas as pd
import cmdstanpy
import arviz as az
# Load the datasets
calib_data = pd.read_csv('../../data/calibration/2021-04-05_hplc_calibration/processed/2021-04-05_NC_DM_calibration_relative_areas.csv')
glucose_data = pd.read_csv('../../data/metabolite_turnover/2021-04-04_REL606_glucose_turnover/processed/2021-04-04_REL606_glucose_turnover_relative_areas.csv')
acetate_data = pd.read_csv('../../data/metabolite_turnover/2021-04-27_REL606_acetate_turnover/processed/2021-04-27_REL606_acetate_turnover_relative_areas.csv')
# restrict the data to the relevant quantities
calib_data = calib_data[(calib_data['buffer_base']=='DM') &
(calib_data['compound'].isin(['glucose', 'acetate']))
][['carbon_conc_mM', 'rel_area_phosphate', 'compound']]
glucose_data = glucose_data[glucose_data['compound'].isin(['glucose', 'acetate'])
][['replicate', 'od_600nm', 'compound', 'rel_area_phosphate',
'date', 'carbon_source']]
acetate_data = acetate_data[acetate_data['compound']=='acetate'
][['replicate', 'od_600nm', 'compound', 'rel_area_phosphate',
'date', 'carbon_source']]
# Merge the turnover measurements and save
turnover = pd.concat([glucose_data, acetate_data], sort=False)
turnover.to_csv('../../data/collated_turnover_measurements.csv', index=False)
# %%
# Load and compile the inferrential model
model = cmdstanpy.CmdStanModel(stan_file='../stan/hierarchical_yield_coefficient.stan')
# %%
# Define the percentiles to compute
percs = [(2.5, 97.5), (12.5, 87.5), (25, 75), (37.5, 62.5), (47.5, 52.5)]
perc_labels = [95, 75, 50, 25, 5]
# Group by strain, carbon source, and then compound
param_samples, conc_samples = [], []
param_summary, conc_summary = pd.DataFrame([]), pd.DataFrame([])
yield_fits, calib_fits = [], []
for g, d in turnover.groupby(['carbon_source', 'compound']):
# Get the correct calibration data
calib = calib_data[calib_data['compound']==g[1]]
# Define the data dictionary
data_dict = {'J':d['replicate'].max(),
'N_yield': len(d),
'N_calib': len(calib),
'idx': d['replicate'].values.astype(int),
'calib_conc': calib['carbon_conc_mM'].values.astype(float),
'calib_rel_areas':calib['rel_area_phosphate'].values.astype(float),
'optical_density':d['od_600nm'].values.astype(float),
'yield_rel_areas':d['rel_area_phosphate'].values.astype(float)}
# Sample the inferrential model
samps = model.sample(data=data_dict)
samps = az.from_cmdstanpy(samps)
samps = samps.posterior.to_dataframe().reset_index()
# Tidy low-level parameters
_samps = samps[['yield_inter_dim_0', 'yield_slope_dim_0', 'yield_inter', 'yield_slope']]
_samps.drop_duplicates(inplace=True)
pairs = [['yield_inter_dim_0', 'yield_inter'],
['yield_slope_dim_0', 'yield_slope']]
dfs = []
for p in pairs:
_params = _samps[p]
_df = pd.DataFrame([])
_df['value'] = _params[p[1]]
_df['parameter'] = p[1]
_df['level'] = [f'replicate {v+1}' for v in _params[p[0]].values]
dfs.append(_df)
params = pd.concat(dfs)
# params.drop_duplicates(inplace=True)
# Tidy the hyper parameters
hyperparams = samps[['yield_inter_mu', 'yield_slope_mu']]
hyperparams.drop_duplicates(inplace=True)
hyperparams['level'] = 'hyperparameter'
hyperparams = hyperparams.melt('level', var_name='parameter')
hyperparams.loc[hyperparams['parameter']=='yield_slope_mu', 'parameter'] = 'yield_slope'
hyperparams.loc[hyperparams['parameter']=='yield_inter_mu', 'parameter'] = 'yield_inter'
# Tidy the calibration samples
calib_params = samps[['calib_slope', 'calib_inter', 'calib_sigma']]
calib_params.drop_duplicates(inplace=True)
calib_params['level'] = 'calibration'
calib_params = calib_params.melt('level', var_name='parameter')
# Save the parameter samples
params = pd.concat([params, hyperparams, calib_params], sort=False)
params['carbon_source'] = g[0]
params['compound_turnover'] = g[1]
params['strain'] = 'REL606'
param_samples.append(params)
# Tidy the concentration samples
yield_concs = samps[['yield_concs_dim_0', 'yield_concs']]
# yield_concs.drop_duplicates(inplace=True)
# Map the dimension to the od
yield_concs['od_600nm'] = [data_dict['optical_density'][i] for i in yield_concs['yield_concs_dim_0'].values]
yield_concs['replicate'] = [data_dict['idx'][i] for i in yield_concs['yield_concs_dim_0'].values]
yield_concs['strain'] = 'REL606'
yield_concs['carbon_source'] = g[0]
yield_concs['compound_turnover'] = g[1]
yield_concs.rename(columns={'yield_concs':'compound_conc_mM'}, inplace=True)
yield_concs.drop(columns=['yield_concs_dim_0'], inplace=True)
conc_samples.append(yield_concs)
# Compute the summary statistics for yield
for _g, _d in yield_concs.groupby(['replicate', 'od_600nm']):
mean_val = _d['compound_conc_mM'].mean()
median_val = _d['compound_conc_mM'].median()
ci_95_upper, ci_95_lower = np.percentile(_d['compound_conc_mM'], (97.5, 2.5))
ci_75_upper, ci_75_lower = np.percentile(_d['compound_conc_mM'], (87.5, 12.5))
conc_summary = conc_summary.append({
'strain':'REL606',
'carbon_source':g[0],
'compound_turnover': g[1],
'replicate':_g[0],
'od_600nm':_g[1],
'mean_val_mM':mean_val,
'median_val_mM':median_val,
'ci_95th_upper_mM': ci_95_upper,
'ci_95th_lower_mM': ci_95_lower,
'ci_75th_upper_mM': ci_75_upper,
'ci_75th_lower_mM':ci_75_lower
}, ignore_index=True)
# Compute the summary statistics for the parameters
for _g, _d in params.groupby(['level', 'parameter']):
mean_val = _d['value'].mean()
median_val = _d['value'].median()
ci_95_upper, ci_95_lower = np.percentile(_d['value'], (97.5, 2.5))
ci_75_upper, ci_75_lower = np.percentile(_d['value'], (87.5, 12.5))
param_summary = param_summary.append({
'strain':'REL606',
'carbon_source':g[0],
'compound_turnover': g[1],
'level':_g[0],
'parameter':_g[1],
'mean_val':mean_val,
'median_val':median_val,
'ci_95th_upper': ci_95_upper,
'ci_95th_lower': ci_95_lower,
'ci_75th_upper': ci_75_upper,
'ci_75th_lower':ci_75_lower
}, ignore_index=True)
# Compute the dependent variable ranges
conc_range = np.linspace(0, 30, 100)
# Isolate the calibration parameters
calib_slope = params[params['parameter']=='calib_slope']['value'].values
calib_inter = params[params['parameter']=='calib_inter']['value'].values
# Compute the calibration ranges
for perc, lab in zip(percs, perc_labels):
ci = np.zeros((2, len(conc_range)))
for i, c in enumerate(conc_range):
fit = calib_inter + calib_slope * c
ci[:, i] = np.percentile(fit, perc)
_df = pd.DataFrame([])
_df['conc_range_mM'] = conc_range
_df['rel_area_lower'] = ci[0, :]
_df['rel_area_upper'] = ci[1, :]
_df['percentile'] = lab
_df['strain'] = 'REL606'
_df['carbon_source'] = g[0]
_df['compound_turnover'] = g[1]
calib_fits.append(_df)
# Compute the yield fit range
min_od = 0.9 * d['od_600nm'].min()
max_od = 1.1 * d['od_600nm'].max()
od_range = np.linspace(min_od, max_od, 100)
for _g, _d in params.groupby(['level']):
if _g != 'calibration':
# Get the relevant parameters
yield_slope = _d[_d['parameter']=='yield_slope']['value'].values
yield_inter = _d[_d['parameter']=='yield_inter']['value'].values
for perc, lab in zip(percs, perc_labels):
ci = np.zeros((2, len(conc_range)))
for i, od in enumerate(od_range):
fit = yield_inter + yield_slope * od
ci[:, i] = np.percentile(fit, perc)
_df = pd.DataFrame([])
_df['od_600nm'] = od_range
_df['conc_lower_mM'] = ci[0, :]
_df['conc_upper_mM'] = ci[1, :]
_df['percentile'] = lab
_df['level'] = _g
_df['strain'] = 'REL606'
_df['carbon_source'] = g[0]
_df['compound_turnover'] = g[1]
yield_fits.append(_df)
# Concatenate everything
param_samples = pd.concat(param_samples, sort=False)
conc_samples = pd.concat(conc_samples, sort=False)
calib_fits = | pd.concat(calib_fits, sort=False) | pandas.concat |
from candiy_lemon import lemon
import sys, os
import numpy as np
import pandas as pd
# List of dictionaries to keep track of parts of the file
# Key: reference pdbid, Value: path to .mmtf file
pathDict = {}
# Key: reference pdbID, Value: chemical ID of ligand bound to reference protein for removal
referenceLigandDict = {}
# Key: reference pdbID, Value: list of sm or non-sm protein
referenceDict = {}
# Key: reference pdbID, Value: list of proteins to align to reference (like in pinc)
alignProtDict = {}
# Key: align protein pdbID, Value: ligand chemical ID code for ligand removal
alignProtLigandDict = {}
# Key: pdbID, Value: chemical id for SM ligand
pdbIDSMDict = {}
# Key: pdbID, Value: tuple(resCode, chainID, residue ID)
pdbIDNonSMDict = {}
# Key: pdbID, Value: chemical id for SM ligand
noAlignSMDict = {}
# Key: pdbID, Value: tuple(resCode, chainID, residue ID)
noAlignNonSMDict = {}
# Maximum distance allowed between a residue and the ligand
maximumResidueDistance = 25.0
entries = set()
# Method for getting binding affinity information
# Input is a list of tuples, each tuple -> (pdb_id, lig_code)
def get_bind_affinty(bind_tup_list):
# We can get the csv file directly from a url using pandas
# Use custom columns due to some of the columns not being needed
binding_moad_url = "http://bindingmoad.org/files/csv/every_bind.csv"
binding_moad_data = pd.read_csv(binding_moad_url,usecols=[0,2,3,4,5,7,8,9],header=None)
# Set the columns manually because there is no header given
binding_moad_data.columns = ["ec","pdb_id","lig_code","validity","affnty_type","affnty_val","affnty_units","SMILES"]
    # Create our dictionaries and variables here
pdb_id_dict = {}
lig_id_dict = {}
cur_pdb_id = ""
    # Go through each of the rows and add the data to the dictionaries
for index,row in binding_moad_data.iterrows():
if pd.isna(row["pdb_id"]) == False:
cur_pdb_id = row["pdb_id"]
elif (pd.isna(row["lig_code"]) == False) and ( | pd.isna(row["affnty_type"]) | pandas.isna |
import logging
import os
import pathlib
import random
import sys
import time
from itertools import chain
from collections.abc import Iterable
import gc
import numpy as np
import pandas as pd
import torch
from PIL import Image
import matplotlib.pyplot as plt
from attrdict import AttrDict
from tqdm import tqdm
from pycocotools import mask as cocomask
from sklearn.model_selection import BaseCrossValidator
from steppy.base import BaseTransformer, Step
from steppy.utils import get_logger
import yaml
from imgaug import augmenters as iaa
import imgaug as ia
NEPTUNE_CONFIG_PATH = str(pathlib.Path(__file__).resolve().parents[1] / 'neptune.yaml')
logger = get_logger()
def read_yaml(fallback_file=NEPTUNE_CONFIG_PATH):
with open(fallback_file) as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
return AttrDict(config)
def init_logger():
logger = logging.getLogger('salt-detection')
logger.setLevel(logging.INFO)
message_format = logging.Formatter(fmt='%(asctime)s %(name)s >>> %(message)s',
datefmt='%Y-%m-%d %H-%M-%S')
# console handler for validation info
ch_va = logging.StreamHandler(sys.stdout)
ch_va.setLevel(logging.INFO)
ch_va.setFormatter(fmt=message_format)
# add the handlers to the logger
logger.addHandler(ch_va)
return logger
def get_logger():
return logging.getLogger('salt-detection')
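# NOTE: run_length_encoding (used by create_submission below) is defined elsewhere in this module.
# The sketch below is an illustrative assumption of a standard Kaggle-style run-length encoder;
# the column-major ('F') flattening order is an assumption for demonstration only.
def _run_length_encoding_sketch(mask):
    pixels = np.concatenate([[0], mask.flatten(order='F'), [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1  # 1-indexed positions where the value changes
    runs[1::2] -= runs[::2]  # convert end positions into run lengths
    return runs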
def create_submission(meta, predictions):
output = []
for image_id, mask in zip(meta['id'].values, predictions):
rle_encoded = ' '.join(str(rle) for rle in run_length_encoding(mask))
output.append([image_id, rle_encoded])
submission = | pd.DataFrame(output, columns=['id', 'rle_mask']) | pandas.DataFrame |
# transformations.py
import os
import glob
import time
import datetime
import seaborn as sns
import pandas as pd
import py4cytoscape as p4c
import numpy as np
from itertools import combinations
# ---------------------------------------------------
def get_keys(keyfile):
"""Get Biochemical Transformation Keys"""
key = pd.read_csv(keyfile)
key['mf'] = key['mf'].astype(float).apply(lambda x: '%.6f' % x)
key = key.sort_values(by=['mf'])
key_tuples = list(zip(key.Group, key.Transformation, key.Formula, key.mf))
return key_tuples
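# --------------------------------------------------
# Illustrative sketch (an assumption, not part of the original module): how the key tuples produced by
# get_keys() can be matched against an observed m/z difference; the 0.001 tolerance mirrors the one
# used in calculate_transformations() below.
def _example_match_transformation(observed_difference, key_tuples, tolerance=0.001):
    """Return the (Group, Transformation, Formula, mf) tuples within `tolerance` of the observed m/z difference."""
    return [k for k in key_tuples if abs(float(k[3]) - observed_difference) <= tolerance]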
# --------------------------------------------------
def calculate_transformations(df, keys, path):
"""Function to calculate transformations for transformation networks"""
# Create a dataframe that has the masses of the peaks that are present in each sample, and 0 in the peaks that are
# not in that sample
df_transf = pd.pivot_table(df, values='NormIntensity', index=['Mass'],
columns=['SampleID']).reset_index()
df_transf['Mass'] = df_transf['Mass'].astype(float).apply(lambda x: '%.6f' % x)
df_transf.replace([0, np.nan], ['X', 'X'], inplace=True)
for col in df_transf.columns:
df_transf[col] = np.where((df_transf[col] != 'X'), df_transf['Mass'], df_transf[col])
df_transf = df_transf.drop('Mass', axis=1)
df_transf = df_transf.replace('X', 0)
print('Calculating m/z differences per sample column can take a little while...\n')
i = 1
for sample in sorted(df_transf.columns):
print(f'[{datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S %p")}] {i}\\{len(df_transf.columns)}\t{sample}')
mz_list = set(float(x) for x in df_transf[sample] if float(x) > 0)
print('\t\tTotal m/z values', len(mz_list))
# make m/z substractions in all versus all fashion
# doing all vs all the subtractions and filter
result_tuples = [(x, y, round(abs(x - y), 6)) for x, y in combinations(mz_list, 2) if 1 < abs(x - y) < 766]
result_tuples = [(
r[0], r[1], r[2],
k[0], k[1], k[2], k[3]
) for r in result_tuples for k in keys
if r[2] - 0.001 <= float(k[3]) <= r[2] + 0.001]
        if len(result_tuples) == 0:
print('No transformations were found for this sample, moving to the next one')
        else:
# make np.array from list of lists
result_tuples = np.vstack(result_tuples)
# make pd df
result_df = pd.DataFrame(result_tuples, columns=[
'Feature_X', 'Feature_Y', 'Difference',
'Group', 'Transformation', 'Formula', 'mf'])
result_df['SampleID'] = sample
print(' Saving results')
filename = os.path.join(path, 'transformations_' + sample + '.csv')
result_df.to_csv(filename, index=False)
# Compile counts
result_counts = pd.DataFrame(
result_df.groupby(['SampleID', 'Group', 'Transformation', 'Formula']).size().reset_index(name='Counts'))
total_transformations = sum(result_counts['Counts'])
result_counts['Perc_Counts'] = result_counts['Counts'] / total_transformations
result_counts = result_counts.sort_values(by="Counts")
# Save final_counts
filename = os.path.join(path, 'counts_' + sample + '.csv')
result_counts.to_csv(filename, index=False)
i = i + 1
print("\u2713 Done!")
return
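# --------------------------------------------------
# Illustrative sketch (an assumption, not part of the original module): the minimal long-format input
# expected by calculate_transformations() -- one row per peak per sample, with 'Mass',
# 'NormIntensity' and 'SampleID' columns. The masses below are hypothetical examples.
def _example_peak_table():
    return pd.DataFrame({'Mass': [180.063388, 342.116212, 180.063388],
                         'NormIntensity': [1.0, 0.5, 0.8],
                         'SampleID': ['S1', 'S1', 'S2']})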
# --------------------------------------------------
def summarize_transformations(path):
"""Create a table summarizing the number of transformations of each sample"""
files_path = os.path.join(path, 'transf_by_sample', 'counts_*.csv')
files = glob.glob(files_path)
summary_counts = pd.DataFrame()
for file in files:
df = pd.read_csv(file)
summary_counts = pd.concat([summary_counts, df], axis=0)
filename = os.path.join(path, 'Transformations_summary_counts.csv')
summary_counts.to_csv(filename, index=False)
files_path = os.path.join(path, 'transf_by_sample', 'transformations_*.csv')
files = glob.glob(files_path)
summary_transf = pd.DataFrame()
for file in files:
df = pd.read_csv(file)
summary_transf = pd.concat([summary_transf, df], axis=0)
filename = os.path.join(path, 'Transformations_summary_all.csv')
summary_transf.to_csv(filename, index=False)
return
# --------------------------------------------------
def get_node_table(df, path):
"""Create a node table for the transformation networks"""
node_table = df[['Mass', 'C', 'H', 'O', 'N', 'S', 'P', 'OC', 'HC', 'NOSC',
'GFE', 'Class', 'MolecularFormula', 'El_comp']].drop_duplicates('Mass')
filename = os.path.join(path, 'node_table.csv')
node_table.to_csv(filename, index=False)
return node_table
# --------------------------------------------------
def create_cytoscape_network(node_table, path):
"""Create a cytoscape network using the node table"""
node_table['Mass'] = round(node_table['Mass'], 4)
node_table['Mass'] = node_table['Mass'].astype(str)
node_table = node_table.rename(columns={'Mass': 'id'})
# Create a vector of colors for the compound classes
mol_classes = list(np.unique(node_table['Class']))
node_colors = sns.color_palette('Set3', len(mol_classes)).as_hex()
network_stats = []
# Create a list of the transformation files to be used as the edge tables for the networks
files_path = os.path.join(path, 'transf_by_sample', 'transformations_*.csv')
edge_files = glob.glob(files_path)
i = 1
for file in edge_files:
edge_table = | pd.read_csv(file) | pandas.read_csv |
import classical
import quantum
from TOKEN import ACCESS_TOKEN
import pandas as pd
from tabulate import tabulate
sbits = [0, 1]
tbits = [(0, 0), (0, 1), (1, 0), (1, 1)]
classicalTable = | pd.DataFrame(columns=['bit', 'not']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels doesn't matter which way copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names doesn't matter which way copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = | pd.PeriodIndex([x[1] for x in result]) | pandas.PeriodIndex |
import requests
import pandas as pd
import re
from bs4 import BeautifulSoup
url=requests.get("http://www.worldometers.info/world-population/india-population/")
t=url.text
so=BeautifulSoup(t,'html.parser')
all_t = so.findAll('table', class_="table table-striped table-bordered table-hover table-condensed table-list")  # used to find the stats tables
# Collect the text of every cell in the statistics table, then reshape the flat
# list of cell texts into rows of 13 cells (the number of columns in the table).
b = []
for j in all_t[0].findAll('td'):
    b.append(j.text)
d1 = pd.DataFrame()
i = 0
while i <= (len(b) - 13):
    d1 = d1.append(pd.DataFrame([b[i:i + 13]]))
    i = i + 13
d1 = d1.apply(pd.to_numeric, errors='ignore')
listq=pd.Series.tolist(d1[0:16][0])
list1=pd.Series.tolist(d1[0:16][1])
list2=pd.Series.tolist(d1[0:16][2])
list3=pd.Series.tolist(d1[0:16][3])
list4=pd.Series.tolist(d1[0:16][4])
list5=pd.Series.tolist(d1[0:16][5])
list6=pd.Series.tolist(d1[0:16][6])
list7=pd.Series.tolist(d1[0:16][7])
list8=pd.Series.tolist(d1[0:16][8])
list9=pd.Series.tolist(d1[0:16][9])
list10=pd.Series.tolist(d1[0:16][10])
# Forecast table: collect its cells and reshape them into rows of 13 cells as well.
c = []
for j in all_t[1].findAll('td'):
    c.append(j.text)
bv = pd.DataFrame()
i = 0
while i <= (len(c) - 13):
    bv = bv.append(pd.DataFrame([c[i:i + 13]]))
    i = i + 13
listq1=pd.Series.tolist(bv[0:7][0])
list11=pd.Series.tolist(bv[0:7][1])
list21=pd.Series.tolist(bv[0:7][2])
list31= | pd.Series.tolist(bv[0:7][3]) | pandas.Series.tolist |
# -*- coding: utf-8 -*-
"""
The Local version of the app.
"""
import base64
import io
import os
import time
import sys
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import pandas as pd
import plotly.graph_objs as go
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from node2vec import Node2Vec
from itertools import zip_longest
import umap
import hdbscan
# networks
import random_network
import disconnected_components
import disconnected_components_with_time
import connected_stars
import disconnected_stars
import star_graph
import grid_graph
import sklearn.cluster as cluster
def merge(a, b):
return dict(a, **b)
def omit(omitted_keys, d):
return {k: v for k, v in d.items() if k not in omitted_keys}
def getDegreeByNode(walk, G):
Degree_list = []
for node in walk:
Degree_list = Degree_list + [str(G.degree[int(node)]) + '_']
return (Degree_list)
def getTimeClassByNode(walk, G):
TimeClass_list = []
for node in walk:
TimeClass_list = TimeClass_list + [G.nodes[int(node)]['TimeClass']]
return (TimeClass_list)
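# --- Illustrative sketch (not part of the app): the feature-augmented walk format. ---
# The node2vec callbacks further below interleave node ids with degree / time tokens
# via zip_longest before fitting the embedding model.  This helper shows the resulting
# token sequence on a hypothetical 3-node path graph; the toy graph, the walk and the
# 'day_' time label are assumptions made only for this illustration.
def _example_feature_augmented_walk():
    import networkx as nx
    g = nx.path_graph(3)                       # nodes 0-1-2 with degrees 1, 2, 1
    for n in g.nodes:
        g.nodes[n]['TimeClass'] = 'day_'       # dummy time attribute
    walk = ['0', '1', '2']                     # node2vec walks are lists of node-id strings
    degree_tokens = getDegreeByNode(walk, g)   # ['1_', '2_', '1_']
    time_tokens = getTimeClassByNode(walk, g)  # ['day_', 'day_', 'day_']
    # Interleave ids and degree tokens the same way the callback does, dropping the trailing token
    interleaved = [tok for pair in zip_longest(walk, degree_tokens, fillvalue=None)
                   for tok in pair][:-1]
    # interleaved == ['0', '1_', '1', '2_', '2']
    return interleaved, time_tokens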
def input_field(title, state_id, state_value, state_max, state_min):
"""Takes as parameter the title, state, default value and range of an input field, and output a Div object with the given specifications."""
return html.Div([
html.P(title,
style={
'display': 'inline-block',
'verticalAlign': 'mid',
'marginRight': '5px',
'margin-bottom': '0px',
'margin-top': '0px'}),
html.Div([
dcc.Input(
id=state_id,
type='number',
value=state_value,
max=state_max,
min=state_min,
size=7)],
style={
'display': 'inline-block',
'margin-top': '0px',
'margin-bottom': '0px'}
)])
def NamedSlider(name, short, min, max, step, val, marks=None):
if marks:
step = None
else:
marks = {i: i for i in range(min, max + 1, step)}
return html.Div(
style={'margin': '0px 5px 30px 0px'},
children=[
f"{name}:",
html.Div(style={'margin-left': '5px'}, children=[
dcc.Slider(id=f'slider-{short}',
min=min,
max=max,
marks=marks,
step=step,
value=val)
])
])
tabs_styles = {
'height': '44px'
}
tab_style = {
'borderBottom': '1px solid #d6d6d6',
'padding': '6px',
'fontWeight': 'bold'
}
tab_selected_style = {
'borderTop': '1px solid #d6d6d6',
'borderBottom': '1px solid #d6d6d6',
'backgroundColor': '#119DFF',
'color': 'white',
'padding': '6px'
}
# Generate the default scatter plot
tsne_df = pd.read_csv("data/tsne_3d.csv", index_col=0)
data = []
for idx, val in tsne_df.groupby(tsne_df.index):
idx = int(idx)
scatter = go.Scatter3d(
name=f"Digit {idx}",
x=val['x'],
y=val['y'],
z=val['z'],
mode='markers',
marker=dict(
size=2.5,
symbol='circle-dot'
)
)
data.append(scatter)
# Layout for the t-SNE graph
tsne_layout = go.Layout(
margin=dict(
l=0,
r=0,
b=0,
t=0
)
)
local_layout = html.Div([
# In-browser storage of global variables
html.Div(
id="data-df-and-message",
style={'display': 'none'}
),
html.Div(
id="label-df-and-message",
style={'display': 'none'}
),
# Main app
html.Div([
html.H2(
children='Network Embedding',
id='title1',
style={
'float': 'left',
'margin-top': '20px',
'margin-bottom': '20px',
'margin-left': '7px'
}
),
html.Img(
src="http://www.sensafety.org/img/core-img/snet-logo.png",
style={
'height': '80px',
'float': 'right',
'margin': '0px 50px 0px 0px'
}
)
],
className="row"
),
html.Div([
html.Div([
# The network
dcc.Graph(
id='network-2d-plot',
figure={
'data': random_network.data,
'layout': random_network.layout
},
style={
'height': '50vh',
},
)
],
id="network-plot-div",
className="eight columns"
),
html.Div([
html.H4(
children='Node2vec',
id='network_embedding_h4'
),
dcc.Dropdown(
id='dropdown-network',
searchable=False,
options=[
{'label': 'Random network', 'value': 'random_network'},
{'label': 'Star', 'value': 'star_graph'},
{'label': 'Disconnected stars', 'value': 'disconnected_stars'},
{'label': 'Connected stars', 'value': 'connected_stars'},
{'label': 'Disconnected components', 'value': 'disconnected_components'},
{'label': 'Disconnected components with time', 'value': 'disconnected_components_with_time'},
{'label': 'Grid', 'value': 'grid_graph'}
],
value='random_network'
),
html.H6(
children='Features:',
id='features_h6',
style={'margin': '15px 0px 0px 0px'}
),
dcc.Checklist(
id='features_node2vec',
options=[
{'label': 'Location', 'value': 'Location'},
{'label': 'Degree', 'value': 'Degree'},
{'label': 'Time', 'value': 'Time'}
],
values=['Location'],
labelStyle={
'display': 'inline-block',
'margin-right': '7px',
'margin-left': '7px',
'font-weight': 300
},
style={
'display': 'inline-block',
'margin-left': '7px'
}
),
NamedSlider(
name="Dimensions",
short="dimensions",
min=5,
max=128,
step=None,
val=10,
marks={i: i for i in [5, 10, 32, 64, 128]}
),
NamedSlider(
name="Walk length",
short="walk_length",
min=6,
max=14,
step=None,
val=8,
marks={i: i for i in [6, 8, 10, 12, 14]}
),
NamedSlider(
name="Number of walks per node",
short="num_walks",
min=3,
max=6,
step=None,
val=4,
marks={i: i for i in [3, 4, 5, 6]}
),
html.Button(
children='Generate embeddings',
id='network-embedding-generation-button',
n_clicks=0
),
html.Div(id='output-state')
],
className="four columns",
style={
'padding': 20,
'margin': 5,
'borderRadius': 5,
'border': 'thin lightgrey solid',
# Remove possibility to select the text for better UX
'user-select': 'none',
'-moz-user-select': 'none',
'-webkit-user-select': 'none',
'-ms-user-select': 'none'
}
)
],
className="row"
),
html.Div([
html.H2(
children='Dimensionality Reduction',
id='title2',
style={
'float': 'left',
'margin-top': '20px',
'margin-bottom': '20px',
'margin-left': '7px'
}
)
],
className="row"
),
################## TSNE
html.Div([
html.Div([
# Data about the graph
html.Div(
id="kl-divergence",
style={'display': 'none'}
),
html.Div(
id="end-time",
style={'display': 'none'}
),
html.Div(
id="error-message",
style={'display': 'none'}
),
# The graph
dcc.Graph(
id='tsne-3d-plot',
figure={
'data': data,
'layout': tsne_layout
},
style={
'height': '60vh',
},
)
],
id="tsne-plot-div",
className="eight columns"
),
html.Div([
html.H4( "t-SNE Parameters",
id='dim_reduction'
),
input_field("Perplexity:", "perplexity-state", 20, 50, 5),
input_field("Number of Iterations:", "n-iter-state", 400, 5000, 250),
input_field("Learning Rate:", "lr-state", 10, 1000, 10),
html.Button(
id='tsne-train-button',
n_clicks=0,
children='Start Training t-SNE'
),
dcc.Upload(
id='upload-data',
children=html.A('Upload your input data here.'),
style={
'height': '45px',
'line-height': '45px',
'border-width': '1px',
'border-style': 'dashed',
'border-radius': '5px',
'text-align': 'center',
'margin-top': '5px',
'margin-bottom': '5 px'
},
multiple=False,
max_size=-1
),
dcc.Upload(
id='upload-label',
children=html.A('Upload your labels here.'),
style={
'height': '45px',
'line-height': '45px',
'border-width': '1px',
'border-style': 'dashed',
'border-radius': '5px',
'text-align': 'center',
'margin-top': '5px',
'margin-bottom': '5px'
},
multiple=False,
max_size=-1
),
html.Div([
html.P(id='upload-data-message',
style={
'margin-bottom': '0px'
}),
html.P(id='upload-label-message',
style={
'margin-bottom': '0px'
}),
html.Div(id='training-status-message',
style={
'margin-bottom': '0px',
'margin-top': '0px'
}),
html.P(id='error-status-message')
],
id='output-messages',
style={
'margin-bottom': '2px',
'margin-top': '2px'
}
)
],
className="four columns",
style={
'padding': 20,
'margin': 5,
'borderRadius': 5,
'border': 'thin lightgrey solid',
# Remove possibility to select the text for better UX
'user-select': 'none',
'-moz-user-select': 'none',
'-webkit-user-select': 'none',
'-ms-user-select': 'none'
}
)
],
className="row"
),
################## UMAP
html.Div([
html.Div([
# The graph
dcc.Graph(
id='umap-3d-plot',
figure={
'data': data,
'layout': tsne_layout
},
style={
'height': '60vh',
},
)
],
id="umap-plot-div",
className="eight columns"
),
html.Div([
html.H4(
'UMAP Parameters',
id='umap_h4'
),
input_field("# neighbors:", "n_neighbors", 20, 200, 2), # controls how UMAP balances local versus global structure in the data
NamedSlider( # controls how tightly UMAP is allowed to pack points together
name="Minimum distance",
short="min_dist",
min=0.0,
max=1.0,
step=0.1,
val=0.2,
marks={i: i for i in [.0, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1]}
),
# n_neighbors,min_dist, distance_metric
dcc.Dropdown(
                        id='distance_metric',  # controls how distance is computed in the ambient space of the input data
searchable=False,
options=[
# TODO: Generate more data
{'label': 'euclidean', 'value': 'euclidean'},
{'label': 'manhattan', 'value': 'manhattan'},
{'label': 'mahalanobis', 'value': 'mahalanobis'}
],
value='euclidean',
style = {
'margin-top': '15px'
}
),
html.Button(
id='umap-train-button',
n_clicks=0,
children='Start Training UMAP',
style = {
'margin-top': '15px'
}
),
],
className="four columns",
style={
'padding': 20,
'margin': 5,
'borderRadius': 5,
'border': 'thin lightgrey solid',
# Remove possibility to select the text for better UX
'user-select': 'none',
'-moz-user-select': 'none',
'-webkit-user-select': 'none',
'-ms-user-select': 'none'
}
)
],
className="row"
),
################## PCA
html.Div([
html.Div([
# The graph
dcc.Graph(
id='pca-3d-plot',
figure={
'data': data,
'layout': tsne_layout
},
style={
'height': '60vh',
},
)
],
id="pca-plot-div",
className="eight columns"
),
html.Div([
html.H4(
'PCA',
id='pca_h4'
),
html.Button(
id='pca-train-button',
n_clicks=0,
children='Start Training PCA',
style={
'margin-top': '15px'
}
),
],
className="four columns",
style={
'padding': 20,
'margin': 5,
'borderRadius': 5,
'border': 'thin lightgrey solid',
# Remove possibility to select the text for better UX
'user-select': 'none',
'-moz-user-select': 'none',
'-webkit-user-select': 'none',
'-ms-user-select': 'none'
}
)
],
className="row"
),
######### Clustering
html.Div([
html.H2(
children='Cluster Analysis',
id='title3',
style={
'float': 'left',
'margin-top': '20px',
'margin-bottom': '20px',
'margin-left': '7px'
}
)
],
className="row"
),
html.Div([
html.Div([
# The graph
dcc.Graph(
id='clustering-3d-plot',
figure={
'data': data,
'layout': tsne_layout
},
style={
'height': '60vh',
},
)
],
id="clustering-plot-div",
className="eight columns"
),
html.Div([
html.H4(
'Cluster analysis',
id='clustering_h4'
),
dcc.Dropdown(
id='dropdown-clustering',
searchable=False,
options=[
{'label': 'k-means', 'value': 'k_means'},
{'label': 'HDBSCAN', 'value': 'hdbscan'}
],
value='k_means',
style={'margin': '0px 0px 15px 0px'}
),
dcc.Dropdown(
id='dropdown-dimreduction',
searchable=False,
options=[
{'label': 't-SNE', 'value': 'tsne'},
{'label': 'UMAP', 'value': 'umap'},
{'label': 'PCA', 'value': 'pca'}
],
value='umap',
style={'margin': '0px 0px 15px 0px'}
),
input_field("k = ", "k_parameter", 2, 10, 2),
NamedSlider(
name="Minimum cluster size:",
short="min_cl_size",
min=5,
max=40,
step=5,
val=10,
marks={i: i for i in [5, 10, 15, 20, 25, 30, 35, 40]}
),
NamedSlider(
name="Minimum number of samples:",
short="min_num_samp",
min=5,
max=40,
step=5,
val=10,
marks={i: i for i in [5, 10, 15, 20, 25, 30, 35, 40]}
),
NamedSlider(
name="Alpha",
short="alpha",
min=0.1,
max=1.3,
step=0.1,
val=0.2,
marks={i: i for i in [0.1, .2, .3, .4, .5, .6, .7, .8, .9, 1, 1.1, 1.2, 1.3]}
),
html.Button(
id='cluster-analysis-button',
n_clicks=0,
children='Run cluster analysis',
style={
'margin-top': '15px'
}
),
],
className="four columns",
style={
'padding': 20,
'margin': 5,
'borderRadius': 5,
'border': 'thin lightgrey solid',
# Remove possibility to select the text for better UX
'user-select': 'none',
'-moz-user-select': 'none',
'-webkit-user-select': 'none',
'-ms-user-select': 'none'
}
)
],
className="row"
),
],
className="container",
style={
'width': '90%',
'max-width': 'none',
'font-size': '1.5rem'
}
)
#########################################################
def local_callbacks(app):
@app.callback(Output('clustering-plot-div', 'children'),
[Input('cluster-analysis-button', 'n_clicks')],
[State('dropdown-clustering', 'value'),
State('dropdown-dimreduction', 'value'),
State('k_parameter', 'value'),
State('slider-min_cl_size', 'value'),
State('slider-min_num_samp', 'value'),
State('slider-alpha', 'value'),
State('n_neighbors', 'value'),
State('slider-min_dist', 'value'),
State('distance_metric', 'value'),
State('perplexity-state', 'value'),
State('n-iter-state', 'value'),
State('lr-state', 'value')])
def cluster_analysis(n_clicks, clustering, dimreduction,
k_parameter, # k-means params
min_cl_size, min_num_samp, alpha, # HDBSCAN params
n_neighbors, min_dist, metric, # UMAP params
perplexity, n_iter, learning_rate # t-SNE params
):
if n_clicks <= 0:
global data
else:
data_df = np.array(pd.read_csv("data/output_embeddings.csv"))
#### k-means
if clustering == 'k_means':
if dimreduction == 'umap':
umap_ = umap.UMAP(n_neighbors=n_neighbors,
min_dist=min_dist,
n_components=3,
metric=metric,
random_state=42)
embeddings = umap_.fit_transform(data_df)
kmeans_labels = pd.DataFrame(cluster.KMeans(n_clusters=k_parameter, random_state=42).fit_predict(embeddings), columns=['label'])
umap_data_df = pd.DataFrame(embeddings, columns=['x', 'y', 'z'])
combined_df = umap_data_df.join(kmeans_labels)
elif dimreduction == 'tsne':
tsne = TSNE(n_components=3,
perplexity=perplexity,
learning_rate=learning_rate,
n_iter=n_iter)
embeddings = tsne.fit_transform(data_df)
kmeans_labels = pd.DataFrame(cluster.KMeans(n_clusters=k_parameter, random_state=42).fit_predict(embeddings), columns=['label'])
tsne_data_df = pd.DataFrame(embeddings, columns=['x', 'y', 'z'])
combined_df = tsne_data_df.join(kmeans_labels)
elif dimreduction == 'pca':
pca = PCA(n_components=3, svd_solver='full')
embeddings = pca.fit_transform(data_df)
kmeans_labels = pd.DataFrame(cluster.KMeans(n_clusters=k_parameter, random_state=42).fit_predict(embeddings), columns=['label'])
pca_data_df = pd.DataFrame(embeddings, columns=['x', 'y', 'z'])
combined_df = pca_data_df.join(kmeans_labels)
#### HDBSCAN
elif clustering == 'hdbscan':
if dimreduction == 'umap':
umap_ = umap.UMAP(n_neighbors=n_neighbors,
min_dist=min_dist,
n_components=3,
metric=metric,
random_state=42)
embeddings = umap_.fit_transform(data_df)
hdbs = hdbscan.HDBSCAN(min_cluster_size=min_cl_size,
min_samples=min_num_samp,
alpha=alpha,
metric='euclidean')
hdbscanoutput = hdbs.fit(embeddings)
hdbscan_labels = pd.DataFrame(hdbscanoutput.labels_, columns=['label'])
umap_data_df = pd.DataFrame(embeddings, columns=['x', 'y', 'z'])
combined_df = umap_data_df.join(hdbscan_labels)
elif dimreduction == 'tsne':
tsne = TSNE(n_components=3,
perplexity=perplexity,
learning_rate=learning_rate,
n_iter=n_iter)
embeddings = tsne.fit_transform(data_df)
hdbs = hdbscan.HDBSCAN(min_cluster_size=min_cl_size,
min_samples=min_num_samp,
alpha=alpha,
metric='euclidean')
hdbscanoutput = hdbs.fit(embeddings)
hdbscan_labels = pd.DataFrame(hdbscanoutput.labels_, columns=['label'])
tsne_data_df = pd.DataFrame(embeddings, columns=['x', 'y', 'z'])
combined_df = tsne_data_df.join(hdbscan_labels)
elif dimreduction == 'pca':
pca = PCA(n_components=3, svd_solver='full')
embeddings = pca.fit_transform(data_df)
hdbs = hdbscan.HDBSCAN(min_cluster_size=min_cl_size,
min_samples=min_num_samp,
alpha=alpha,
metric='euclidean')
hdbscanoutput = hdbs.fit(embeddings)
hdbscan_labels = pd.DataFrame(hdbscanoutput.labels_, columns=['label'])
pca_data_df = pd.DataFrame(embeddings, columns=['x', 'y', 'z'])
combined_df = pca_data_df.join(hdbscan_labels)
data = []
# Group by the values of the label
for idx, val in combined_df.groupby('label'):
scatter = go.Scatter3d(
name=idx,
x=val['x'],
y=val['y'],
z=val['z'],
mode='markers',
marker=dict(
size=2.5,
symbol='circle-dot'
)
)
data.append(scatter)
return [
# The clustered graph
dcc.Graph(
id='clustering-3d-plot',
figure={
'data': data,
'layout': tsne_layout
},
style={
'height': '60vh',
},
)
]
@app.callback(Output('network-plot-div', 'children'),
[Input('dropdown-network', 'value')])
def update_network(network):
if network == 'random_network':
return [
# The graph
dcc.Graph(
id='network-2d-plot',
figure={
'data': random_network.data,
'layout': random_network.layout
},
style={
'height': '50vh',
},
)
]
elif network == 'star_graph':
return [
# The graph
dcc.Graph(
id='network-2d-plot',
figure={
'data': star_graph.data,
'layout': star_graph.layout
},
style={
'height': '50vh',
},
)
]
elif network == 'disconnected_stars':
return [
# The graph
dcc.Graph(
id='network-2d-plot',
figure={
'data': disconnected_stars.data,
'layout': disconnected_stars.layout
},
style={
'height': '50vh',
},
)
]
elif network == 'connected_stars':
return [
# The graph
dcc.Graph(
id='network-2d-plot',
figure={
'data': connected_stars.data,
'layout': connected_stars.layout
},
style={
'height': '50vh',
},
)
]
elif network == 'disconnected_components':
return [
# The graph
dcc.Graph(
id='network-2d-plot',
figure={
'data': disconnected_components.data,
'layout': disconnected_components.layout
},
style={
'height': '50vh',
},
)
]
elif network == 'disconnected_components_with_time':
return [
# The graph
dcc.Graph(
id='network-2d-plot',
figure={
'data': disconnected_components_with_time.data,
'layout': disconnected_components_with_time.layout
},
style={
'height': '50vh',
},
)
]
elif network == 'grid_graph':
return [
# The graph
dcc.Graph(
id='network-2d-plot',
figure={
'data': grid_graph.data,
'layout': grid_graph.layout
},
style={
'height': '50vh',
},
)
]
# Network embedding Button Click --> Generate embeddings
@app.callback(Output('output-state', 'children'),
[Input('network-embedding-generation-button', 'n_clicks')],
[State('dropdown-network','value'),
State('features_node2vec', 'values'),
State('slider-dimensions', 'value'),
State('slider-walk_length', 'value'),
State('slider-num_walks', 'value')
])
def output_walks(n_clicks, network, features, dimensions, walk_length, num_walks):
if network == 'random_network':
g = random_network.G
elif network == 'disconnected_components':
g = disconnected_components.G
elif network == 'disconnected_components_with_time':
g = disconnected_components_with_time.G
elif network == 'connected_stars':
g = connected_stars.G
elif network == 'disconnected_stars':
g = disconnected_stars.G
elif network == 'star_graph':
g = star_graph.G
elif network == 'grid_graph':
g = grid_graph.G
if n_clicks > 0:
node2vec = Node2Vec(g,
dimensions=dimensions,
walk_length=walk_length, # How many nodes are in each random walk
num_walks=num_walks, # Number of random walks to be generated from each node in the graph
workers=4)
print("Original walks:\n", "\n\n".join(map(str, node2vec.walks[0:3])), file=sys.stderr)
if features == ['Location', 'Degree', 'Time']:
print('Location, Degree, Time', file=sys.stderr)
degree_formatted_walks = [getDegreeByNode(walk, g) for walk in node2vec.walks]
time_formatted_walks = [getTimeClassByNode(walk, g) for walk in node2vec.walks]
formatted_walks = [[j for i in zip_longest(a, b, c, fillvalue=None) for j in i][:-1] for a, b, c in
list(zip(node2vec.walks, degree_formatted_walks, time_formatted_walks))]
print("location_Degree_Time_formatted_walks:\n", "\n\n".join(map(str, formatted_walks[0:3])), file=sys.stderr)
node2vec.walks = formatted_walks
model = node2vec.fit(window=10, min_count=1)
nodes_str = [x for x in model.wv.vocab if str.isdigit(x)]
print(nodes_str[0:5], file=sys.stderr)
embeddings = np.array([model.wv[x] for x in nodes_str])
nodes = [int(i) for i in nodes_str]
labels = [g.nodes[node]['TimeClass'] for node in nodes]
print(embeddings.shape, file=sys.stderr)
elif features == ['Location', 'Degree']:
print('Location, Degree', file=sys.stderr)
degree_formatted_walks = [getDegreeByNode(walk, g) for walk in node2vec.walks]
formatted_walks = [[j for i in zip_longest(a, b, fillvalue=None) for j in i][:-1] for a, b in list(zip(node2vec.walks, degree_formatted_walks))]
print("Degree_location_formatted_walks:\n", "\n\n".join(map(str, formatted_walks[0:3])), file=sys.stderr)
node2vec.walks = formatted_walks
model = node2vec.fit(window=10, min_count=1)
nodes_str = [x for x in model.wv.vocab if str.isdigit(x)]
print(nodes_str[0:5], file=sys.stderr)
embeddings = np.array([model.wv[x] for x in nodes_str])
nodes = [int(i) for i in nodes_str]
labels = [g.degree[node] for node in nodes]
print(embeddings.shape, file=sys.stderr)
elif features == ['Location', 'Time']:
print('Location, Time', file=sys.stderr)
time_formatted_walks = [getTimeClassByNode(walk, g) for walk in node2vec.walks]
formatted_walks = [[j for i in zip_longest(a, b, fillvalue=None) for j in i][:-1] for a, b in list(zip(node2vec.walks, time_formatted_walks))]
print("Time_location_formatted_walks:\n", "\n\n".join(map(str, formatted_walks[0:3])), file=sys.stderr)
node2vec.walks = formatted_walks
model = node2vec.fit(window=10, min_count=1)
nodes_str = [x for x in model.wv.vocab if str.isdigit(x)]
print(nodes_str[0:5], file=sys.stderr)
embeddings = np.array([model.wv[x] for x in nodes_str])
nodes = [int(i) for i in nodes_str]
labels = [g.nodes[node]['TimeClass'] for node in nodes]
print("labels = ", labels)
print(embeddings.shape, file=sys.stderr)
else:
print('Location', file=sys.stderr)
model = node2vec.fit(window=10, min_count=1)
nodes_str = [x for x in model.wv.vocab if str.isdigit(x)]
print(nodes_str[0:5], file=sys.stderr)
embeddings = np.array([model.wv[x] for x in nodes_str])
labels = [int(i) for i in nodes_str]
print(embeddings.shape, file=sys.stderr)
# Writing the data:
np.savetxt('data/output_embeddings.csv', np.insert(embeddings, 0, np.arange(dimensions, dtype=int), axis=0), delimiter=',', fmt='%.4f')
with open('data/output_labels.csv', 'w') as f:
f.write("0\n")
for item in labels:
f.write("%s\n" % item)
return u'''
The Button has been pressed {} times,
Features are {},
Dimensions = {},
walk_length = {},
num_walks = {}.
'''.format(n_clicks, features, dimensions, walk_length, num_walks)
def parse_content(contents, filename):
"""This function parses the raw content and the file names, and returns the dataframe containing the data, as well
as the message displaying whether it was successfully parsed or not."""
if contents is None:
return None, ""
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
if 'csv' in filename:
# Assume that the user uploaded a CSV file
df = pd.read_csv(
io.StringIO(decoded.decode('utf-8')))
print('df = ', df)
elif 'xls' in filename:
# Assume that the user uploaded an excel file
df = pd.read_excel(io.BytesIO(decoded))
else:
return None, 'The file uploaded is invalid.'
except Exception as e:
print(e)
return None, 'There was an error processing this file.'
return df, f'{filename} successfully processed.'
# Uploaded data --> Hidden Data Div
@app.callback(Output('data-df-and-message', 'children'),
[Input('upload-data', 'contents'),
Input('upload-data', 'filename')])
def parse_data(contents, filename):
data_df, message = parse_content(contents, filename)
if data_df is None:
return [None, message]
elif data_df.shape[1] < 3:
return [None, f'The dimensions of {filename} are invalid.']
return [data_df.to_json(orient="split"), message]
# Uploaded labels --> Hidden Label div
@app.callback(Output('label-df-and-message', 'children'),
[Input('upload-label', 'contents'),
Input('upload-label', 'filename')])
def parse_label(contents, filename):
label_df, message = parse_content(contents, filename)
if label_df is None:
return [None, message]
elif label_df.shape[1] != 1:
return [None, f'The dimensions of {filename} are invalid.']
return [label_df.to_json(orient="split"), message]
# Hidden Data Div --> Display upload status message (Data)
@app.callback(Output('upload-data-message', 'children'),
[Input('data-df-and-message', 'children')])
def output_upload_status_data(data):
return data[1]
# Hidden Label Div --> Display upload status message (Labels)
@app.callback(Output('upload-label-message', 'children'),
[Input('label-df-and-message', 'children')])
def output_upload_status_label(data):
return data[1]
#t-SNE Button Click --> Update t-SNE graph with states
@app.callback(Output('tsne-plot-div', 'children'),
[Input('tsne-train-button', 'n_clicks')],
[State('perplexity-state', 'value'),
State('n-iter-state', 'value'),
State('lr-state', 'value'),
State('data-df-and-message', 'children'),
State('label-df-and-message', 'children')
])
def update_tsne_graph(n_clicks, perplexity, n_iter, learning_rate, data_div, label_div):
"""Run the t-SNE algorithm upon clicking the training button"""
error_message = None # No error message at the beginning
# Fix for startup POST
if n_clicks <= 0 and (data_div is None or label_div is None):
global data
kl_divergence, end_time = None, None
else:
print("n_clicks___ = ", n_clicks)
data_df = np.array(pd.read_csv("data/output_embeddings.csv"))
label_df = | pd.read_csv("data/output_labels.csv") | pandas.read_csv |
#######################################################
# ---------- Network Propagation Functions ---------- #
#######################################################
import networkx as nx
import time
import numpy as np
import pandas as pd
# Normalize network (or network subgraph) for random walk propagation.
# If symmetric norm is used, the adjacency matrix is normalized as D^-0.5 * A * D^-0.5;
# otherwise the network is normalized as A * D^-1 and transposed,
# where D is the diagonalized degree (default is colsum) of the adjacency matrix A.
def normalize_network(network, symmetric_norm=False):
adj_mat = nx.adjacency_matrix(network)
adj_array = np.array(adj_mat.todense())
if symmetric_norm:
D = np.diag(1/np.sqrt(sum(adj_array)))
adj_array_norm = np.dot(np.dot(D, adj_array), D)
else:
degree = sum(adj_array)
adj_array_norm = (adj_array*1.0/degree).T
return adj_array_norm
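# --- Illustrative sketch (not part of the module): sanity checks for normalize_network.
# The toy triangle graph below is an assumption used only for this check: the default
# (column-sum) normalization followed by the transpose gives a row-stochastic walk matrix,
# while the symmetric variant returns a symmetric matrix.
def _example_normalize_network():
    toy = nx.cycle_graph(3)                        # triangle graph, every node has degree 2
    walk_mat = normalize_network(toy, symmetric_norm=False)
    assert np.allclose(walk_mat.sum(axis=1), 1.0)  # rows sum to 1
    sym_mat = normalize_network(toy, symmetric_norm=True)
    assert np.allclose(sym_mat, sym_mat.T)         # D^-0.5 * A * D^-0.5 stays symmetric
    return walk_mat, sym_mat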
# Closed form random-walk propagation (as seen in HotNet2) for each subgraph: Ft = (1-alpha)*Fo * (I-alpha*norm_adj_mat)^-1
# Concatenate to previous set of subgraphs
def fast_random_walk(alpha, binary_mat, subgraph_norm, prop_data_prev):
term1=(1-alpha)*binary_mat
term2=np.identity(binary_mat.shape[1])-alpha*subgraph_norm
term2_inv = np.linalg.inv(term2)
subgraph_prop = np.dot(term1, term2_inv)
prop_data_add = np.concatenate((prop_data_prev, subgraph_prop), axis=1)
return prop_data_add
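# --- Illustrative sketch (not part of the module): the closed form above is the fixed
# point of the iterative update F_{t+1} = alpha * F_t * W + (1 - alpha) * F_0.  The
# 3-node toy graph and the starting heat vector are assumptions made for this check only.
def _example_fast_random_walk(alpha=0.5):
    toy = nx.path_graph(3)
    W = normalize_network(toy)                       # row-stochastic walk matrix
    F0 = np.array([[1.0, 0.0, 0.0]])                 # all "heat" starts on node 0
    closed = fast_random_walk(alpha, F0, W, np.zeros((1, 1)))[:, 1:]
    F = F0.copy()
    for _ in range(200):                             # iterate until numerically converged
        F = alpha * np.dot(F, W) + (1 - alpha) * F0
    assert np.allclose(closed, F)
    return closed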
# Wrapper for random walk propagation of full network by subgraphs
# Implementation is based on the closed form of the random walk model over networks presented by the HotNet2 paper
def network_propagation(network, binary_matrix, alpha=0.7, symmetric_norm=False, verbose=True, **save_args):
# Parameter error check
alpha = float(alpha)
if alpha <= 0.0 or alpha >= 1.0:
raise ValueError('Alpha must be a value between 0 and 1')
# Begin network propagation
starttime=time.time()
if verbose:
print('Performing network propagation with alpha:', alpha)
# Separate network into connected components and calculate propagation values of each sub-sample on each connected component
subgraphs = [network.subgraph(c).copy() for c in nx.connected_components(network)]  # connected_component_subgraphs was removed in newer networkx
# Initialize propagation results by propagating first subgraph
subgraph = subgraphs[0]
subgraph_nodes = list(subgraph.nodes)
prop_data_node_order = list(subgraph_nodes)
binary_matrix_filt = np.array(binary_matrix.T.reindex(subgraph_nodes).fillna(0).T)
subgraph_norm = normalize_network(subgraph, symmetric_norm=symmetric_norm)
prop_data_empty = np.zeros((binary_matrix_filt.shape[0], 1))
prop_data = fast_random_walk(alpha, binary_matrix_filt, subgraph_norm, prop_data_empty)
# Get propagated results for remaining subgraphs
for subgraph in subgraphs[1:]:
subgraph_nodes = list(subgraph.nodes)
prop_data_node_order = prop_data_node_order + subgraph_nodes
binary_matrix_filt = np.array(binary_matrix.T.reindex(subgraph_nodes).fillna(0).T)
subgraph_norm = normalize_network(subgraph, symmetric_norm=symmetric_norm)
prop_data = fast_random_walk(alpha, binary_matrix_filt, subgraph_norm, prop_data)
# Return propagated result as dataframe
prop_data_df = | pd.DataFrame(data=prop_data[:,1:], index = binary_matrix.index, columns=prop_data_node_order) | pandas.DataFrame |
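# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Minimal numerical check of the closed form used in fast_random_walk: the propagated
# scores Ft = (1-alpha) * F0 * (I - alpha*W)^-1 are the fixed point of the iteration
# F_{t+1} = alpha * F_t * W + (1-alpha) * F0, where W stands for the normalized adjacency
# matrix produced by normalize_network above. Toy numbers only.
import numpy as np

alpha = 0.7
W = np.array([[0.0, 1.0, 0.0],
              [0.5, 0.0, 0.5],
              [0.0, 1.0, 0.0]])      # tiny row-normalized adjacency matrix
F0 = np.array([[1.0, 0.0, 0.0]])     # a single "hot" seed node

closed_form = (1 - alpha) * F0 @ np.linalg.inv(np.eye(3) - alpha * W)

F = F0.copy()
for _ in range(200):                 # the power iteration converges to the same values
    F = alpha * F @ W + (1 - alpha) * F0
assert np.allclose(F, closed_form, atol=1e-8)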
import numpy as np
import pandas as pd
import pathlib
import os
import matplotlib.pyplot as plt
from bokeh.layouts import row
from bokeh.plotting import figure, output_file, show, gridplot
from bokeh.models import ColumnDataSource, FactorRange
from bokeh.palettes import Spectral6
from bokeh.transform import factor_cmap
###############################################################################
current_week = 38
output_week = "/Users/christianhilscher/desktop/dynsim/output/week" + str(current_week) + "/"
pathlib.Path(output_week).mkdir(parents=True, exist_ok=True)
###############################################################################
input_path = "/Users/christianhilscher/Desktop/dynsim/input/"
output_path = "/Users/christianhilscher/Desktop/dynsim/output/"
plot_path = "/Users/christianhilscher/Desktop/dynsim/src/plotting/"
os.chdir(plot_path)
def get_data(dataf, into_future, variable, metric):
dataf = dataf.copy()
dataf = dataf[dataf[variable + "_real"] > 0]
ml_values = np.empty(len(into_future))
standard_values = np.empty_like(ml_values)
real_values = np.empty_like(ml_values)
for (ind, a) in enumerate(into_future):
df_ana = dataf[dataf["period_ahead"] == a]
ml_values[ind] = get_value(df_ana, variable, metric, type="_ml")
standard_values[ind] = get_value(df_ana, variable, metric, type="_standard")
real_values[ind] = get_value(df_ana, variable, metric, type="_real")
dici = {"ml_values": ml_values,
"standard_values": standard_values,
"real_values": real_values}
dici["ahead"] = into_future
return dici
def get_value(dataf, var, metric, type):
dataf = dataf.copy()
variable = var + type
if metric == "mean":
res = dataf[variable].mean()
elif metric == "median":
res = dataf[variable].median()
elif metric == "variance":
res = dataf[variable].var()
elif metric == "p90p50":
p90_val = np.quantile(dataf[variable], 0.9)
p50_val = np.quantile(dataf[variable], 0.5)
res = p90_val/p50_val
elif metric == "p50p10":
p10_val = np.quantile(dataf[variable], 0.1)
p50_val = np.quantile(dataf[variable], 0.5)
res = p50_val/p10_val
else:
raise ValueError(f"Unknown metric: {metric}")
return res
def plot_deviations(dataf, into_future, variable, metric):
dataf = dataf.copy()
dataf = pd.DataFrame(dataf)
dataf = pd.melt(dataf, id_vars=["ahead"])
future = dataf["ahead"].unique().tolist()
future = [str(f) for f in future]
types = dataf["variable"].unique().tolist()
x = [(a, type) for a in future for type in types]
counts = dataf["value"]
name = metric + " for " + variable
s = ColumnDataSource(data=dict(x=x, counts=counts))
p = figure(x_range=FactorRange(*x), title=name)
p.vbar(x='x', top='counts', width=0.9, source=s, fill_color=factor_cmap('x', palette=Spectral6, factors=types, start=1, end=2))
p.y_range.start = 0
p.x_range.range_padding = 0.1
p.xaxis.major_label_orientation = 1
p.xgrid.grid_line_color = None
return p
df = | pd.read_pickle(output_week + "df_analysis_full") | pandas.read_pickle |
# BSD 2-CLAUSE LICENSE
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# original author: <NAME>
"""Class for benchmarking model templates."""
from typing import Dict
from typing import List
import numpy as np
import pandas as pd
import plotly.graph_objs as go
from plotly.colors import DEFAULT_PLOTLY_COLORS
from tqdm.autonotebook import tqdm
from greykite.common.constants import ACTUAL_COL
from greykite.common.constants import PREDICTED_COL
from greykite.common.constants import PREDICTED_LOWER_COL
from greykite.common.constants import PREDICTED_UPPER_COL
from greykite.common.constants import TIME_COL
from greykite.common.constants import VALUE_COL
from greykite.common.evaluation import EvaluationMetricEnum
from greykite.common.evaluation import add_finite_filter_to_scorer
from greykite.common.logging import LoggingLevelEnum
from greykite.common.logging import log_message
from greykite.common.python_utils import get_pattern_cols
from greykite.common.viz.timeseries_plotting import plot_multivariate
from greykite.common.viz.timeseries_plotting import plot_multivariate_grouped
from greykite.framework.benchmark.benchmark_class_helper import forecast_pipeline_rolling_evaluation
from greykite.framework.constants import FORECAST_STEP_COL
from greykite.framework.templates.autogen.forecast_config import ForecastConfig
from greykite.framework.templates.forecaster import Forecaster
from greykite.sklearn.cross_validation import RollingTimeSeriesSplit
class BenchmarkForecastConfig:
"""Class for benchmarking multiple ForecastConfig on a rolling window basis.
Attributes
----------
df : `pandas.DataFrame`
Timeseries data to forecast.
Contains columns [`time_col`, `value_col`], and optional regressor columns.
Regressor columns should include future values for prediction.
configs : `Dict` [`str`, `ForecastConfig`]
Dictionary of model configurations.
A model configuration is a ``ForecastConfig``.
See :class:`~greykite.framework.templates.autogen.forecast_config.ForecastConfig` for details on
valid ``ForecastConfig``.
Validity of the ``configs`` for benchmarking is checked via the ``validate`` method.
tscv : `~greykite.sklearn.cross_validation.RollingTimeSeriesSplit`
Cross-validation object that determines the rolling window evaluation.
See :class:`~greykite.sklearn.cross_validation.RollingTimeSeriesSplit` for details.
The ``forecast_horizon`` and ``periods_between_train_test`` parameters of ``configs`` are
matched against that of ``tscv``. A ValueError is raised if there is a mismatch.
forecaster : `~greykite.framework.templates.forecaster.Forecaster`
Forecaster used to create the forecasts.
is_run : bool, default False
Indicator of whether the `run` method is executed.
After executing `run`, this indicator is set to True.
Some class methods like ``get_forecast`` requires ``is_run`` to be True
to be executed.
result : `dict`
Stores the benchmarking results. Has the same keys as ``configs``.
forecasts : `pandas.DataFrame`, default None
Merged DataFrame of forecasts, upper and lower confidence interval for all
input ``configs``. Also stores train end date and forecast step for each prediction.
"""
def __init__(
self,
df: pd.DataFrame,
configs: Dict[str, ForecastConfig],
tscv: RollingTimeSeriesSplit,
forecaster: Forecaster = Forecaster()):
self.df = df
self.configs = configs
self.tscv = tscv
self.forecaster = forecaster
self.is_run = False
# output
self.result = dict.fromkeys(configs.keys())
self.forecasts = None
def validate(self):
"""Validates the inputs to the class for the method ``run``.
Raises a ValueError if there is a mismatch between the following parameters
of ``configs`` and ``tscv``:
- ``forecast_horizon``
- ``periods_between_train_test``
Raises ValueError if all the ``configs`` do not have the same ``coverage`` parameter.
"""
coverage_list = []
for config_name, config in self.configs.items():
# Checks forecast_horizon
if config.forecast_horizon != self.tscv.forecast_horizon:
raise ValueError(f"{config_name}'s 'forecast_horizon' ({config.forecast_horizon}) does "
f"not match that of 'tscv' ({self.tscv.forecast_horizon}).")
# Checks periods_between_train_test
if config.evaluation_period_param.periods_between_train_test != self.tscv.periods_between_train_test:
raise ValueError(f"{config_name}'s 'periods_between_train_test' ({config.evaluation_period_param.periods_between_train_test}) "
f"does not match that of 'tscv' ({self.tscv.periods_between_train_test}).")
coverage_list.append(config.coverage)
# Computes pipeline parameters
pipeline_params = self.forecaster.apply_forecast_config(
df=self.df,
config=config)
self.result[config_name] = dict(pipeline_params=pipeline_params)
# Checks all coverages are same
if coverage_list[1:] != coverage_list[:-1]:
raise ValueError("All forecast configs must have same coverage.")
def run(self):
"""Runs every config and stores the output of the
:func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`.
This function runs only if the ``configs`` and ``tscv`` are jointly valid.
Returns
-------
self : Returns self. Stores pipeline output of every config in ``self.result``.
"""
self.validate()
with tqdm(self.result.items(), ncols=800, leave=True) as progress_bar:
for (config_name, config) in progress_bar:
# Description will be displayed on the left of progress bar
progress_bar.set_description(f"Benchmarking '{config_name}' ")
rolling_evaluation = forecast_pipeline_rolling_evaluation(
pipeline_params=config["pipeline_params"],
tscv=self.tscv)
config["rolling_evaluation"] = rolling_evaluation
self.is_run = True
def extract_forecasts(self):
"""Extracts forecasts, upper and lower confidence interval for each individual
config. This is saved as a ``pandas.DataFrame`` with the name
``rolling_forecast_df`` within the corresponding config of ``self.result``.
e.g. if config key is "silverkite", then the forecasts are stored in
``self.result["silverkite"]["rolling_forecast_df"]``.
This method also constructs a merged DataFrame of forecasts,
upper and lower confidence interval for all input ``configs``.
"""
if not self.is_run:
raise ValueError("Please execute 'run' method to create forecasts.")
merged_df = pd.DataFrame()
for config_name, config in self.result.items():
rolling_evaluation = config["rolling_evaluation"]
rolling_forecast_df = pd.DataFrame()
for num, (split_key, split_value) in enumerate(rolling_evaluation.items()):
forecast = split_value["pipeline_result"].forecast
# Subsets forecast_horizon rows from the end of forecast dataframe
forecast_df = forecast.df.iloc[-forecast.forecast_horizon:]
forecast_df.insert(0, "train_end_date", forecast.train_end_date)
forecast_df.insert(1, FORECAST_STEP_COL, np.arange(forecast.forecast_horizon) + 1)
forecast_df.insert(2, "split_num", num)
rolling_forecast_df = pd.concat([rolling_forecast_df, forecast_df], axis=0)
rolling_forecast_df = rolling_forecast_df.reset_index(drop=True)
self.result[config_name]["rolling_forecast_df"] = rolling_forecast_df
# Merges the forecasts of individual config
# Augments prediction columns with config name
pred_cols = [PREDICTED_COL]
if PREDICTED_LOWER_COL in rolling_forecast_df.columns:
pred_cols.append(PREDICTED_LOWER_COL)
if PREDICTED_UPPER_COL in rolling_forecast_df.columns:
pred_cols.append(PREDICTED_UPPER_COL)
mapper = {
col: f"{config_name}_{col}" for col in pred_cols
}
if merged_df.empty:
temp_df = rolling_forecast_df.rename(columns=mapper)
else:
temp_df = rolling_forecast_df[pred_cols].rename(columns=mapper)
merged_df = pd.concat([merged_df, temp_df], axis=1)
self.forecasts = merged_df.reset_index(drop=True)
def plot_forecasts_by_step(
self,
forecast_step: int,
config_names: List = None,
xlabel: str = TIME_COL,
ylabel: str = VALUE_COL,
title: str = None,
showlegend: bool = True):
"""Returns a ``forecast_step`` ahead rolling forecast plot.
The plot consists of one line for each valid ``config_names`` entry.
If available, the corresponding actual values are also plotted.
For a more customizable plot, see
:func:`~greykite.common.viz.timeseries_plotting.plot_multivariate`
Parameters
----------
forecast_step : `int`
Which forecast step to plot. A forecast step is an integer between 1 and the
forecast horizon, inclusive, indicating the number of periods from train end date
to the prediction date (# steps ahead).
config_names : `list` [`str`], default None
Which config results to plot. A list of config names.
If None, uses all the available config keys.
xlabel : `str` or None, default TIME_COL
x-axis label.
ylabel : `str` or None, default VALUE_COL
y-axis label.
title : `str` or None, default None
Plot title. If None, default is based on ``forecast_step``.
showlegend : `bool`, default True
Whether to show the legend.
Returns
-------
fig : `plotly.graph_objs.Figure`
Interactive plotly graph.
Plots multiple column(s) in ``self.forecasts`` against ``TIME_COL``.
See `~greykite.common.viz.timeseries_plotting.plot_forecast_vs_actual`
return value for how to plot the figure and add customization.
"""
if self.forecasts is None:
self.extract_forecasts()
if forecast_step > self.tscv.forecast_horizon:
raise ValueError(f"`forecast_step` ({forecast_step}) must be less than or equal to "
f"forecast horizon ({self.tscv.forecast_horizon}).")
config_names = self.get_valid_config_names(config_names)
y_cols = [TIME_COL, ACTUAL_COL] + \
[f"{config_name}_{PREDICTED_COL}" for config_name in config_names]
df = self.forecasts[self.forecasts[FORECAST_STEP_COL] == forecast_step]
df = df[y_cols]
if title is None:
title = f"{forecast_step}-step ahead rolling forecasts"
fig = plot_multivariate(
df=df,
x_col=TIME_COL,
y_col_style_dict="plotly",
xlabel=xlabel,
ylabel=ylabel,
title=title,
showlegend=showlegend)
return fig
def plot_forecasts_by_config(
self,
config_name: str,
colors: List = DEFAULT_PLOTLY_COLORS,
xlabel: str = TIME_COL,
ylabel: str = VALUE_COL,
title: str = None,
showlegend: bool = True):
"""Returns a rolling plot of the forecasts by ``config_name`` against ``TIME_COL``.
The plot consists of one line for each available split. Some lines may overlap if test
periods of the corresponding splits intersect. Hence every line is given a different color.
If available, the corresponding actual values are also plotted.
For a more customizable plot, see
:func:`~greykite.common.viz.timeseries_plotting.plot_multivariate_grouped`
Parameters
----------
config_name : `str`
Which config result to plot.
The name must match the name of one of the input ``configs``.
colors : [`str`, `List` [`str`]], default ``DEFAULT_PLOTLY_COLORS``
Which colors to use to build the color palette.
This can be a list of RGB colors or a `str` from ``PLOTLY_SCALES``.
To use a single color for all lines, pass a `List` with a single color.
xlabel : `str` or None, default TIME_COL
x-axis label.
ylabel : `str` or None, default VALUE_COL
y-axis label.
title : `str` or None, default None
Plot title. If None, default is based on ``config_name``.
showlegend : `bool`, default True
Whether to show the legend.
Returns
-------
fig : `plotly.graph_objs.Figure`
Interactive plotly graph.
Plots multiple column(s) in ``self.forecasts`` against ``TIME_COL``.
"""
if self.forecasts is None:
self.extract_forecasts()
config_name = self.get_valid_config_names([config_name])[0]
if title is None:
title = f"Rolling forecast for {config_name}"
fig = plot_multivariate_grouped(
df=self.forecasts,
x_col=TIME_COL,
y_col_style_dict={
ACTUAL_COL: {
"line": {
"width": 1,
"dash": "solid"
}
}
},
grouping_x_col="split_num",
grouping_x_col_values=None,
grouping_y_col_style_dict={
f"{config_name}_{PREDICTED_COL}": {
"name": "split",
"line": {
"width": 1,
"dash": "solid"
}
}
},
colors=colors,
xlabel=xlabel,
ylabel=ylabel,
title=title,
showlegend=showlegend)
return fig
def get_evaluation_metrics(
self,
metric_dict: Dict,
config_names: List = None):
"""Returns rolling train and test evaluation metric values.
Parameters
----------
metric_dict : `dict` [`str`, `callable`]
Evaluation metrics to compute.
- key: evaluation metric name, used to create column name in output.
- value: metric function to apply to forecast df in each split to generate the column value.
Signature (y_true: `str`, y_pred: `str`) -> transformed value: `float`.
For example::
metric_dict = {
"median_residual": lambda y_true, y_pred: np.median(y_true - y_pred),
"mean_squared_error": lambda y_true, y_pred: np.mean((y_true - y_pred)**2)
}
Some predefined functions are available in
`~greykite.common.evaluation`. For example::
metric_dict = {
"correlation": lambda y_true, y_pred: correlation(y_true, y_pred),
"RMSE": lambda y_true, y_pred: root_mean_squared_error(y_true, y_pred),
"Q_95": lambda y_true, y_pred: partial(quantile_loss(y_true, y_pred, q=0.95))
}
As shorthand, it is sufficient to provide the corresponding ``EvaluationMetricEnum``
member. These are auto-expanded into the appropriate function.
So the following is equivalent::
metric_dict = {
"correlation": EvaluationMetricEnum.Correlation,
"RMSE": EvaluationMetricEnum.RootMeanSquaredError,
"Q_95": EvaluationMetricEnum.Quantile95
}
config_names : `list` [`str`], default None
Which config results to plot. A list of config names.
If None, uses all the available config keys.
Returns
-------
evaluation_metrics_df : pd.DataFrame
A DataFrame containing splitwise train and test evaluation metrics for ``metric_dict``
and ``config_names``.
For example. Let's assume::
metric_dict = {
"RMSE": EvaluationMetricEnum.RootMeanSquaredError,
"Q_95": EvaluationMetricEnum.Quantile95
}
config_names = ["default_prophet", "custom_silverkite"]
These are valid ``config_names`` and there are 2 splits for each.
Then evaluation_metrics_df =
config_name split_num train_RMSE test_RMSE train_Q_95 test_Q_95
default_prophet 0 * * * *
default_prophet 1 * * * *
custom_silverkite 0 * * * *
custom_silverkite 1 * * * *
where * represents computed values.
"""
if not self.is_run:
raise ValueError("Please execute the 'run' method before computing evaluation metrics.")
metric_dict = self.autocomplete_metric_dict(
metric_dict=metric_dict,
enum_class=EvaluationMetricEnum)
config_names = self.get_valid_config_names(config_names=config_names)
evaluation_metrics_df = | pd.DataFrame() | pandas.DataFrame |
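# --- Illustrative sketch (editor's addition, not part of the original module) ---
# A hedged example of driving BenchmarkForecastConfig as defined above. The data, the
# "SILVERKITE" template name, the horizon, and the exact parameter choices are assumptions
# made for illustration only.
import numpy as np
import pandas as pd
from greykite.framework.templates.autogen.forecast_config import (
    EvaluationPeriodParam, MetadataParam)

def example_benchmark():
    df = pd.DataFrame({
        "ts": pd.date_range("2020-01-01", periods=400, freq="D"),
        "y": np.random.rand(400)})
    configs = {
        "silverkite_default": ForecastConfig(
            model_template="SILVERKITE",
            forecast_horizon=7,
            coverage=0.95,
            metadata_param=MetadataParam(time_col="ts", value_col="y", freq="D"),
            evaluation_period_param=EvaluationPeriodParam(periods_between_train_test=0))}
    tscv = RollingTimeSeriesSplit(forecast_horizon=7, max_splits=2)
    bm = BenchmarkForecastConfig(df=df, configs=configs, tscv=tscv)
    bm.run()                  # fits every config on each rolling split
    bm.extract_forecasts()    # populates bm.forecasts with merged predictions
    return bm.get_evaluation_metrics(
        metric_dict={"RMSE": EvaluationMetricEnum.RootMeanSquaredError})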
import pandas as pd
import json
dataset_file1 = open('../../data/AMT/AMT_results/Final_Dataset.json','r') # change the file name here to fetch the data from
tab1 = pd.read_json(dataset_file1, lines=True)
#tab1 = tab1.sort_values(by=['question_id'])
dataset_file2 = open('../../data/Preprocessing/CSQA_version/Dataset_SimpleQA_qualifiers.json','r') #change the file name here to fetch the data from
dataset_decode2 = json.load(dataset_file2)
tab2 = pd.DataFrame(data=dataset_decode2,columns= ['question_id', 'question_entity_label','question_relation'])
#print(tab2.head)
"""
tab_ent= tab2['answer_entity_labels'].apply(pd.Series)
tab_ent = tab_ent.rename(columns = lambda x : 'tag_' + str(x))
new_tab = pd.concat([tab2[:],tab_ent[:]],axis=1)
new_tab = new_tab[new_tab['tag_1'].isnull()]
new_tab = new_tab[new_tab['tag_2'].isnull()]
new_tab = new_tab.drop(['answer_entity_labels'],axis=1)
new_tab.rename(columns = {'tag_0':'answer_entity_label'}, inplace=True)
new_tab = new_tab.dropna(axis = 'columns', how= 'all')
new_tab = new_tab.dropna(axis='index', how = 'any')
tab2 = tab2.sort_values(by=['question_id'])
"""
dataset_file3 = open('../../data/Preprocessing/SimpleQuestionWikidata_version/Dataset_SimpleQA_labels.json','r') #change the file name here to fetch the data from
dataset_decode3 = json.load(dataset_file3)
tab3 = | pd.DataFrame(data=dataset_decode3,columns= ['question_id', 'question_entity_label','question_relation']) | pandas.DataFrame |
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.manifold import TSNE
from sklearn.cluster import AgglomerativeClustering, KMeans
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.preprocessing import MaxAbsScaler
from scipy.special import softmax
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import pandas
import matplotlib.cm as cm
import umap
import tqdm
import scanpy as sc
import matplotlib.gridspec as gridspec
import networkx as nx
import matplotlib as mpl
import numpy
import operator
import random
import pickle
import collections
import sys
import os
class GeneEmbedding(object):
def __init__(self, embedding_file, compass_dataset, vector="1"):
if vector not in ("1","2","average"):
raise ValueError("Select the weight vector from: ('1','2','average')")
if vector == "average":
print("Loading average of 1st and 2nd weights.")
avg_embedding = embedding_file.replace(".vec","_avg.vec")
secondary_weights = embedding_file.replace(".vec","2.vec")
GeneEmbedding.average_vector_results(embedding_file,secondary_weights,avg_embedding)
self.embeddings = self.read_embedding(avg_embedding)
elif vector == "1":
print("Loading first weights.")
self.embeddings = self.read_embedding(embedding_file)
elif vector == "2":
print("Loading second weights.")
secondary_weights = embedding_file.replace(".vec","2.vec")
self.embeddings = self.read_embedding(secondary_weights)
self.vector = []
self.context = compass_dataset.data
self.embedding_file = embedding_file
self.vector = []
self.genes = []
for gene in tqdm.tqdm(self.embeddings.keys()):
# if gene in self.embeddings:
self.vector.append(self.embeddings[gene])
self.genes.append(gene)
def read_embedding(self, filename):
embedding = dict()
lines = open(filename,"r").read().splitlines()[1:]
for line in lines:
vector = line.split()
gene = vector.pop(0)
embedding[gene] = [float(x) for x in vector]
return embedding
def compute_similarities(self, gene, subset=None, feature_type=None):
if gene not in self.embeddings:
return None
if feature_type:
subset = []
for g in list(self.embeddings.keys()):  # separate loop variable so the query gene is not shadowed
if feature_type == self.context.feature_types[g]:
subset.append(g)
embedding = self.embeddings[gene]
distances = dict()
if subset:
targets = set(list(self.embeddings.keys())).intersection(set(subset))
else:
targets = list(self.embeddings.keys())
for target in targets:
if target not in self.embeddings:
continue
v = self.embeddings[target]
distance = float(cosine_similarity(numpy.array(embedding).reshape(1, -1),numpy.array(v).reshape(1, -1))[0])
distances[target] = distance
sorted_distances = list(reversed(sorted(distances.items(), key=operator.itemgetter(1))))
genes = [x[0] for x in sorted_distances]
distance = [x[1] for x in sorted_distances]
df = pandas.DataFrame.from_dict({"Gene":genes, "Similarity":distance})
return df
def get_similar_genes(self, vector):
distances = dict()
targets = list(self.embeddings.keys())
for target in targets:
if target not in self.embeddings:
continue
v = self.embeddings[target]
distance = float(cosine_similarity(numpy.array(vector).reshape(1, -1),numpy.array(v).reshape(1, -1))[0])
distances[target] = distance
sorted_distances = list(reversed(sorted(distances.items(), key=operator.itemgetter(1))))
genes = [x[0] for x in sorted_distances]
distance = [x[1] for x in sorted_distances]
df = pandas.DataFrame.from_dict({"Gene":genes, "Similarity":distance})
return df
def cluster(self, threshold=0.75, lower_bound=1):
cluster_definitions = collections.defaultdict(list)
G = self.generate_network(threshold=threshold)
G.remove_edges_from(nx.selfloop_edges(G))
for i, connected_component in enumerate(nx.connected_components(G)):
subg = G.subgraph(connected_component)
if len(subg.nodes()) > lower_bound:
# if len(subg.nodes()) == 2:
# cluster_definitions[str(i+j+100)] += list(subg.nodes())
# continue
clique_tree = nx.tree.junction_tree(subg)
clique_tree.remove_nodes_from(list(nx.isolates(clique_tree)))
for j, cc in enumerate(nx.connected_components(clique_tree)):
for clique_cc in cc:
cluster_definitions[str(i+j)] += list(set(clique_cc))
self.cluster_definitions = cluster_definitions
return self.cluster_definitions
def clusters(self, clusters):
average_vector = dict()
gene_to_cluster = collections.defaultdict(list)
matrix = collections.defaultdict(list)
total_average_vector = []
for gene, cluster in zip(self.context.expressed_genes, clusters):
if gene in self.embeddings:
matrix[cluster].append(self.embeddings[gene])
gene_to_cluster[cluster].append(gene)
total_average_vector.append(self.embeddings[gene])
self.total_average_vector = list(numpy.average(total_average_vector, axis=0))
for cluster, vectors in matrix.items():
xvec = list(numpy.average(vectors, axis=0))
average_vector[cluster] = numpy.subtract(xvec,self.total_average_vector)
return average_vector, gene_to_cluster
def generate_vector(self, genes):
vector = []
for gene, vec in zip(self.genes, self.vector):
if gene in genes:
vector.append(vec)
assert len(vector) != 0, genes
return list(numpy.median(vector, axis=0))
def cluster_definitions_as_df(self, top_n=20):
similarities = self.cluster_definitions
clusters = []
symbols = []
for key, genes in similarities.items():
clusters.append(key)
symbols.append(", ".join(genes[:top_n]))
df = pandas.DataFrame.from_dict({"Cluster Name":clusters, "Top Genes":symbols})
return df
def plot(self, png=None, method="TSNE", labels=[], pcs=None, remove=[]):
plt.figure(figsize = (8, 8))
ax = plt.subplot(1,1,1)
pcs = self.plot_reduction(self.cluster_labels, ax, labels=labels, method=method, pcs=pcs, remove=remove)
# if png:
# plt.savefig(png)
# plt.close()
# else:
plt.show()
return pcs
def marker_labels(self,top_n=5):
markers = []
cluster_definitions = self.cluster_definitions
marker_labels = dict()
for gclust, genes in cluster_definitions.items():
print(gclust, ",".join(genes[:5]))
markers += genes[:top_n]
for gene in genes[:top_n]:
marker_labels[gene] = gclust
return markers, marker_labels
def plot_reduction(self, clusters, ax, method="TSNE", labels=[], pcs=None, remove=[]):
if type(pcs) != numpy.ndarray:
if method == "TSNE":
print("Running t-SNE")
pca = TSNE(n_components=2, n_jobs=-1, metric="cosine")
pcs = pca.fit_transform(self.vector)
pcs = numpy.transpose(pcs)
print("Finished.")
else:
print("Running UMAP")
trans = umap.UMAP(random_state=42,metric='cosine').fit(self.vector)
x = trans.embedding_[:, 0]
y = trans.embedding_[:, 1]
pcs = [x,y]
print("Finished.")
if len(remove) != 0:
_pcsx = []
_pcsy = []
_clusters = []
for x, y, c in zip(pcs[0],pcs[1],clusters):
if c not in remove:
_pcsx.append(x)
_pcsy.append(y)
_clusters.append(c)
pcs = []
pcs.append(_pcsx)
pcs.append(_pcsy)
clusters = _clusters
data = {"x":pcs[0],"y":pcs[1], "Cluster":clusters}
df = pandas.DataFrame.from_dict(data)
sns.scatterplot(data=df,x="x", y="y",hue="Cluster", ax=ax)
plt.xlabel("{}-1".format(method))
plt.ylabel("{}-2".format(method))
ax.set_xticks([])
ax.set_yticks([])
if len(labels):
for x, y, gene in zip(pcs[0], pcs[1], self.context.expressed_genes):
if gene in labels:
ax.text(x+.02, y, str(gene), fontsize=8)
return pcs
def subtract_vector(self, vector):
for gene, vec in self.embeddings.items():
vec = numpy.subtract(vec, vector)
self.embeddings[gene] = vec
def relabel_clusters(self, clusters, annotations):
_clusters = []
for cluster in clusters:
if cluster in annotations:
_clusters.append(annotations[cluster])
else:
_clusters.append(cluster)
self.cluster_labels = _clusters
return _clusters
def plot_similarity_matrix(self, top_n=5, png=None):
markers, marker_labels = self.marker_labels(top_n=top_n)
cmap = matplotlib.cm.tab20
node_color = {}
type_color = {}
ctypes = []
for marker in markers:
if marker_labels:
ctypes = []
for value in marker_labels.values():
ctypes.append(value)
ctypes = list(set(ctypes))
node_color[marker] = cmap(ctypes.index(marker_labels[marker]))
type_color[marker_labels[marker]] = cmap(ctypes.index(marker_labels[marker]))
mm = pandas.DataFrame(markers, index=markers)
mm["Gene Cluster"] = mm[0]
row_colors = mm["Gene Cluster"].map(node_color)
similarity_matrix = []
markers = set(list(self.embeddings.keys())).intersection(set(markers))
markers = list(markers)
for marker in markers:
row = []
res = self.compute_similarities(marker, subset=markers)
resdict = dict(zip(res["Gene"],res["Similarity"]))
for gene in markers:
row.append(resdict[gene])
similarity_matrix.append(row)
from matplotlib.patches import Patch
plt.figure()
matrix = numpy.array(similarity_matrix)
df = pandas.DataFrame(matrix,index=markers,columns=markers)
sns.clustermap(df,cbar_pos=None,figsize=(12,12), dendrogram_ratio=0.1, cmap="mako",row_colors=row_colors,yticklabels=True,xticklabels=True)
handles = [Patch(facecolor=type_color[name]) for name in type_color]
plt.legend(handles, type_color, title='Gene Cluster',
bbox_to_anchor=(1, 1), bbox_transform=plt.gcf().transFigure, loc='upper right')
plt.tight_layout()
if png:
plt.savefig("marker_similarity.png")
else:
plt.show()
def similarity_network(self,top_n=5):
markers, marker_labels = self.marker_labels(top_n=top_n)
G = nx.Graph()
for marker in markers:
G.add_node(marker)
for marker in markers:
res = self.compute_similarities(marker)
resdict = dict(zip(res["Gene"],res["Similarity"]))
for gene, similarity in resdict.items():
if gene != marker:
if gene not in G.nodes():
G.add_node(gene)
G.add_edge(marker, gene, weight=similarity)
return G
def plot_similarity_network(self, top_n=5, png=None):
markers, marker_labels = self.marker_labels(top_n=top_n)
cmap = matplotlib.cm.tab20
G = nx.petersen_graph()
node_color = []
node_order = []
node_size = []
edge_order = []
edge_color = []
edge_labels = dict()
for marker in markers:
node_order.append(marker)
if marker_labels:
ctypes = []
for value in marker_labels.values():
ctypes.append(value)
ctypes = list(set(ctypes))
node_color.append(ctypes.index(marker_labels[marker]))
node_size.append(400)
G.add_node(marker)
for marker in markers:
res = self.compute_similarities(marker)
resdict = dict(zip(res["Gene"],res["Similarity"]))
i = 0
for gene, similarity in resdict.items():
if i > 9: break
if gene != marker:
if gene not in G.nodes():
node_size.append(0)
G.add_node(gene)
node_order.append(gene)
node_color.append(len(set(marker_labels.values())))
G.add_edge(marker, gene, weight=similarity)
edge_color.append(similarity)
edge_order.append((marker,gene))
edge_labels[(marker,gene)] = str(round(similarity,2))
i += 1
# print(node_color)
# c = max(nx.connected_components(G), key=len)
# G = G.subgraph(c).copy()
for i in range(10):
G.remove_node(i)
fig = plt.figure(figsize=(8,8))
ax = plt.subplot(1,1,1)
pos = nx.nx_agraph.graphviz_layout(G, prog="neato",args="-Goverlap=scale -Elen=5 -Eweight=0.2")
nx.draw(G,pos,ax=ax, cmap=cmap,nodelist=node_order,
node_size=node_size,
edgelist=edge_order,
node_color=node_color,
edge_color=edge_color,
edge_vmin=0,
edge_vmax=1.0,
edge_cmap=plt.cm.Greys,
with_labels=True, width=1,font_size=7)
nx.draw_networkx_edge_labels(G,pos,edge_labels=edge_labels, font_size=6)
plt.axis('off')
plt.tight_layout()
if png:
plt.savefig(png)
else:
plt.show()
return G
@staticmethod
def read_vector(vec):
lines = open(vec,"r").read().splitlines()
dims = lines.pop(0)
vecs = dict()
for line in lines:
line = line.split()
gene = line.pop(0)
vecs[gene] = list(map(float,line))
return vecs, dims
def cluster_network(self, genes, nxg, threshold=0.0, title="", display=True):
G = nxg.subgraph(genes)
for subg in nx.connected_components(G):
if len(subg) > 1:
if display:
fig = plt.figure(figsize=(14,6))
ax = plt.subplot(1,2,1)
subG = G.subgraph(list(subg))
centrality = dict(nx.betweenness_centrality(subG))
low, *_, high = sorted(centrality.values())
norm = mpl.colors.Normalize(vmin=low, vmax=high, clip=True)
mapper = mpl.cm.ScalarMappable(norm=norm, cmap=mpl.cm.coolwarm)
pos = nx.nx_agraph.graphviz_layout(subG, prog="neato",args="-Goverlap=scale -Elen=5 -Eweight=0.2")
nx.draw(subG,pos,with_labels=True,
node_color=[mapper.to_rgba(i) for i in centrality.values()],
node_size=100,ax=ax,font_size=16,
edge_color=[[0.5,0.5,0.5,0.5] for _ in subG.edges()])
vector = self.generate_vector(list(subg))
ax = plt.subplot(1,2,2)
pcs = self.pcs["UMAP"]
distances = []
dataset_distance = float(cosine_similarity(numpy.array(vector).reshape(1, -1),numpy.array(self.dataset_vector).reshape(1, -1))[0])
for cell_vector in self.matrix:
distance = float(cosine_similarity(numpy.array(cell_vector).reshape(1, -1),numpy.array(vector).reshape(1, -1))[0])
d = distance-dataset_distance
if d < threshold:
d = threshold
distances.append(d)
data = {"x":pcs[0],"y":pcs[1],"Distance": distances}
df = | pandas.DataFrame.from_dict(data) | pandas.DataFrame.from_dict |
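# --- Illustrative sketch (editor's addition, not part of the original module) ---
# The ranking logic in compute_similarities / get_similar_genes boils down to scoring every
# stored vector against a query vector with cosine similarity and sorting. A self-contained
# version of that pattern with made-up gene names and 3-d toy vectors:
import numpy as np
import pandas
from sklearn.metrics.pairwise import cosine_similarity

toy_embeddings = {
    "CD3D": [0.9, 0.1, 0.0],
    "CD3E": [0.8, 0.2, 0.1],
    "LYZ": [0.0, 0.9, 0.4],
}

def rank_by_similarity(query_vector, embeddings):
    q = np.array(query_vector).reshape(1, -1)
    scores = {gene: float(cosine_similarity(q, np.array(vec).reshape(1, -1))[0])
              for gene, vec in embeddings.items()}
    ordered = sorted(scores.items(), key=lambda kv: kv[1], reverse=True)
    return pandas.DataFrame(ordered, columns=["Gene", "Similarity"])

print(rank_by_similarity(toy_embeddings["CD3D"], toy_embeddings))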
# -*- coding: utf-8 -*-
# @Author: rish
# @Date: 2020-08-19 12:22:30
# @Last Modified by: rish
# @Last Modified time: 2020-08-20 00:37:52
### Imports START
import config
import json
import logging
import pandas as pd
### Imports END
### Global declarations
logger = logging.getLogger(__name__)
# [START Function to get data into memory in one go]
def get_data(input_file):
'''
Function to load the data in memory in one go.
Args:
- input file name / relative path
Returns:
- parsed data frame
'''
# Get data in memory
with open(config.BASE_PATH + input_file) as f:
data = f.read().splitlines()
# Parse json
df = | pd.DataFrame(data, columns=['json']) | pandas.DataFrame |
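# --- Illustrative sketch (editor's addition, not part of the original module) ---
# get_data leaves each record as a raw JSON string in the single 'json' column; the
# downstream step is not shown in this excerpt. A common, hedged follow-up is to parse that
# column into proper columns:
import json
import pandas as pd

def expand_json_column(df):
    parsed = df['json'].apply(json.loads)          # one dict per row
    return pd.json_normalize(parsed.tolist())      # one column per JSON key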
import pandas as pd # DataFrame handling
import numpy as np # matrix operations
from tkinter import filedialog
from tkinter import messagebox
import tkinter as tk
import tkinter.ttk as ttk
from winreg import *
import os
def central_box(root):
# Gets the requested values of the height and width.
windowWidth = root.winfo_reqwidth()
windowHeight = root.winfo_reqheight()
# Gets both half the screen width/height and window width/height
positionRight = int(root.winfo_screenwidth() / 2 - windowWidth / 2)
positionDown = int(root.winfo_screenheight() / 2 - windowHeight / 2)
# Positions the window in the center of the page.
root.geometry("+{}+{}".format(positionRight, positionDown))
return root
def make_quota(filename, levels, level, num, grouping, condition_name, filtering=None):
# Load Data
df = pd.read_excel("{}".format(filename), sheet_name=1)
if filtering==None:
df = df[df.구분=='광역시도']
elif filtering[0]=='세종특별자치시':
df = df[df.구분==level]
df = df.drop(levels, axis=1)
# Filtering
if filtering!=None:
if (filtering[1]=='구 지역') | (filtering[0]=='세종특별자치시'):
df = df[df.광역시도==filtering[0]].sum()
df = pd.DataFrame(df).T
df['광역시도'] = filtering[0]
df['시군구'] = filtering[1]
else:
df = df[(df.광역시도==filtering[0])&(df.시군구==filtering[1])]
df = df.rename(columns={'광역시도': '전체'})
df = df.drop('구분', axis=1)
# Grouping
if grouping:
# List of region groups
gp_name_lst = [('경기도','인천광역시'),('대전광역시','세종특별자치시','충청남도','충청북도'),
('광주광역시','전라남도','전라북도'),('대구광역시','경상북도'),
('부산광역시','울산광역시','경상남도'),('강원도','제주특별자치도')]
# Process each group of region names in a loop
for gp_names in gp_name_lst:
# Extract only the regions in this group, then sum them
new = df[df.광역시도.isin(gp_names)].sum(axis=0)
# Append the group total as a new row
df = df.append(new, ignore_index=True)
# Rename
df.iloc[-1,0] = '광역시도'
df.iloc[-1,1] = '/'.join(gp_names) # join the region names with '/'
# Drop the original rows of the grouped regions
df = df[~df.광역시도.isin(gp_names)]
elif (level=='광역시도') and (filtering==None):
# Names to group
gp_names =['충청남도','세종특별자치시']
# Extract only Chungcheongnam-do and Sejong, then sum them
new = df[df.광역시도.isin(gp_names)].sum(axis=0)
# Append the Chungnam/Sejong total as a new row
df = df.append(new, ignore_index=True)
# Rename
df.iloc[-1,0] = '광역시도'
df.iloc[-1,1] = '충청남도/세종특별자치시'
# Drop the original Chungnam and Sejong rows
df = df[~df.광역시도.isin(gp_names)]
# Define Features
male_cols = ['남 19-29세', '남 30대', '남 40대', '남 50대', '남 60세 이상']
female_cols = ['여 19-29세', '여 30대', '여 40대', '여 50대', '여 60세이상']
total_cols = male_cols + female_cols
# Total Population
try:
total_pop = df[total_cols].sum().sum()
except:
messagebox.showerror("메세지 박스","해당 파일의 기준 변수명이 다릅니다.")
exit()
# Step 2: before rounding
before_df = df.copy()
before_df[total_cols] = (df[total_cols] / total_pop) * num # divide each cell by the total population and scale by the target sample size
before_df['남 합계'] = before_df[male_cols].sum(axis=1)
before_df['여 합계'] = before_df[female_cols].sum(axis=1)
before_df['총계'] = before_df[['남 합계' ,'여 합계']].sum(axis=1)
# Step 2: round the male/female totals
before_sex_sum = before_df[['남 합계' ,'여 합계']].sum().round()
# Step 3: after rounding
after_df = df.copy()
after_df[total_cols] = (df[total_cols] / total_pop) * num # divide each cell by the total population and scale by the target sample size
after_df[total_cols] = after_df[total_cols].astype(float).round().astype(int) # round each cell
after_df['남 합계'] = after_df[male_cols].sum(axis=1)
after_df['여 합계'] = after_df[female_cols].sum(axis=1)
after_df['총계'] = after_df[['남 합계' ,'여 합계']].sum(axis=1)
# Step 3: male/female totals after rounding
after_sex_sum = after_df[['남 합계' ,'여 합계']].sum()
# Difference between the step-2 and step-3 male/female totals
'''
The difference falls into three cases for each sex: 1. zero / 2. greater than zero / 3. less than zero
1. If it is zero, the table is complete with no further work.
2. If the difference is greater than zero: add 1 to the rounded value that is below xx.5 and closest to xx.5.
- Why? Among the values that were rounded down, the one closest to being rounded up is the ideal place to add 1.
ex) 2.49999 -> round(2.49999) + 1
3. If the difference is less than zero: subtract 1 from the rounded value that is at or above xx.5 and closest to xx.5.
- Why? Among the values that were rounded up, the one closest to being rounded down is the ideal place to subtract 1.
ex) 2.50001 -> round(2.50001) - 1
'''
sex_diff = before_sex_sum - after_sex_sum
# Step that makes up the per-sex shortfall between the two totals
sex_cols_lst = [male_cols, female_cols]
sex_idx = ['남 합계' ,'여 합계']
for i in range(len(sex_idx)):
if sex_diff.loc[sex_idx[i]] > 0:
# Case: the difference is greater than zero
'''
1. Floor every pre-rounding (step 2) value, then add 0.5.
2. Subtract the pre-rounding (step 2) values from the result of step 1.
3. Replace every negative result with 1, since 1 is the largest possible value.
ex) 13.45 -> floor to 13, then (13 + 0.5) - 13.45 = 0.05
'''
temp = (before_df[sex_cols_lst[i]].astype(int) + 0.5) - before_df[sex_cols_lst[i]] # steps 1 and 2
temp = temp[temp >0].fillna(1) # step 3
v = 1
elif sex_diff.loc[sex_idx[i]] < 0:
# Case: the difference is less than zero
'''
1. Floor every pre-rounding (step 2) value, then add 0.5.
2. Subtract the result of step 1 from the pre-rounding (step 2) values.
3. Replace every negative result with 1, since 1 is the largest possible value.
ex) 13.54 -> floor to 13, then 13.54 - (13 + 0.5) = 0.04
'''
temp = before_df[sex_cols_lst[i]] - (before_df[sex_cols_lst[i]].astype(int) + 0.5) # steps 1 and 2
temp = temp[temp >0].fillna(1) # step 3
v = -1
else:
# If the difference is zero, skip the rest and just continue
continue
# Difference from the actual total: take the absolute value to handle negatives and cast to int
cnt = int(abs(sex_diff.loc[sex_idx[i]]))
row_col = np.unravel_index(np.argsort(temp.values.ravel())[:cnt], temp.shape)
rows = row_col[0]
cols = row_col[1]
# Add v at each (row, column) coordinate
for r in range(len(rows)):
temp = after_df[sex_cols_lst[i]].copy()
temp.iloc[rows[r] ,cols[r]] = temp.iloc[rows[r] ,cols[r]] + v
after_df[sex_cols_lst[i]] = temp
print()
# Once the shortfall has been filled, recompute the totals
after_df['남 합계'] = after_df[male_cols].sum(axis=1)
after_df['여 합계'] = after_df[female_cols].sum(axis=1)
after_df['총계'] = after_df[['남 합계' ,'여 합계']].sum(axis=1)
final_sex_sum = after_df[['남 합계' ,'여 합계']].sum()
# Find the path of the Downloads folder
with OpenKey(HKEY_CURRENT_USER, 'SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders') as key:
Downloads = QueryValueEx(key, '{374DE290-123F-4565-9164-39C4925E467B}')[0]
# Completion message
if final_sex_sum.sum() != num:
messagebox.showerror("메세지 상자","합계가 0이 아닙니다. 문제를 확인해주세요.")
else:
save_name = filename.split('/')[-1]
file_path = '{}/{}{}'.format(Downloads, condition_name, save_name)
if filtering==None:
messagebox.showinfo("메세지 상자", "다운로드 폴더에 저장되었습니다.")
after_df.to_excel(file_path, index=False, encoding='cp949')
else:
if os.path.isfile(file_path):
saved_df = pd.read_excel(file_path)
after_df = | pd.concat([saved_df,after_df], axis=0) | pandas.concat |
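# --- Illustrative sketch (editor's addition, not part of the original script) ---
# A stand-alone toy version of the rounding repair described in the comments above: scale
# cells to a target total, round, then add (or subtract) 1 in the cells whose fractional
# part was closest to .5 until the rounded total matches the target. Numbers are made up.
import numpy as np

def round_preserving_total(raw, target_total):
    scaled = raw / raw.sum() * target_total
    rounded = np.round(scaled).astype(int)
    diff = int(target_total) - int(rounded.sum())      # >0: add 1s, <0: subtract 1s
    if diff > 0:
        key = (np.floor(scaled) + 0.5) - scaled        # small for cells rounded down near .5
    else:
        key = scaled - (np.floor(scaled) + 0.5)        # small for cells rounded up near .5
    key[key <= 0] = 1.0                                # ineligible cells go to the back
    for idx in np.argsort(key)[:abs(diff)]:
        rounded[idx] += 1 if diff > 0 else -1
    return rounded

print(round_preserving_total(np.array([1.0, 1.0, 1.0]), 100))   # e.g. [34 33 33]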
from util import load_csv_as_dataframe
import pandas as pd
from feature_extractor import FeatureExtractor
import numpy as np
import pickle
from dateutil import parser
import monthdelta
import csv
from util import read_csv_file
from dateutil.relativedelta import relativedelta
import timeit
class LeaderBoard():
def __init__(self, lb1_lb2_file='data/LeaderBoardData/TADPOLE_LB1_LB2.csv', d1_file='data/d1_data.csv'):
lb1_lb2 = load_csv_as_dataframe(lb1_lb2_file)
lb1_lb2['LB1'] = pd.to_numeric(lb1_lb2['LB1'])
lb1_lb2['LB2'] = pd.to_numeric(lb1_lb2['LB2'])
lb1_lb2['RID'] = | pd.to_numeric(lb1_lb2['RID']) | pandas.to_numeric |
from keras.layers import Input, Dense, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, LSTM, RepeatVector
from keras.models import Model
from keras.models import model_from_json
from keras.models import load_model
import pandas as pd
import os
import sys
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
# %pylab inline
if len(sys.argv) < 9:
print("Too few arguments")
quit()
elif len(sys.argv) > 9:
print("Too many arguments")
quit()
for i in range (1, len(sys.argv)):
if(sys.argv[i] == "-d"):
input_file = sys.argv[i+1]
elif(sys.argv[i] == "-q"):
query_file = sys.argv[i+1]
elif(sys.argv[i] == "-od"):
out_input_file = sys.argv[i+1]
elif(sys.argv[i] == "-oq"):
out_query_file = sys.argv[i+1]
window_length = 10
encoding_dim = 3
epochs = 100
test_samples = 365
def plot_examples(stock_input, stock_decoded):
n = 10
plt.figure(figsize=(20, 4))
for i, idx in enumerate(list(np.arange(0, test_samples, 50))):
# display original
ax = plt.subplot(2, n, i + 1)
if i == 0:
ax.set_ylabel("Input", fontweight=600)
else:
ax.get_yaxis().set_visible(False)
plt.plot(stock_input[idx])
ax.get_xaxis().set_visible(False)
# display reconstruction
ax = plt.subplot(2, n, i + 1 + n)
if i == 0:
ax.set_ylabel("Output", fontweight=600)
else:
ax.get_yaxis().set_visible(False)
plt.plot(stock_decoded[idx])
ax.get_xaxis().set_visible(False)
num_time_series = 100
input_path = os.path.join(os.path.abspath(__file__), "../../../dir/")
input_path = os.path.join(input_path, input_file)
query_path = os.path.join(os.path.abspath(__file__), "../../../dir/")
query_path = os.path.join(query_path, query_file)
encoder_path = os.path.join(os.path.abspath(__file__), "../../../models/encoder.h5")
autoencoder_path = os.path.join(os.path.abspath(__file__), "../../../models/autoencoder.h5")
df_input = | pd.read_csv(input_path, header=None, delimiter='\t') | pandas.read_csv |
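# --- Illustrative sketch (editor's addition, not part of the original script) ---
# The model definition is not shown in this excerpt; the imports (Input, LSTM, RepeatVector)
# and the window_length / encoding_dim settings above suggest a sequence autoencoder roughly
# like the one below. This is an assumption, not the original architecture.
from keras.layers import Input, LSTM, RepeatVector
from keras.models import Model

def build_lstm_autoencoder(window_length=10, encoding_dim=3):
    inputs = Input(shape=(window_length, 1))
    encoded = LSTM(encoding_dim, name="encoder")(inputs)        # bottleneck vector per window
    decoded = RepeatVector(window_length)(encoded)              # repeat it for every timestep
    decoded = LSTM(1, return_sequences=True, name="decoder")(decoded)
    autoencoder = Model(inputs, decoded)
    encoder = Model(inputs, encoded)
    autoencoder.compile(optimizer="adam", loss="mse")
    return autoencoder, encoder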
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 15 16:41:37 2018
@author: krzysztof
This module contains utilities useful when performing data analysis and drug sensitivity prediction with
Genomics of Drug Sensitivity in Cancer (GDSC) database.
The main utilities are the Drug classes and the Experiment class. All classes whose names begin with "Drug" represent a compound
from GDSC. There is a separate class for every corresponding experiment setup and genomic feature space. All Drug
classes contain methods for extraction and storage of the proper input data. Available data types include: gene expression, binary copy number and coding variants, and cell line tissue type. The set of considered genes is represented as the "targets" attribute of the Drug classes.
The Experiment class is dedicated to the storage and analysis of results coming from machine learning experiments. The actual
machine learning is done outside of the class. The Experiment class has methods for storage, analysis and visualisation
of results.
of results.
Classes:
Drug: Basic class representing a compound from GDSC.
DrugWithDrugBank: Inherits from Drug, accounts for target genes from DrugBank database.
DrugGenomeWide: Inherits from Drug, designed for using genome-wide gene exression as input data.
DrugDirectReactome: Inherits from DrugWithDrugBank, uses only input data related to target genes resulting
from direct compound-pathway matching from Reactome.
DrugWithGenesInSamePathways: Inherits from DrugWithDrugBank, uses only input data related to genes that belong in
the same pathways as target genes.
Experiment: Designed to store and analyze results coming from machine learning experiments.
"""
# Imports
import pandas as pd
import numpy as np
import time
import pickle
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import pearsonr
import collections
# Sklearn imports
from scipy.stats import pearsonr
from sklearn.linear_model import ElasticNet
from sklearn import model_selection
from sklearn import metrics
from sklearn import preprocessing
from sklearn.dummy import DummyRegressor
from sklearn.pipeline import Pipeline
from sklearn import feature_selection
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn.base import clone
# General imports
import multiprocessing
import numpy as np
import pandas as pd
import time
import sys
import dill
import warnings
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import pearsonr
import collections
# Sklearn imports
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn import model_selection
from sklearn.pipeline import Pipeline
from sklearn import metrics
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Lasso, ElasticNet
from stability_selection import StabilitySelection
#################################################################################################################
# Drug class
#################################################################################################################
class Drug(object):
"""Class representing compound from GDSC database.
This is the most basic, parent class. Different experimental settings will use more specific,
children classes. Main function of the class is to create and store input data corresponding to a given
drug. Five types of data are considered: gene expression, copy number variants, coding variants, gene expression
signatures, and tumor tissue type. Class instances are initialized with four basic drug properties: ID, name, gene
targets and target pathway. Data attributes are stored as pandas DataFrames and are filled using data files
from GDSC via corresponding methods.
Attributes:
gdsc_id (int): ID from GDSC website.
name (string): Drug name.
targets (list of strings): Drug's target gene names (HGNC).
target_pathway (string): Drug's target pathway as provided in GDSC annotations.
ensembl targets (list of strings): Drug's target genes ensembl IDs. Can have different length
than "targets" because some gene names may not be matched during mapping. Ensembl IDs are
needed for gene expression data.
map_from_hgnc_to_ensembl (dictionary): Dictionary mapping from gene names to ensembl IDs. Created
after calling the "load_mappings" method.
map_from_ensembl_to_hgnc (dictionary): Dictionary mapping from ensembl IDs to gene names. Created
after calling the "load_mappings" method.
total_no_samples_screened (int): Number of cell lines screened for that drug. Created after
calling the "extract_drug_response_data" method.
response_data (DataFrame): DataFrame with screened cell lines for that drug and corresponding AUC or
IC50 values. Created after calling the "extract_drug_response_data" method.
screened_cell_lines (list of ints): list containing COSMIC IDs representing cell lines screened for
that drug. Created after calling the "extract_screened_cell_lines" method.
gene_expression_data (DataFrame): DataFrame with gene expression data, considering only
target genes. Created after calling the "extract_gene_expression" method
mutation_data (DataFrame): DataFrame with binary calls for coding variants, considering only
target genes. Created after calling the "extract_mutation_data" method.
cnv_data (DataFrame): DataFrame with binary calls for copy number variants, considering only
target genes. Created after calling the "extract_cnv_data" method.
tissue_data (DataFrame): DataFrame with dummy encoded tumor tissue types in screened cell lines.
Dummy encoding results in 13 binary features. Created after calling the
"extract_tissue_data" method.
full_data (DataFrame): DataFrame with combined data coming from given set of genetic data
classes.
Methods:
Instance methods:
__init__: Initialize a Drug instance.
__repr__: Return string representation of an instance, as a command which can be used to create
this instance.
__str__: Return string representation of an instance.
extract_drug_response_data: Generate a DataFrame with drug-response data.
extract_screened_cell_lines: Generate a list of COSMIC IDs representing cell lines screened for that
drug.
extract_gene_expression: Generate a DataFrame with gene expression data for drug's screened cell lines
extract_mutation_data: Generate a DataFrame with binary calls for coding variants.
extract_cnv_data: Generate a DataFrame with binary calls for copy number variants.
extract_cnv_data_faster: Generate a DataFrame with binary calls for copy number variants.
extract_tissue_data: Generate a DataFrame with dummy encoded tissue types.
extract_merck_signatures_data: Generate a DataFrame with gene expression signatures provided by Merck.
concatenate_data: Generate a DataFrame containing all desired genetic data classes. Available data
classes are: gene expression, coding variants, cnv variants and tissue type.
create_full_data: Combines above data extraction methods in order to create desired input data
for the drug with one method call. Returns the full data and saves it in corresponding instance's
field.
return_full_data: Combines above data extraction methods in order to create desired input data
for the drug with one method call. Returns the full data but does not save it.
Class methods:
load_mappings: Load appropriate dictionaries mapping between ensembl and HGNC.
Static methods:
create_drugs: Create a dictionary of Drug class objects, each referenced by it's ID
(keys are drug GDSC ID's)
load_data: Load all needed data files as DataFrames with one function call.
"""
# Class variables
map_from_hgnc_to_ensembl = None
map_from_ensembl_to_hgnc = None
# Instance methods
def __init__(self, gdsc_id, name, targets, target_pathway):
"""Intiliaze the class instance with four basic attributes. "Targets" are gene names
and get mapped into Ensembl IDs using class mapping variable."""
self.gdsc_id = gdsc_id
self.name = name
self.targets = targets
self.target_pathway = target_pathway
self.ensembl_targets = []
for x in self.targets:
try:
self.ensembl_targets.append(self.map_from_hgnc_to_ensembl[x])
except KeyError:
pass
def extract_drug_response_data(self, sensitivity_profiles_df, metric="AUC"):
"""Generate a DataFrame containing reponses for every cell line screened for that drug.
Arguments:
sensitivity_profiles_df (DataFrame): DataFrame of drug response data from GDSC.
metric (string): Which statistic to use as a response metric (default "AUC").
Returns:
None
"""
df = sensitivity_profiles_df[sensitivity_profiles_df.DRUG_ID == self.gdsc_id][
["COSMIC_ID", metric]]
df.columns = ["cell_line_id", metric] # Insert column with samples ID
self.total_no_samples_screened = df.shape[0] # Record how many screened cell lines for drug
self.response_data = df # Put DataFrame into corresponding field
def extract_screened_cell_lines(self, sensitivity_profiles_df):
"""Generate set of cell lines screened for that drug.
Arguments:
sensitivity_profiles_df (DataFrame): DataFrame of drug response data from GDSC.
Returns:
None
"""
self.screened_cell_lines = list(
sensitivity_profiles_df[sensitivity_profiles_df.DRUG_ID == self.gdsc_id]["COSMIC_ID"])
def extract_gene_expression(self, gene_expression_df):
"""Generate DataFrame of gene expression data for cell lines screened for this drug, only
considering drug's target genes.
Arguments:
gene_expression_df (DataFrame): Original GDSC gene expression DataFrame.
sensitivity_profiles_df (DataFrame): DataFrame of drug response data from GDSC.
Returns:
None
"""
cell_lines_str = [] # Gene expressesion DF column names are strings
for x in self.screened_cell_lines:
cell_lines_str.append(str(x))
cl_to_extract = []
for x in cell_lines_str:
if x in list(gene_expression_df.columns):
cl_to_extract.append(x) # Extract only cell lines contained in gene expression data
gene_expr = gene_expression_df[
gene_expression_df.ensembl_gene.isin(self.ensembl_targets)][["ensembl_gene"] + cl_to_extract]
gene_expr_t = gene_expr.transpose()
columns = list(gene_expr_t.loc["ensembl_gene"])
gene_expr_t.columns = columns
gene_expr_t = gene_expr_t.drop(["ensembl_gene"])
rows = list(gene_expr_t.index)
gene_expr_t.insert(0, "cell_line_id", rows) # Insert columns with cell line IDs
gene_expr_t.reset_index(drop=True, inplace=True)
gene_expr_t["cell_line_id"] = pd.to_numeric(gene_expr_t["cell_line_id"])
self.gene_expression_data = gene_expr_t # Put DataFrame into corresponding field
def extract_mutation_data(self, mutation_df):
"""Generate a DataFrame with binary mutation calls for screened cell lines and target genes.
Arguments:
mutation_df: DataFrame with original mutation calls from GDSC.
Returns:
None
"""
targets = [x + "_mut" for x in self.targets]
df = mutation_df.copy()[
mutation_df.cosmic_sample_id.isin(self.screened_cell_lines)]
df = df[df.genetic_feature.isin(targets)][["cosmic_sample_id", "genetic_feature", "is_mutated"]]
cosmic_ids = []
genetic_features = {}
for feature in df.genetic_feature.unique():
genetic_features[feature] = []
for cl_id in df.cosmic_sample_id.unique():
cosmic_ids.append(cl_id)
df_cl = df[df.cosmic_sample_id == cl_id]
for feature in genetic_features:
mutation_status = df_cl[
df_cl.genetic_feature == feature]["is_mutated"].iloc[0]
genetic_features[feature].append(mutation_status)
df1 = pd.DataFrame()
df1.insert(0, "cell_line_id", cosmic_ids) # Insert column with samples IDs
for feature in genetic_features:
df1[feature] = genetic_features[feature]
self.mutation_data = df1 # Put DataFrame into corresponding field
def extract_cnv_data(self, cnv_binary_df):
"""Generate data containing binary CNV calls for cell lines screened for the drug.
Arguments:
cnv_binary_df: DataFrame from GDSC download tool with CNV data.
Returns:
None
"""
df = cnv_binary_df[cnv_binary_df.cosmic_sample_id.isin(self.screened_cell_lines)]
features_to_extract = [] # Map drug's targets to CNV features (segments)
for row in cnv_binary_df.drop_duplicates(subset="genetic_feature").itertuples():
feature_name = getattr(row, "genetic_feature")
genes_in_segment = getattr(row, "genes_in_segment").split(",")
for target in self.targets:
if target in genes_in_segment:
features_to_extract.append(feature_name) # If target is in any segment, add it to the list
features_to_extract = list(set(features_to_extract))
df = df[df.genetic_feature.isin(features_to_extract)]
cosmic_ids = []
feature_dict = {} # Separate lists for every column in final DataFrame
for feature in df.genetic_feature.unique():
feature_dict[feature] = []
for cl_id in df.cosmic_sample_id.unique():
cosmic_ids.append(cl_id)
for feature in feature_dict:
status = df[
(df.cosmic_sample_id == cl_id) & (df.genetic_feature == feature)]["is_mutated"].iloc[0]
feature_dict[feature].append(status)
new_df = pd.DataFrame()
for feature in feature_dict:
new_df[feature] = feature_dict[feature]
new_df.insert(0, "cell_line_id", cosmic_ids)
self.cnv_data = new_df
def extract_cnv_data_faster(self, cnv_binary_df, map_cl_id_and_feature_to_status):
"""Generate data containing binary CNV calls for cell lines screened for the drug.
Faster implementation than original "extract_cnv_data" by using mapping between genes and
genomic segments.
Arguments:
cnv_binary_df: DataFrame from GDSC download tool with CNV data.
Returns:
None
"""
df = cnv_binary_df[cnv_binary_df.cosmic_sample_id.isin(self.screened_cell_lines)]
features_to_extract = [] # Map drug's targets to CNV features (segments)
for row in cnv_binary_df.drop_duplicates(subset="genetic_feature").itertuples():
feature_name = getattr(row, "genetic_feature")
genes_in_segment = getattr(row, "genes_in_segment").split(",")
for target in self.targets:
if target in genes_in_segment:
features_to_extract.append(feature_name) # If target is in any segment, add it to the list
features_to_extract = list(set(features_to_extract))
df = df[df.genetic_feature.isin(features_to_extract)]
cosmic_ids = []
feature_dict = {} # Separate lists for every column in final DataFrame
for feature in features_to_extract:
feature_dict[feature] = []
for cl_id in df.cosmic_sample_id.unique():
cosmic_ids.append(cl_id)
for feature in feature_dict:
status = map_cl_id_and_feature_to_status[(cl_id, feature)]
feature_dict[feature].append(status)
new_df = pd.DataFrame()
for feature in feature_dict:
new_df[feature] = feature_dict[feature]
new_df.insert(0, "cell_line_id", cosmic_ids)
self.cnv_data = new_df
def extract_tissue_data(self, cell_line_list):
"""Generate (dummy encoded) data with cell line tissue type.
Arguments:
cell_line_list (DataFrame): Cell line list from GDSC.
Returns:
None
"""
df = cell_line_list[
cell_line_list["COSMIC_ID"].isin(self.screened_cell_lines)][["COSMIC_ID", "Tissue"]]
df.rename(columns={"COSMIC_ID": "cell_line_id"}, inplace=True)
self.tissue_data = pd.get_dummies(df, columns = ["Tissue"])
def extract_merck_signatures_data(self, signatures_df):
"""Generate data with gene expression signature scores for GDSC cell lines, provided by Merck.
Arguments:
signatures_df (DataFrame): DataFrame with gene signatures for cell lines.
Returns:
None
"""
# Compute list of screened cell lines as strings with prefix "X" in order to match
# signatures DataFrame columns
cell_lines_str = ["X" + str(cl) for cl in self.screened_cell_lines]
# Compute list of cell lines that are contained in signatures data
cls_to_extract = [cl for cl in cell_lines_str
if cl in list(signatures_df.columns)]
# Extract desired subset of signatures data
signatures_of_interest = signatures_df[cls_to_extract]
# Transpose the DataFrame
signatures_t = signatures_of_interest.transpose()
# Create a list of cell line IDs whose format matches rest of the data
cl_ids = pd.Series(signatures_t.index).apply(lambda x: int(x[1:]))
# Insert proper cell line IDs as a new column
signatures_t.insert(0, "cell_line_id", list(cl_ids))
# Drop the index and put computed DataFrame in an instance field
self.merck_signatures = signatures_t.reset_index(drop=True)
def concatenate_data(self, data_combination):
"""Generate data containing chosen combination of genetic data classes.
Arguments:
data_combination: List of strings containing data classes to be included. Available options are:
"mutation", "expression", "CNV", "tissue", "merck signatures".
Returns:
None
"""
# Create a list of DataFrames to include
objects = [self.response_data]
if "mutation" in data_combination and self.mutation_data.shape[0] > 0:
objects.append(self.mutation_data)
if "expression" in data_combination and self.gene_expression_data.shape[0] > 0:
objects.append(self.gene_expression_data)
if "CNV" in data_combination and self.cnv_data.shape[0] > 0:
objects.append(self.cnv_data)
if "tissue" in data_combination and self.tissue_data.shape[0] > 0:
objects.append(self.tissue_data)
if "merck signatures" in data_combination and self.merck_signatures.shape[0] > 0:
objects.append(self.merck_signatures)
# Find intersection in cell lines for all desirable DataFrames
cl_intersection = set(list(self.response_data["cell_line_id"]))
for obj in objects:
cl_intersection = cl_intersection.intersection(set(list(obj["cell_line_id"])))
objects_common = []
for obj in objects:
objects_common.append(obj[obj["cell_line_id"].isin(cl_intersection)])
# Check if all DataFrames have the same number of samples
no_samples = objects_common[0].shape[0]
for obj in objects_common:
assert obj.shape[0] == no_samples
obj.sort_values("cell_line_id", inplace=True)
obj.reset_index(drop=True, inplace=True)
cl_ids = objects_common[0]["cell_line_id"]
df_concatenated = pd.concat(objects_common, axis=1, ignore_index=False)
# -*- coding: utf-8 -*-
'''
Run a test on the argument convincingness dataset, but use something like ten-fold cross validation,
rather than splitting the data by topic.
Unlike standard cross validation, we use only 1/10th of the data in each fold as training data, and test on prediction
for all items. This means that we use roughly half of the unique pairs, with only one annotator per pair.
Background:
- the gold standard was defined by MACE
- with several annotators per pair, and many pairs from the training topics, the individual biases cancel out
- this means there is not much benefit to learning the model of the consensus function from using crowdGPPL in the
cross topic setup
- predicting the personalised preferences is also hard because preferences on each topic can be very different from one
another
Hypothesis:
- if we have few data points, and only one label per pair from one worker, worker biases may be important
- if we have a small set of data from the test topics, biases in that data may also be important for both inferring
consensus and personal preferences
- personal predictions may be less reliant on the consensus when we have some pairs for the test topics, because we
can then infer how the workers deviate from the consensus in each topic -- when predicting cross-topic preferences,
any learned personal biases may not be accurate.
'''
import logging
from scipy.stats.stats import kendalltau
from sklearn.metrics import accuracy_score, log_loss
from sklearn.model_selection import KFold
from personalised_tests import PersonalisedTestRunner
from tests import get_docidxs_from_ids, get_doc_token_seqs
logging.basicConfig(level=logging.DEBUG)
import sys
import os
sys.path.append("./python")
sys.path.append("./python/analysis")
sys.path.append("./python/models")
sys.path.append("./python/analysis/habernal_comparison")
svm_python_path = '~/libsvm-3.22/python'
sys.path.append(os.path.expanduser("~/git/HeatMapBCC/python"))
sys.path.append(os.path.expanduser("~/git/pyIBCC/python"))
sys.path.append(os.path.expanduser("~/data/personalised_argumentation/embeddings/skip-thoughts"))
sys.path.append(os.path.expanduser("~/data/personalised_argumentation/embeddings/Siamese-CBOW/siamese-cbow"))
sys.path.append(os.path.expanduser(svm_python_path))
import time
import pandas as pd
import numpy as np
class RandomSelectionTestRunner(PersonalisedTestRunner):
def __init__(self, current_expt_output_dir):
self.folds = None
self.initial_pair_subset = {}
self.default_ls_values = {}
self.expt_output_dir = current_expt_output_dir
self.vscales = [] # record the latent factor scales
def _choose_method_fun(self, feature_type):
if 'crowdBT' in self.method:
method_runner_fun = self.run_crowd_bt
elif 'cBT_GP' in self.method:
method_runner_fun = self.run_crowd_bt_gpr
else:
method_runner_fun = super(RandomSelectionTestRunner, self)._choose_method_fun(feature_type)
return method_runner_fun
def _set_resultsfile(self, dataset, method):
# To run the active learning tests, call this function with dataset_increment << 1.0.
# To add artificial noise to the data, run with acc < 1.0.
output_data_dir = os.path.join(data_root_dir, 'outputdata/')
if not os.path.isdir(output_data_dir):
os.mkdir(output_data_dir)
output_data_dir = os.path.join(output_data_dir, self.expt_output_dir)
if not os.path.isdir(output_data_dir):
os.mkdir(output_data_dir)
# Select output paths for CSV files and final results
output_filename_template = os.path.join(output_data_dir, '%s_%s')
results_stem = output_filename_template % (dataset, method)
if not os.path.isdir(results_stem):
os.mkdir(results_stem)
pair_pred_file = os.path.join(results_stem, 'pair_pred.csv')
pair_prob_file = os.path.join(results_stem, 'pair_prob.csv')
pair_gold_file = os.path.join(results_stem, 'pair_gold.csv')
ratings_file = os.path.join(results_stem, 'ratings.csv')
results_file = os.path.join(results_stem, 'metrics.csv')
return results_stem, pair_pred_file, pair_prob_file, pair_gold_file, ratings_file, results_file
def run_test_set(self, no_folds, dataset, method):
self.method = method
if self.folds is None or self.dataset != dataset:
self._load_dataset(dataset) # reload only if we use a new dataset
if (dataset == 'UKPConvArgAll' or dataset == 'UKPConvArgStrict' or dataset == 'UKPConvArgCrowd_evalAll') \
and ('IndPref' in method or 'Personalised' in method):
logging.warning(
'Skipping method %s on dataset %s because there are no separate worker IDs.' % (method, dataset))
return
logging.info("**** Running method %s on dataset %s ****" % (method, dataset) )
feature_type = 'both' # can be 'embeddings' or 'ling' or 'both' or 'debug'
embeddings_type = 'word_mean'
self._set_embeddings(embeddings_type)
if 'GP' in method:
self._init_ls(feature_type, embeddings_type)
else:
self.default_ls = []
results_stem, pair_pred_file, pair_prob_file, pair_gold_file, ratings_file, results_file = self._set_resultsfile(dataset, method)
np.random.seed(121) # allows us to get the same initialisation for all methods/feature types/embeddings
# performance metrics are saved in a CSV file, with rows for each fold, and columns for each data type
# predictions are saved in a CSV file, columns correspond to data points.
# For ratings, the first column is gold, but for pairs the gold is in a separate file (because the pairs are different in each fold)
try:
pair_pred = pd.read_csv(pair_pred_file).values.tolist()
except:
pair_pred = []
try:
pair_prob = pd.read_csv(pair_prob_file).values.tolist()
except:
pair_prob = []
try:
rating_pred = pd.read_csv(ratings_file).values.tolist()
except:
rating_pred = []
try:
metrics = pd.read_csv(results_file).values.tolist()
except:
metrics = []
pair_gold_by_fold = []
a1 = []
a2 = []
pair_gold = []
pair_person = []
rating_a = []
rating_gold = []
rating_person = []
X_a1 = []
X_a2 = []
text_a1 = []
text_a2 = []
# load the data from all topics
for topic in self.folds:
# X_train_a1, X_train_a2 are lists of lists of word indexes
X_topic_a1, X_topic_a2, prefs_topic, ids_topic, person_topic, text_topic_a1, text_topic_a2 = self.folds.get(topic)["test"]
testids = np.array([ids_pair.split('_') for ids_pair in ids_topic])
a1_topic = get_docidxs_from_ids(self.docids, testids[:, 0], )
a2_topic = get_docidxs_from_ids(self.docids, testids[:, 1])
a1 = a1 + a1_topic.tolist()
a2 = a2 + a2_topic.tolist()
X_a1 = X_a1 + X_topic_a1
X_a2 = X_a2 + X_topic_a2
text_a1 = text_a1 + text_topic_a1
text_a2 = text_a2 + text_topic_a2
print(("Topic instances ", len(X_topic_a1), " test labels ", len(prefs_topic)))
pair_gold = pair_gold + prefs_topic
pair_person = pair_person + person_topic
_, ratings_topic, argids_topic, person_rank_topic, _ = self.folds_r.get(topic)["test"]
item_idx_topic = [np.argwhere(itemid == self.docids)[0][0] for itemid in argids_topic]
rating_a = rating_a + item_idx_topic
rating_gold = rating_gold + ratings_topic
rating_person = rating_person + person_rank_topic
# map all the person IDs to consecutive indexes
upersonIDs, pair_person = np.unique(pair_person, return_inverse=True)
rating_person = np.array([np.argwhere(upersonIDs == p.strip())[0][0] if p.strip() in upersonIDs else -1
for p in rating_person])
X, uids, utexts = get_doc_token_seqs((a1, a2), [X_a1, X_a2], (text_a1, text_a2))
self.X = X
if len(rating_pred) == 0:
rating_pred = [rating_gold] # first row is gold
pd.DataFrame(rating_pred)
import pandas as pd
from .functions import sort_strings
class FeatureNotSupported(Exception):
"""Not supported Feature type."""
pass
class BaseFeature(object):
"""Base Feature class that every other Feature Class should inherit from.
"""
feature_type = None
"""
feature_type: should be overridden by any class inheriting from BaseFeature.
"""
def data(self):
"""To be overrode."""
raise NotImplemented
def mapping(self):
"""To be overrode."""
raise NotImplemented
class CategoricalFeature(BaseFeature):
"""Categorical Feature class. Inherits from BaseFeature.
Categorical Features are those that have values that are limited and/or fixed in their nature.
However, it doesn't mean that we can't analyze them in similar manner to Numerical Features - e.g.
calculate mean, distribution and other variables. In order to do so, every unique value in the data needs to be
assigned a unique number, which will allow calculations.
In case of Categorical Features, they might already be represented in the data with key of some sort:
"A": "Apple"
"B": "Banana"
This structure is also present in the dict descriptions that can be fed to the analysis.
Raw mapping would associate those values with the unique numbers created during the mapping:
"A": 1
"B": 2
CategoricalFeature class creates mapping between new unique numbers present in the data
and the description (item) of the already provided mapping:
1: "Apple"
2: "Banana"
This way of mapping things should ease out mental connections between what is seen in the visualizations
(numbers mostly) and whats really represented behind those numbers (instead of their symbols).
Class Attributes:
feature_type: "Categorical" hardcoded string
Attributes:
series (pandas.Series): Series holding the data
name (str): name of the Feature
description (str): description of the Feature
imputed_category (bool): flag indicating if the category of the Feature was provided or imputed
transformed (bool): flag indicating if the Feature is pre-transformed or not
mapping (dict): dictionary holding external mapping of values to their 'logical' counterparts
raw_mapping (dict): mapping between value -> raw number
mapped_series (pandas.Series): Series with it's content replaced with raw_mapping
"""
feature_type = "Categorical"
def __init__(self, series, name, description, imputed_category, transformed=False, mapping=None):
"""Construct new CategoricalFeature object.
Additionally create raw_mapping and mapped_series attributes.
Args:
series (pandas.Series): Series holding the data (copy)
name (str): name of the Feature
description (str): description of the Feature
imputed_category (bool): flag indicating if the category of the Feature was provided or imputed
transformed (bool, Optional): flag indicating if the Feature is pre-transformed or not, defaults to False
mapping (dict): dictionary holding external mapping of values to their 'logical' counterparts, defaults
to None
"""
self.series = series.copy()
self.name = name
self.description = description
self.imputed_category = imputed_category # flag to check if type of feature was provided or imputed
self.transformed = transformed # flag to check if the feature is already transformed
self.original_mapping = mapping
self.raw_mapping = self._create_raw_mapping()
self.mapped_series = self._create_mapped_series()
self._descriptive_mapping = None
def data(self):
"""Return mapped_series property."""
return self.mapped_series
def original_data(self):
"""Return original Series."""
return self.series
def mapping(self):
"""Return _descriptive_mapping attribute and if it's None, create it with _create_descriptive_mapping method."""
if not self._descriptive_mapping:
self._descriptive_mapping = self._create_descriptive_mapping()
return self._descriptive_mapping
def _create_mapped_series(self):
"""Return series property with it's content replaced with raw_mapping dictionary."""
return self.series.replace(self.raw_mapping)
def _create_raw_mapping(self):
"""Return dictionary of 'unique value': number pairs.
Replace every categorical value with a number starting from 1 (sorted alphabetically). Starting with 1
to be consistent with "count" obtained with .describe() methods on dataframes.
Returns:
dict: 'unique value': number pairs dict.
"""
values = sorted(self.series.unique(), key=str)
mapped = {value: number for number, value in enumerate(values, start=1) if not pd.isna(value)}
return mapped
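# Example of the raw mapping described in the class docstring (illustrative values):
# series = pd.Series(["Banana", "Apple", "Apple", None])
# sorted unique values -> ["Apple", "Banana", None]; numbering starts at 1 and NaN is skipped,
# so _create_raw_mapping() yields {"Apple": 1, "Banana": 2}.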
import numpy as np
np.warnings.filterwarnings('ignore') #to not display numpy warnings... be careful
import pandas as pd
from mpi4py import MPI
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from subprocess import call
from orca import *
from orca.data import *
from datetime import datetime
import warnings
from ptreeopt.tree import PTree
warnings.filterwarnings('ignore')
# this whole script will run on all processors requested by the job script
with open('orca/data/scenario_names_all.txt') as f:
scenarios = f.read().splitlines()
with open('orca/data/demand_scenario_names_all.txt') as f:
demand_scenarios = f.read().splitlines()
calc_indices = False
climate_forecasts = False
simulation = True
tree_input_files = False
indicator_data_file = False
window_type = 'rolling'
window_length = 40
index_exceedence_sac = 8
shift = 0
SHA_shift = shift
ORO_shift = shift
FOL_shift = shift
SHA_baseline = pd.read_csv('orca/data/baseline_storage/SHA_storage.csv',parse_dates = True, index_col = 0)
SHA_baseline = SHA_baseline[(SHA_baseline.index >= '2006-09-30') & (SHA_baseline.index <= '2099-10-01')]
ORO_baseline = pd.read_csv('orca/data/baseline_storage/ORO_storage.csv',parse_dates = True, index_col = 0)
ORO_baseline = ORO_baseline[(ORO_baseline.index >= '2006-09-30') & (ORO_baseline.index <= '2099-10-01')]
FOL_baseline = pd.read_csv('orca/data/baseline_storage/FOL_storage.csv',parse_dates = True, index_col = 0)
FOL_baseline = FOL_baseline[(FOL_baseline.index >= '2006-09-30') & (FOL_baseline.index <= '2099-10-01')]
features = json.load(open('orca/data/json_files/indicators_whole_bounds.json'))
feature_names = []
feature_bounds = []
indicator_codes = []
min_depth = 4
for k,v in features.items():
indicator_codes.append(k)
feature_names.append(v['name'])
feature_bounds.append(v['bounds'])
action_dict = json.load(open('orca/data/json_files/action_list.json'))
actions = action_dict['actions']
snapshots = pickle.load(open('snapshots/training_scenarios_seed_2.pkl', 'rb'))
P = snapshots['best_P'][-1][0]
demand_indicators = {}
for D in demand_scenarios:
dfdemand = pd.read_csv('orca/data/demand_files/%s.csv'%D, index_col = 0, parse_dates = True)
dfdemand['demand_multiplier'] = dfdemand['combined_demand']
dfd_ind = pd.DataFrame(index = dfdemand.index)
for i in features: #indicators
ind = features[i]
if ind['type'] == 'demand':
if ind['delta'] == 'no':
if ind['stat'] == 'mu':
dfd_ind[i] = dfdemand.demand_multiplier.resample('AS-OCT').first().rolling(ind['window']).mean()*100
elif ind['stat'] == 'sig':
dfd_ind[i] = dfdemand.demand_multiplier.resample('AS-OCT').first().rolling(ind['window']).std()*100
elif ind['stat'] == 'max':
dfd_ind[i] = dfdemand.demand_multiplier.resample('AS-OCT').first().rolling(ind['window']).max()*100
else:
if ind['stat'] == 'mu':
dfd_ind[i] = dfdemand.demand_multiplier.resample('AS-OCT').first().rolling(ind['window']).mean().pct_change(periods=ind['delta'])*100
elif ind['stat'] == 'sig':
dfd_ind[i] = dfdemand.demand_multiplier.resample('AS-OCT').first().rolling(ind['window']).std().pct_change(periods=ind['delta'])*100
elif ind['stat'] == 'max':
dfd_ind[i] = dfdemand.demand_multiplier.resample('AS-OCT').first().rolling(ind['window']).max().pct_change(periods=ind['delta'])*100
elif ind['type'] == "discount":
discount_indicator = i
demand_indicators[D] = dfd_ind
indicator_columns = []
comm = MPI.COMM_WORLD # communication object
rank = comm.rank # what number processor am I?
sc = scenarios[rank]
call(['mkdir', 'orca/data/scenario_runs/%s'%sc])
if calc_indices:
gains_loop_df = pd.read_csv('orca/data/historical_runs_data/gains_loops.csv', index_col = 0, parse_dates = True)
OMR_loop_df = pd.read_csv('orca/data/historical_runs_data/OMR_loops.csv', index_col = 0, parse_dates = True)
input_df = pd.read_csv('orca/data/input_climate_files/%s_input_data.csv'%sc, index_col = 0, parse_dates = True)
proj_ind_df, ind_df = process_projection(input_df,gains_loop_df,OMR_loop_df,'orca/data/json_files/gains_regression.json','orca/data/json_files/inf_regression.json',window = window_type)
proj_ind_df.to_csv('orca/data/scenario_runs/%s/orca-data-processed-%s.csv'%(sc,sc))
ind_df.to_csv('orca/data/scenario_runs/%s/hydrologic-indicators-%s.csv'%(sc,sc))
# proj_ind_df = pd.read_csv('orca/data/scenario_runs/%s/orca-data-processed-%s.csv'%(sc,sc),index_col = 0, parse_dates = True)
WYI_stats_file = pd.read_csv('orca/data/forecast_regressions/WYI_forcasting_regression_stats.csv', index_col = 0, parse_dates = True)
carryover_stats_file = pd.read_csv('orca/data/forecast_regressions/carryover_regression_statistics.csv', index_col = 0, parse_dates = True)
print('indices done')
if climate_forecasts:
proj_ind_df = pd.read_csv('orca/data/scenario_runs/%s/orca-data-processed-%s.csv'%(sc,sc), index_col = 0, parse_dates = True)
forc_df= projection_forecast(proj_ind_df,WYI_stats_file,carryover_stats_file,window_type,window_length, index_exceedence_sac)
forc_df.to_csv('orca/data/scenario_runs/%s/orca-data-climate-forecasted-%s.csv'%(sc,sc))
print('forecast done')
if tree_input_files:
discount_vals = np.load('orca/data/random-samples/discount_rates.npy')
random_demands = np.load('orca/data/random-samples/random_demands.npy')
forc_df = pd.read_csv('orca/data/scenario_runs/%s/orca-data-climate-forecasted-%s.csv'%(sc,sc), index_col = 0, parse_dates = True)
from concurrent.futures import ProcessPoolExecutor, as_completed
from itertools import combinations
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
from networkx.algorithms.centrality import edge_betweenness_centrality
from numpy import log
from scipy.special import betaln
from .dendrogram import extract_all_nodes
from ALLCools.plot.dendro import *
def linkage_to_graph(linkage):
"""Turn the linkage matrix into a graph, an epimutation will just be remove one edge from the graph"""
_linkage = linkage.astype(int)
n_leaf = _linkage.shape[0] + 1
edges = []
for i in range(_linkage.shape[0]):
cur_node = i + n_leaf
left, right, *_ = _linkage.iloc[i]
edges.append([left, cur_node])
edges.append([right, cur_node])
g = nx.Graph()
g.add_edges_from(edges)
return g
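# Illustrative usage (assumes a scipy-style linkage passed as a DataFrame):
# linkage = pd.DataFrame([[0, 1, 0.5, 2], [2, 3, 1.0, 3]])  # 3 leaves -> internal nodes 3 and 4
# g = linkage_to_graph(linkage)
# sorted(g.edges()) -> [(0, 3), (1, 3), (2, 4), (3, 4)]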
def cut_by_highest_betweenness_centrality(g):
# order graph node by betweenness_centrality
highest_centrality_edge = pd.Series(edge_betweenness_centrality(g)).sort_values(ascending=False).index[0]
_g = g.copy()
_g.remove_edge(*highest_centrality_edge)
left_tree, right_tree = nx.connected_component_subgraphs(_g)
return left_tree, right_tree, highest_centrality_edge
def log_proba_beta_binomial(x, n, a, b):
"""log likelihood for the beta-binomial dist, ignore part not related to a and b."""
like = betaln((a + x), (b + n - x)) - betaln(a, b)
# when a or b has 0, like will have nan
return like.fillna(0)
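# Illustrative usage (small pandas inputs; the dropped term log C(n, x) does not depend on a or b):
# x, n = pd.DataFrame([[3]]), pd.DataFrame([[10]])
# a, b = pd.DataFrame([[4]]), pd.DataFrame([[8]])
# log_proba_beta_binomial(x, n, a, b) -> 1x1 DataFrame holding betaln(7, 15) - betaln(4, 8)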
def parse_one_pattern(tree_g, edges_to_remove, mc_df, cov_df):
"""
for a particular epimutation combination (edges_to_remove),
calculate the a and b for beta-binomial dist in each leaf node group.
after removing the edges (epimutations),
the leaf node group are leaf nodes in each of the disconnected sub graph.
"""
group_mc_df = mc_df.copy()
group_un_mc_df = cov_df - group_mc_df
sub_g = tree_g.copy()
if len(edges_to_remove) > 0: # this is the case of adding empty edge by left-right combine
sub_g.remove_edges_from(edges_to_remove)
# get disconnected sub-graphs
sub_tree = nx.connected_component_subgraphs(sub_g)
# for each sub-graph, add up the mc and un-mc of all leaf nodes for group a, b in beta-binomial dist
for _tree in sub_tree:
judge = group_mc_df.columns.isin(_tree.nodes)
if judge.sum() == 0:
# if sub-graph do not have leaf nodes, skip this sub-graph
continue
group_mc_df.loc[:, judge] = group_mc_df.loc[:, judge].sum(
axis=1).values[:, None]
group_un_mc_df.loc[:, judge] = group_un_mc_df.loc[:, judge].sum(
axis=1).values[:, None]
# group_mc_df is a, group_un_mc_df is b for beta-binomial dist
# each group of leaf nodes share same a, b
return group_mc_df, group_un_mc_df
def mutation_likelihood(n_mutation, p_mutation, n_edges):
lp0 = n_mutation * log(p_mutation) + \
(n_edges - n_mutation) * log(1 - p_mutation)
return lp0
def _max_likelihood_tree_worker(tree_g, mc_df, cov_df, max_mutation=2, p_mutation=0.1, sub_tree_cutoff=12):
top_n = 1
n_edges = len(tree_g.edges)
max_mutation = min(n_edges, max_mutation)
record_names = mc_df.index
if n_edges > sub_tree_cutoff:
# cut the tree into left and right in the edge that has biggest betweenness_centrality
# calculate best patterns for left and right separately, and then joint consider the overall pattern
left_tree, right_tree, removed_edge = cut_by_highest_betweenness_centrality(tree_g)
left_best_patterns, _ = _max_likelihood_tree_worker(
left_tree,
mc_df=mc_df.loc[:, mc_df.columns.isin(left_tree.nodes)],
cov_df=cov_df.loc[:, cov_df.columns.isin(left_tree.nodes)],
max_mutation=max_mutation, p_mutation=p_mutation, sub_tree_cutoff=sub_tree_cutoff)
right_best_patterns, _ = _max_likelihood_tree_worker(
right_tree,
mc_df=mc_df.loc[:, mc_df.columns.isin(right_tree.nodes)],
cov_df=cov_df.loc[:, cov_df.columns.isin(right_tree.nodes)],
max_mutation=max_mutation, p_mutation=p_mutation, sub_tree_cutoff=sub_tree_cutoff)
# for each DMR, go through all possible combination of best left and right pattern,
# when not exceed max_mutation, also consider whether should we add the removed edge or not
best_pattern_final = {}
likelihood_final = {}
for record_name in record_names:
_this_mc_df = mc_df.loc[[record_name]]
_this_cov_df = cov_df.loc[[record_name]]
left_patterns = list(left_best_patterns[record_name]) + [()] # add empty choice
right_patterns = list(right_best_patterns[record_name]) + [()] # add empty choice
middle_patterns = [[removed_edge], []]
# list all possible combined patterns
pattern_dict = {}
for left_i, left_pattern in enumerate(left_patterns):
for right_i, right_pattern in enumerate(right_patterns):
for middle_pattern in middle_patterns:
joint_pattern = (list(left_pattern) if len(left_pattern) != 0 else []) + (
list(right_pattern) if len(right_pattern) != 0 else []) + (
list(middle_pattern) if len(middle_pattern) != 0 else [])
_n_mutation = len(joint_pattern)
if _n_mutation > max_mutation:
continue
_this_group_mc_df, _this_group_un_mc_df = parse_one_pattern(
tree_g, joint_pattern, _this_mc_df, _this_cov_df)
# calculate tree likelihood on current pattern for all DMR
dmr_tree_likelihood = log_proba_beta_binomial(
_this_mc_df, _this_cov_df, _this_group_mc_df, _this_group_un_mc_df).values.sum()
# add mutation prior to tree likelihood, save to records
lp0 = mutation_likelihood(_n_mutation, p_mutation, n_edges)
try:
pattern_dict[_n_mutation][tuple(joint_pattern)] = dmr_tree_likelihood + lp0
except KeyError:
pattern_dict[_n_mutation] = {tuple(joint_pattern): dmr_tree_likelihood + lp0}
_this_final_pattern = []
_this_final_likelihood = []
for _n_mutation, _n_mutation_patterns in pattern_dict.items():
if _n_mutation != 0:
_s = pd.Series(_n_mutation_patterns).sort_values(ascending=False)[:top_n]
_this_final_pattern += _s.index.tolist()
_this_final_likelihood += _s.tolist()
else:
# empty pattern
_this_final_pattern += [()]
_this_final_likelihood += list(_n_mutation_patterns.values())
best_pattern_final[record_name] = np.array(_this_final_pattern)
likelihood_final[record_name] = np.array(_this_final_likelihood)
return pd.Series(best_pattern_final), pd.Series(likelihood_final)
else:
records = {}
mutation_patterns = {}
for n_mutation in range(1, max_mutation + 1):
# Prior probability of the mutations, which is same for each n_mutation
lp0 = n_mutation * log(p_mutation) + \
(n_edges - n_mutation) * log(1 - p_mutation)
# each epimutation is removing one edge from the graph
# for N epimutation, the result graph contain N + 1 disconnected sub-graph
for i, edges in enumerate(combinations(tree_g.edges, n_mutation)):
# get a and b for beta-binomial dist
group_mc_df, group_un_mc_df = parse_one_pattern(tree_g, edges, mc_df, cov_df)
# calculate tree likelihood on current pattern for all DMR
dmr_tree_likelihood = log_proba_beta_binomial(mc_df, cov_df,
group_mc_df, group_un_mc_df).sum(axis=1)
# add mutation prior to tree likelihood, save to records
records[(n_mutation, i)] = dmr_tree_likelihood + lp0
mutation_patterns[(n_mutation, i)] = edges
# records_df: each row is a DMR record, each column is a (n_mutation, mutation_pattern_idx)
records_df = pd.DataFrame(records)
# mutation_pattern_series, index is (n_mutation, mutation_pattern_idx), value is the actual mutation pattern
mutation_pattern_series = pd.Series(mutation_patterns)
def __get_row_best_patterns(_row):
_row_best_patterns = []
_row_best_likelihoods = []
for group, sub_row in _row.groupby(_row.index.get_level_values(0)):
# index is pattern id, value is likelihood
selected_pattern = sub_row.sort_values(ascending=False)[:top_n]
_row_best_patterns.append(mutation_pattern_series.loc[selected_pattern.index])
_row_best_likelihoods.append(selected_pattern)
return pd.concat(_row_best_patterns).values, pd.concat(_row_best_likelihoods)
import os
import time
import load_data
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
import numpy as np
from models.LSTM import LSTMClassifier
import argparse
import pandas as pd
import ipdb
TEXT, vocab_size, word_embeddings, train_iter, valid_iter, test_iter = load_data.load_dataset()
def clip_gradient(model, clip_value):
params = list(filter(lambda p: p.grad is not None, model.parameters()))
for p in params:
p.grad.data.clamp_(-clip_value, clip_value)
def train_model(model, train_iter, epoch):
total_epoch_loss = 0
total_epoch_acc = 0
model.cuda()
optim = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()))
steps = 0
model.train()
for idx, batch in enumerate(train_iter):
text = batch.text[0]
target = batch.label
target = torch.autograd.Variable(target).long()
if torch.cuda.is_available():
text = text.cuda()
target = target.cuda()
if text.size()[0] != 32:
# One of the batch returned by BucketIterator has length different than 32.
continue
optim.zero_grad()
prediction = model(text)
loss = loss_fn(prediction, target)
num_corrects = (torch.max(prediction, 1)[1].view(target.size()).data == target.data).float().sum()
acc = 100.0 * num_corrects/len(batch)
loss.backward()
clip_gradient(model, 1e-1)
optim.step()
steps += 1
#ipdb.set_trace()
if steps % 100 == 0:
print(f'Epoch: {epoch+1}, Idx: {idx+1}, Training Loss: {loss.item():.4f}, Training Accuracy: {acc.item(): .2f} percent')
total_epoch_loss += loss.item()
total_epoch_acc += acc.item()
return total_epoch_loss/len(train_iter), total_epoch_acc/len(train_iter)
def eval_model(model, val_iter):
# adding for error analysis
ex_list = []
total_epoch_loss = 0
total_epoch_acc = 0
model.eval()
with torch.no_grad():
for idx, batch in enumerate(val_iter):
text = batch.text[0]
if text.size()[0] != 32:
continue
target = batch.label
target = torch.autograd.Variable(target).long()
if torch.cuda.is_available():
text = text.cuda()
target = target.cuda()
prediction = model(text)
loss = loss_fn(prediction, target)
num_corrects = (torch.max(prediction, 1)[1].view(target.size()).data == target.data).sum()
acc = 100.0 * num_corrects/len(batch)
total_epoch_loss += loss.item()
total_epoch_acc += acc.item()
#error analysis:
for i in range(len(batch)):
ex = {"text" : " ".join([TEXT.vocab.itos[x] for x in batch.text[0][i] if x != 1]),
"length" : len([TEXT.vocab.itos[x] for x in batch.text[0][i] if x != 1]),
"pred" : torch.max(prediction, 1)[1][i].item(),
"target" : target[i].item(),
"correct" : 1 if torch.max(prediction, 1)[1][i].item() == target[i].item() else 0
}
ex_list.append(ex)
#ipdb.set_trace()
return total_epoch_loss/len(val_iter), total_epoch_acc/len(val_iter), ex_list
# TODO: turn these into arguments / manually change parameters
# learning rate: 2e-3, 2e-4, 2e-5
# hidden_size: 128, 256, 512
# embedding_length: 150, 300, 600
# 3x3x3 = 27 models (don't worry about random seeds for now)
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--learning-rate', '-l', help="learning rate", type=float, default=2e-5)
parser.add_argument('--batch-size', '-b', help="batch size", type=int, default=32)
parser.add_argument('--output-size', '-o', type=int, default=3)
parser.add_argument('--hidden-size', '-h', type=int, default=256)
parser.add_argument('--embedding-length', '-e', type=int, default=300)
args = parser.parse_args()
learning_rate = args.learning_rate
batch_size = args.batch_size
output_size = args.output_size
hidden_size = args.hidden_size
embedding_length = args.embedding_length
print("Hyperparams:", args)
model = LSTMClassifier(batch_size, output_size, hidden_size, vocab_size, embedding_length, word_embeddings)
loss_fn = F.cross_entropy
for epoch in range(10):
train_loss, train_acc = train_model(model, train_iter, epoch)
val_loss, val_acc, _ = eval_model(model, valid_iter)
print(f'Epoch: {epoch+1:02}, Train Loss: {train_loss:.3f}, Train Acc: {train_acc:.2f}%, Val. Loss: {val_loss:.3f}, Val. Acc: {val_acc:.2f}%')
test_loss, test_acc, ex_list = eval_model(model, test_iter)
print(f'Test Loss: {test_loss:.3f}, Test Acc: {test_acc:.2f}%')
# TODO: find a way to save models/results?
filename = f"results_{args.learning_rate}_{args.hidden_size}_{args.embedding_length}.csv"
df = pd.DataFrame(ex_list)
#-*- coding:utf-8 -*-
import sys
import os
import pandas as pd
import numpy as np
import time
from collections import deque
import copy
# need import understand, and set PYTHONPATH of the UNDERSTAND tool
sys.path.append(r'C:\SciTools\bin\pc-win64\Python')#Installation path of the UNDERSTAND tool
import understand#If PYTHONPATH is set correctly, the import statement will run correctly even if an error is reported in the IDE.
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('max_colwidth',200)
np.set_printoptions(threshold=np.inf)
# Label comparison
# Based on the code comparison result from FUNCTION_getEquivalancePartition_bool, determine which version sets have inconsistent defect labels.
def FUNCTION_labelComparison(twoDimensionalArray_sameCodeVersions_bool,array_labels_allVersions,array_versionNum_allVersions):
# Examples for testing
# twoDimensionalArray_sameCodeVersions_bool = np.array([[ True, False, False, True, False, True],
# [False, True, False, False, True, False],
# [False, False, False, False, False, False],
# [False, False, False, False, False, False],
# [False, False, False, False, False, False],
# [False, False, False, False, False, False]])
# array_labels_allVersions = np.array([1,1,0,0,1,1])
list_versionSet_IL = []
for i_row_bool in twoDimensionalArray_sameCodeVersions_bool:
array_labels_i_row = array_labels_allVersions[i_row_bool];
if array_labels_i_row.size:
if not ((array_labels_i_row == 1).all() or (array_labels_i_row == 0).all()):
list_versionSet_IL_oneVersionSet = array_versionNum_allVersions[i_row_bool]
list_versionSet_IL.append(list_versionSet_IL_oneVersionSet);
return list_versionSet_IL;
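# For the commented example above: the first row groups versions 0, 3 and 5 with labels
# [1, 0, 1], which are inconsistent, so that version set is reported; the second row groups
# versions 1 and 4 with labels [1, 1], which are consistent, so it is skipped.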
# Code comparison
# Judging which versions of each cross-version module have the same code is equivalent to dividing equivalence classes.
def FUNCTION_getEquivalancePartition_bool(X1):
# Examples for testing
# Suppose the code (string) of X1 on five versions is as follows:
# X1=['string_1','string_2','string_1','string_2','string_1']
# X1=['aa','ab','ac','aa','ab','aa']
num_versions = len(X1)
array_bool_X1 = np.zeros((num_versions, num_versions), dtype=bool)
array_bool_notFinded = np.ones(num_versions, dtype = bool)
for i in range(0,num_versions):
if array_bool_notFinded[i]:
string_first_file = X1[i];
for j in range(i+1,num_versions):
string_second_file = X1[j];
if string_first_file == string_second_file:
array_bool_notFinded[i] = False;
array_bool_notFinded[j] = False;
array_bool_X1[i][i] = True;
array_bool_X1[i][j] = True;
return array_bool_X1;
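# For the second commented example above (X1 = ['aa','ab','ac','aa','ab','aa']): row 0 marks
# columns 0, 3 and 5 True (all 'aa'), row 1 marks columns 1 and 4 True (both 'ab'); 'ac' has no
# duplicate, so its row and all remaining rows stay False.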
# Filter comments, whitespace, and blank lines in the code (.udb file) of a module
def FUNCTION_getFilteredCode(fileEntity):
sEnt = fileEntity;
str_codeOneFile = '';
file=startLine=endLine='';
if sEnt.kind().check("file"):
file = sEnt;
# The line numbers corresponding to the beginning and end of the module in the file
startLine = 1;
endLine = sEnt.metric(["CountLine"])["CountLine"];
else:
file = sEnt.ref().file();
# The line numbers corresponding to the beginning and end of the module in the file
startRef = sEnt.refs("definein","",True)[0];
endRef = sEnt.refs("end","",True)[0];
startLine = startRef.line();
endLine = endRef.line();
# The lexical stream pointer for the file
lexer = file.lexer();
# The token stream pointer of the module (content from the start line to the end line)
lexemes = lexer.lexemes(startLine, endLine);
length = len(lexemes);
while (length == 0):
endLine = endLine - 1;
lexemes = lexer.lexemes(startLine, endLine);
length = len(lexemes);
# Is the current token in the middle of multiple consecutive white spaces
in_whitespace = 0;
# Scan backward from the first token in turn, and replace the consecutive blank characters with a space, and the other contents remain unchanged
for lexeme in lexemes:
# If the current token is white space (including comments, spaces, and line breaks)
if ( lexeme.token() == "Comment" or lexeme.token() == "Whitespace" or lexeme.token() == "Newline"):
# If it is the first white space character
if not in_whitespace:
# Add a white space to the result string
str_codeOneFile = str_codeOneFile + "";
# Remember a white space that has been encountered
in_whitespace = 1;
else:#If it is not a white space character
str_codeOneFile = str_codeOneFile + lexeme.text();
in_whitespace = 0;
return str_codeOneFile;
# Search the corresponding file entity according to the relname of cross-version module
def FUNCTION_searchCrossVersionInstance_special(db,searchFileName,InstanceID_udbType):
if InstanceID_udbType == 'file':
allfiles = db.ents("file ~unknown ~unresolved");
for file in allfiles:
if file.relname().find(searchFileName)!=-1:
return file;
if InstanceID_udbType == 'class':
allfiles = db.ents('class ~unknown ~unresolved, interface ~unknown ~unresolved');
for file in allfiles:
if file.longname().find(searchFileName)!=-1:
return file;
# Search the corresponding file entity according to the relname of cross-version module
def FUNCTION_searchCrossVersionInstance(db,searchFileName,InstanceID_udbType):
if InstanceID_udbType == 'file':
allfiles = db.ents("file ~unknown ~unresolved");
for file in allfiles:
if file.relname() == searchFileName:
return file;
if InstanceID_udbType == 'class':
allfiles = db.ents('class ~unknown ~unresolved, interface ~unknown ~unresolved');
for file in allfiles:
if file.longname() == searchFileName:
return file;
# Read the code in the .udb file
def FUNCTION_readModuleCode(fileUdbPath_i_version,i_crossVersionModule,InstanceID_udbType,dataset_style):
# Open Database
db = understand.open(fileUdbPath_i_version);
# Find the file entity corresponding to the cross-version module from the .udb file
if dataset_style == 'IND-JLMIV+R-2020':
fileEntity = FUNCTION_searchCrossVersionInstance_special(db,i_crossVersionModule,InstanceID_udbType);
else:
fileEntity = FUNCTION_searchCrossVersionInstance(db,i_crossVersionModule,InstanceID_udbType);
# Filter comments, blanks, and blank lines of code in the module
str_codeOneFile = FUNCTION_getFilteredCode(fileEntity);
# close database
db.close();
return str_codeOneFile;
# For the specific dataset used, the file name needs to be normalized, otherwise the corresponding module may not be found in the downloaded project code.
def FUNCTION_ReverseHandleSpecialProject(i_crossVersionModule,projectName,versionNumber):
#需要减去"src"前缀的项目列表
list_projectName_needPrefix1 = ["commons-jcs","commons-bcel","commons-beanutils","commons-codec","commons-compress","commons-digester",
"commons-io","commons-net","systemml","wss4j","nutch"];#根据交集结果,统计需要加src的项目
#需要减去"deltaspike"前缀的项目列表
list_projectName_needPrefix2 = ["deltaspike"];
# Projects where the prefix before a specific string needs to be removed
list_projectName_removePrefix = ["tika-0.1","tika-0.2","tika-0.3"];
#需减去"src\\main"前缀的项目版本列表
list_projectSpecificVersion_needPrefix1 = ["commons-math-1.0","commons-math-1.1","commons-math-1.2"];
#需减去"src"前缀的项目版本列表
list_projectSpecificVersion_needPrefix2 = [
"commons-configuration-1.0","commons-configuration-1.1","commons-configuration-1.2","commons-configuration-1.3","commons-configuration-1.4","commons-configuration-1.5","commons-configuration-1.6","commons-configuration-1.7",
"commons-collections-1.0","commons-collections-2.0","commons-collections-2.1","commons-collections-3.0","commons-collections-3.1","commons-collections-3.2","commons-collections-3.3",
"commons-lang-1.0","commons-lang-2.0","commons-lang-2.1","commons-lang-2.2","commons-lang-2.3","commons-lang-2.4",
"commons-math-2.0","commons-math-2.1","commons-math-2.2","commons-math-3.0","commons-math-3.1","commons-math-3.2","commons-math-3.3","commons-math-3.4","commons-math-3.5","commons-math-3.6",
"commons-validator-1.0","commons-validator-1.1.0","commons-validator-1.2.0","commons-validator-1.3.0"];
#需要减去"giraph-core\\src\\"前缀的项目列表
list_projectSpecificVersion_needPrefix3 = ["giraph-0.1.0"];
#需要减去"archiva-modules\\"前缀的项目列表(除4个以"archiva-cli"开头的特定实例外)
list_projectSpecificVersion_needPrefix4 = ["archiva-1.0"];
# Project versions whose unified path changes need to be reversed
list_projectName_changePath1 = ["commons-beanutils-1.9.0",
"commons-bcel-6.0","commons-bcel-6.1","commons-bcel-6.2",#除这三个版本外,之前的版本不需要加src
"commons-codec-1.6","commons-codec-1.7","commons-codec-1.8","commons-codec-1.9","commons-codec-1.10","commons-codec-1.11",#除这几个版本外,之前的版本需要加src
"commons-io-2.0","commons-io-2.1","commons-io-2.2","commons-io-2.3","commons-io-2.4","commons-io-2.5",
"commons-net-2.0","commons-net-2.1","commons-net-2.2","commons-net-3.0","commons-net-3.1","commons-net-3.2","commons-net-3.3","commons-net-3.4","commons-net-3.5","commons-net-3.6",
];
list_projectName_changePath2 = ["santuario-java-2.0.0","santuario-java-2.1.0"];
list_projectName_changePath3 = ["commons-collections-4.0","commons-collections-4.1"];
list_projectName_changePath4 = ["commons-configuration-2.0","commons-configuration-2.1","commons-configuration-2.2"];
list_projectName_changePath5 = ["commons-digester-3.0","commons-digester-3.1","commons-digester-3.2"];
list_projectName_changePath6 = ["commons-lang-3.0","commons-lang-3.1","commons-lang-3.2","commons-lang-3.3","commons-lang-3.4","commons-lang-3.5","commons-lang-3.6","commons-lang-3.7"];
list_projectName_changePath7 = ["commons-math-3.0","commons-math-3.1","commons-math-3.2","commons-math-3.3","commons-math-3.4","commons-math-3.5","commons-math-3.6"];
# The following changes need to be reversed
# Paths starting with "common", "cube", "dictionary", "job", "metadata" or "storage" need the "core-" prefix added
list_projectName_changePath8 = ["kylin-0.6.1","kylin-0.7.1","kylin-1.0.0","kylin-1.1.0","kylin-1.2.0","kylin-1.3.0"];
#需要把"main\\java\\"变为"src\\share"
list_projectName_changePath9 = ["commons-validator-1.4.0","commons-validator-1.5.0","commons-validator-1.6.0"]
#需要把"\\vfs2\\"变为"\\vfs\\"
list_projectName_changePath10 = ["commons-vfs-2.0","commons-vfs-2.1"]
#需要把"\\vfs2\\"变为"\\vfs\\"
#需要把"commons-vfs2\\"变为"core\\"以及"commons-vfs2-sandbox\\"变为"sandbox\\"
list_projectName_changePath11 = ["commons-vfs-2.2"]
#需要把"commons-jcs-core\\src\\main\\java\\org\\apache\\commons\\"变为"src\\java\\org\\apache\\"
#以及"commons-jcs-sandbox\\yajcache\\src\\main\\java\\org\\apache\\commons\\"变为"sandbox\\yajcache\\src\\org\\apache\\"
#以及"src\\experimental\\org\\apache\\commons\\"变为"src\\experimental\\org\\apache\\"
list_projectName_changePath12 = ["commons-jcs-2.0","commons-jcs-2.1","commons-jcs-2.2"]
# jspwiki-2.9.0 needs "src\\org\\apache\\wiki\\" changed to "src\\com\\ecyrd\\jspwiki\\"
list_projectName_changePath13 = ["jspwiki-2.9.0"]
# jspwiki-2.10.0 needs "main\\java\\org\\apache\\wiki\\" changed to "src\\com\\ecyrd\\jspwiki\\"
# jspwiki-2.10.0 needs "main\\java\\org\\apache\\catalina\\" changed to "src\\org\\apache\\catalina\\"
list_projectName_changePath14 = ["jspwiki-2.10.0"]
#需要把"\\knox\\"变为"\\hadoop\\"
list_projectName_changePath15 = ["knox-1.0.0"]
#需要把"framework\\cayenne-modeler\\"变为"modeler\\cayenne-modeler\\"
#需要把"framework\\maven-cayenne-modeler-plugin\\"变为"modeler\\maven-cayenne-modeler-plugin\\"
list_projectName_changePath16 = ["cayenne-3.0.0"]
#需要把"src\\java\\fr\\jayasoft\\"变为"src\\java\\org\\apache\\"
list_projectName_changePath17 = ["ant-ivy-1.4.1"]
#需要把"src\\main\\"变为"src\\"
#需要把"\\dbcp2\\"变为"\\dbcp\\"
list_projectName_changePath18 = ["commons-dbcp-2.0","commons-dbcp-2.1","commons-dbcp-2.2","commons-dbcp-2.3","commons-dbcp-2.4","commons-dbcp-2.5",]
#需要把"src\\main\\"变为"src\\"
#需要把"\\jexl2\\"变为"\\jexl\\"
list_projectName_changePath19 = ["commons-jexl-2.0","commons-jexl-2.1",]
#需要把"main\\"变为"src\\"
#需要把"\\jexl3\\"变为"\\jexl\\"
list_projectName_changePath20 = ["commons-jexl-3.0","commons-jexl-3.1",]
#需要把"parquet-common\\src\\main\\java\\org\\apache\\"变为"parquet-common\\src\\main\\java\\"
#需要把"parquet-avro\\src\\main\\java\\org\\apache\\"变为"parquet-avro\\src\\main\\java\\"
#需要把"parquet-benchmarks\\src\\main\\java\\org\\apache\\"变为"parquet-benchmarks\\src\\main\\java\\"
#需要把"parquet-encoding\\src\\main\\java\\org\\apache\\"变为"parquet-encoding\\src\\main\\java\\"
#需要把"parquet-cascading\\src\\main\\java\\org\\apache\\"变为"parquet-cascading\\src\\main\\java\\"
#需要把"parquet-column\\src\\main\\java\\org\\apache\\"变为"parquet-column\\src\\main\\java\\"
#需要把"parquet-generator\\src\\main\\java\\org\\apache\\"变为"parquet-generator\\src\\main\\java\\"
#需要把"parquet-hadoop\\src\\main\\java\\org\\apache\\"变为"parquet-hadoop\\src\\main\\java\\"
#需要把"parquet-hive\\parquet-hive-storage-handler\\src\\main\\java\\org\\apache\\"变为"parquet-hive\\parquet-hive-storage-handler\\src\\main\\java\\"
#需要把"parquet-thrift\\src\\main\\java\\org\\apache\\"变为"parquet-thrift\\src\\main\\java\\"
#需要把"parquet-pig\\src\\main\\java\\org\\apache\\"变为"parquet-pig\\src\\main\\java\\"
#需要把"parquet-protobuf\\src\\main\\java\\org\\apache\\"变为"parquet-protobuf\\src\\main\\java\\"
#需要把"parquet-scrooge\\src\\main\\java\\org\\apache\\"变为"parquet-scrooge\\src\\main\\java\\"
#需要把"parquet-tools\\src\\main\\java\\org\\apache\\"变为"parquet-tools\\src\\main\\java\\"
list_projectName_changePath21 = ["parquet-mr-1.7.0",]
# Because some project versions overlap with the lists above, these changes must be applied after the ones above
list_projectName_changePath_after1 = [
"commons-collections-4.0","commons-collections-4.1",#除这几个版本外,之前的版本需要加src
"commons-configuration-1.8","commons-configuration-1.9","commons-configuration-1.10","commons-configuration-2.0","commons-configuration-2.1","commons-configuration-2.2",#除这几个版本外,之前的版本需要加src
"commons-lang-2.5","commons-lang-2.6","commons-lang-3.0","commons-lang-3.1","commons-lang-3.2","commons-lang-3.3","commons-lang-3.4","commons-lang-3.5","commons-lang-3.6","commons-lang-3.7",
"commons-digester-2.1","commons-digester-3.0","commons-digester-3.1","commons-digester-3.2",]
# Reverse the unified path changes
project_version = projectName + '-' + versionNumber;
if project_version in list_projectName_changePath1:
i_crossVersionModule = i_crossVersionModule.replace('src\\','main\\',1);# replace at the beginning of the path
elif project_version in list_projectName_changePath2:
if project_version == "santuario-java-2.0.0":
i_crossVersionModule = i_crossVersionModule.replace('src\\','src\\main\\java\\',1);
if project_version == "santuario-java-2.1.0":
i_crossVersionModule = i_crossVersionModule.replace('src\\','main\\java\\',1);
elif project_version in list_projectName_changePath3:
i_crossVersionModule = i_crossVersionModule.replace('\\collections\\','\\collections4\\',1);
elif project_version in list_projectName_changePath4:
i_crossVersionModule = i_crossVersionModule.replace('\\configuration\\','\\configuration2\\',1);
elif project_version in list_projectName_changePath5:
i_crossVersionModule = i_crossVersionModule.replace('\\digester\\','\\digester3\\',1);
elif project_version in list_projectName_changePath6:
i_crossVersionModule = i_crossVersionModule.replace('\\lang\\','\\lang3\\',1);
elif project_version in list_projectName_changePath7:
i_crossVersionModule = i_crossVersionModule.replace('\\math\\','\\math3\\',1);
elif project_version in list_projectName_changePath8:
string_added = "core-";
string_started = "common\\";
string_started_2 = "cube\\";
string_started_3 = "dictionary\\";
string_started_4 = "job\\";
string_started_5 = "metadata\\";
string_started_6 = "storage\\";
startSubscript = len(string_added);
length_string_started = len(string_started)+startSubscript;
length_string_started_2 = len(string_started_2)+startSubscript;
length_string_started_3 = len(string_started_3)+startSubscript;
length_string_started_4 = len(string_started_4)+startSubscript;
length_string_started_5 = len(string_started_5)+startSubscript;
length_string_started_6 = len(string_started_6)+startSubscript;
if i_crossVersionModule[startSubscript:length_string_started] == string_started \
or i_crossVersionModule[startSubscript:length_string_started_2] == string_started_2 \
or i_crossVersionModule[startSubscript:length_string_started_3] == string_started_3 \
or i_crossVersionModule[startSubscript:length_string_started_4] == string_started_4 \
or i_crossVersionModule[startSubscript:length_string_started_5] == string_started_5 \
or i_crossVersionModule[startSubscript:length_string_started_6] == string_started_6:
i_crossVersionModule = i_crossVersionModule.replace(string_added,'',1);# replace at the beginning of the path
if project_version == "kylin-0.6.1":
i_crossVersionModule = i_crossVersionModule.replace('\\java\\org\\apache\\kylin\\','\\java\\com\\kylinolap\\',1);
elif project_version in list_projectName_changePath9:
i_crossVersionModule = i_crossVersionModule.replace('src\\share\\','main\\java\\',1);
elif project_version in list_projectName_changePath10:
i_crossVersionModule = i_crossVersionModule.replace('\\vfs\\','\\vfs2\\',1);
elif project_version in list_projectName_changePath11:
i_crossVersionModule = i_crossVersionModule.replace('\\vfs\\','\\vfs2\\',1);
i_crossVersionModule = i_crossVersionModule.replace('core\\','commons-vfs2\\',1);
i_crossVersionModule = i_crossVersionModule.replace('sandbox\\','commons-vfs2-sandbox\\',1);
elif project_version in list_projectName_changePath12:
i_crossVersionModule = i_crossVersionModule.replace('src\\java\\org\\apache\\','commons-jcs-core\\src\\main\\java\\org\\apache\\commons\\',1);
i_crossVersionModule = i_crossVersionModule.replace('sandbox\\yajcache\\src\\org\\apache\\','commons-jcs-sandbox\\yajcache\\src\\main\\java\\org\\apache\\commons\\',1);
i_crossVersionModule = i_crossVersionModule.replace('src\\experimental\\org\\apache\\','src\\experimental\\org\\apache\\commons\\',1);
elif project_version in list_projectName_changePath13:
i_crossVersionModule = i_crossVersionModule.replace('src\\com\\ecyrd\\jspwiki\\','src\\org\\apache\\wiki\\',1);
elif project_version in list_projectName_changePath14:
i_crossVersionModule = i_crossVersionModule.replace('src\\com\\ecyrd\\jspwiki\\','main\\java\\org\\apache\\wiki\\',1);
i_crossVersionModule = i_crossVersionModule.replace('src\\org\\apache\\catalina\\','main\\java\\org\\apache\\catalina\\',1);
elif project_version in list_projectName_changePath15:
i_crossVersionModule = i_crossVersionModule.replace('\\hadoop\\','\\knox\\',1);
elif project_version in list_projectName_changePath16:
i_crossVersionModule = i_crossVersionModule.replace('modeler\\cayenne-modeler\\','framework\\cayenne-modeler\\',1);
i_crossVersionModule = i_crossVersionModule.replace('modeler\\maven-cayenne-modeler-plugin\\','framework\\maven-cayenne-modeler-plugin\\',1);
elif project_version in list_projectName_changePath17:
i_crossVersionModule = i_crossVersionModule.replace('src\\java\\org\\apache\\','src\\java\\fr\\jayasoft\\',1);
elif project_version in list_projectName_changePath18:
i_crossVersionModule = i_crossVersionModule.replace('src\\','src\\main\\',1);
i_crossVersionModule = i_crossVersionModule.replace('\\dbcp\\','\\dbcp2\\',1);
elif project_version in list_projectName_changePath19:
i_crossVersionModule = i_crossVersionModule.replace('src\\','src\\main\\',1);
i_crossVersionModule = i_crossVersionModule.replace('\\jexl\\','\\jexl2\\',1);
elif project_version in list_projectName_changePath20:
i_crossVersionModule = i_crossVersionModule.replace('src\\','main\\',1);
i_crossVersionModule = i_crossVersionModule.replace('\\jexl\\','\\jexl3\\',1);
elif project_version in list_projectName_changePath21:
i_crossVersionModule = i_crossVersionModule.replace("parquet-common\\src\\main\\java\\","parquet-common\\src\\main\\java\\org\\apache\\",1);
i_crossVersionModule = i_crossVersionModule.replace("parquet-avro\\src\\main\\java\\","parquet-avro\\src\\main\\java\\org\\apache\\",1);
i_crossVersionModule = i_crossVersionModule.replace("parquet-benchmarks\\src\\main\\java\\","parquet-benchmarks\\src\\main\\java\\org\\apache\\",1);
i_crossVersionModule = i_crossVersionModule.replace("parquet-encoding\\src\\main\\java\\","parquet-encoding\\src\\main\\java\\org\\apache\\",1);
i_crossVersionModule = i_crossVersionModule.replace("parquet-cascading\\src\\main\\java\\","parquet-cascading\\src\\main\\java\\org\\apache\\",1);
i_crossVersionModule = i_crossVersionModule.replace("parquet-column\\src\\main\\java\\","parquet-column\\src\\main\\java\\org\\apache\\",1);
i_crossVersionModule = i_crossVersionModule.replace("parquet-generator\\src\\main\\java\\","parquet-generator\\src\\main\\java\\org\\apache\\",1);
i_crossVersionModule = i_crossVersionModule.replace("parquet-hadoop\\src\\main\\java\\","parquet-hadoop\\src\\main\\java\\org\\apache\\",1);
i_crossVersionModule = i_crossVersionModule.replace("parquet-hive\\src\\main\\java\\","parquet-hive\\parquet-hive-storage-handler\\src\\main\\java\\org\\apache\\",1);
i_crossVersionModule = i_crossVersionModule.replace("parquet-thrift\\src\\main\\java\\","parquet-thrift\\src\\main\\java\\org\\apache\\",1);
i_crossVersionModule = i_crossVersionModule.replace("parquet-pig\\src\\main\\java\\","parquet-pig\\src\\main\\java\\org\\apache\\",1);
i_crossVersionModule = i_crossVersionModule.replace("parquet-protobuf\\src\\main\\java\\","parquet-protobuf\\src\\main\\java\\org\\apache\\",1);
i_crossVersionModule = i_crossVersionModule.replace("parquet-scrooge\\src\\main\\java\\","parquet-scrooge\\src\\main\\java\\org\\apache\\",1);
i_crossVersionModule = i_crossVersionModule.replace("parquet-tools\\src\\main\\java\\","parquet-tools\\src\\main\\java\\org\\apache\\",1);
# Because some project versions overlap with the lists above, apply this change after the ones above
if project_version in list_projectName_changePath_after1:
i_crossVersionModule = i_crossVersionModule.replace('src\\','main\\',1);
# Prefixes that need to be added
if projectName in list_projectName_needPrefix1:
string_added = "src\\";
string_started = "java\\";
string_started_2 = "main\\";
string_started_3 = "share\\";
string_started_4 = "plugin\\";
startSubscript = len(string_added);
length_string_started = len(string_started)+startSubscript;
length_string_started_2 = len(string_started_2)+startSubscript;
length_string_started_3 = len(string_started_3)+startSubscript;
length_string_started_4 = len(string_started_4)+startSubscript;
if i_crossVersionModule[startSubscript:length_string_started] == string_started \
or i_crossVersionModule[startSubscript:length_string_started_2] == string_started_2 \
or i_crossVersionModule[startSubscript:length_string_started_3] == string_started_3 \
or i_crossVersionModule[startSubscript:length_string_started_4] == string_started_4:
i_crossVersionModule = i_crossVersionModule.replace(string_added,'',1);# replace at the beginning of the path
elif projectName in list_projectName_needPrefix2:
string_added = "deltaspike\\";
i_crossVersionModule = i_crossVersionModule.replace(string_added,'',1);# replace at the beginning of the path
# Remove the prefix before a specific string
if project_version in list_projectName_removePrefix:
string_remove = "src\\";
if i_crossVersionModule[0:4] != string_remove:
position = i_crossVersionModule.find(string_remove)
i_crossVersionModule = i_crossVersionModule[position:];
i_crossVersionModule = i_crossVersionModule.replace(string_remove,'',1);# replace at the beginning of the path
if project_version in list_projectSpecificVersion_needPrefix1:
string_added = "src\\main\\";
i_crossVersionModule = i_crossVersionModule.replace(string_added,'',1);# replace at the beginning of the path
elif project_version in list_projectSpecificVersion_needPrefix2:
string_added = "src\\";
i_crossVersionModule = i_crossVersionModule.replace(string_added,'',1);# replace at the beginning of the path
elif project_version in list_projectSpecificVersion_needPrefix3:
string_added = "giraph-core\\src\\";
i_crossVersionModule = i_crossVersionModule.replace(string_added,'',1);# replace at the beginning of the path
elif project_version in list_projectSpecificVersion_needPrefix4:
if i_crossVersionModule[0:11] != "archiva-cli":
string_added = "archiva-modules\\";
i_crossVersionModule = i_crossVersionModule.replace(string_added,'',1);# replace at the beginning of the path
return i_crossVersionModule;
# Prefix the version number with the letter 'v' so that, e.g., 0.1 and 0.10 are stored as distinct keys instead of both collapsing to 0.1
def FUNCTION_getVersionNumber(fileName):
str_sep= "-";
str_suffix=".udb";
delimiter_sep = fileName.rfind(str_sep);# position of the last occurrence of the separator
delimiter_suffix = fileName.find(str_suffix);
version = 'v' + fileName[delimiter_sep+1:delimiter_suffix];# extract the version number
return version;
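# Example: FUNCTION_getVersionNumber("tika-0.10.udb") returns 'v0.10', so versions 0.1 and 0.10
# remain distinct keys instead of collapsing to the same number.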
# Store the defect label data set after taking the intersection of module and instance, and calculate the intersection proportion of module and instance
def FUNCTION_savedIntersection(df_labels_currentVersion,df_intersection,path_common_labels_saved,i_fileLabels):
list_oneRow = [];
# storage Path
dir_path_saved_fileName = path_common_labels_saved + i_projectName + '/';
if not os.path.exists(dir_path_saved_fileName):
os.makedirs(dir_path_saved_fileName)
path_saved_fileName =dir_path_saved_fileName + i_fileLabels;
df_intersection.to_csv(path_saved_fileName,index=False);
len_original = len(df_labels_currentVersion);
len_intersection = len(df_intersection);
percent_intersection = len_intersection / len_original;
list_oneRow.append(i_fileLabels);
list_oneRow.append(len_original);
list_oneRow.append(len_intersection);
list_oneRow.append(percent_intersection);
return list_oneRow;#Return calculation results
# Take the intersection of modules in the source code and instances in the defect dataset
def FUNCTION_takeIntersection(df_udb_currentVersion,df_labels_currentVersion,InstanceID):
if dataset_style == "Metrics-Repo-2010":#The instance name in this dataset is class and requires special processing
# Group by 'className'
series_group = df_udb_currentVersion.groupby(['className'])['className'].count();
dict_series_group = {'className':series_group.index,'numbers':series_group.values};
df_group = pd.DataFrame(dict_series_group);
# Discard the 'classname' with numbers > 1, because a classname may correspond to multiple relnames of different paths,
# that is, there are classes with the same name in different paths. Therefore, it is necessary to discard them because it is not known which path the classname corresponds to.
df_group = df_group[df_group['numbers']==1];# keep only class names that map to exactly one relName and drop the ambiguous ones
df_group = df_group[['className']];
df_udb_currentVersion = pd.merge(df_group, df_udb_currentVersion, on='className', how='inner');
# Take the intersection of modules in the source code and instances in the defect dataset
df_col1 = df_labels_currentVersion[[InstanceID,'bug']];
df_intersection_udb = pd.merge(df_udb_currentVersion, df_col1, on=InstanceID, how='inner');
df_col2 = df_udb_currentVersion[[InstanceID]];
df_intersection_labels = pd.merge(df_labels_currentVersion, df_col2, on=InstanceID, how='inner');
return (df_intersection_udb,df_intersection_labels);
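# Sketch of the intersection (hypothetical instance names): if the UDB table contains
# instances {a.java, b.java} and the label table contains {b.java, c.java}, both inner
# merges keep only b.java - the first returns its UDB metrics plus the 'bug' column,
# the second returns its full row from the defect-label dataset.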
#The module path of some projects in UDB needs to be prefixed with a specific prefix, otherwise there will be no intersection with the instance name in the original defect dataset.
def FUNCTION_HandleSpecialProject_tika(df_low,df_high):
string_added = "src\\";
for i_instance in range(len(df_low)):
cellName = df_low.loc[i_instance,'relName'];
df_low.loc[i_instance,'relName'] = ''.join([string_added, cellName]);
#Paths have to be unified across versions: the layout changed between adjacent releases, so otherwise no instance names would match.
#For the versions "tika-0.1", "tika-0.2" and "tika-0.3", the path prefix is added based on tika-0.4.
ins_low = deque(df_low['relName'].values);
ins_high = deque(df_high['relName'].values);
ins_low_copy = copy.copy(ins_low);
#define the array of row indices
index_low = np.arange(0,len(ins_low));
int_num = 0
list_deleteIndex = []
while ins_low_copy:
x = ins_low_copy.popleft()
if x in ins_high:
ins_low.remove(x)
ins_high.remove(x)
list_deleteIndex.append(int_num)
int_num+=1;
index_low = np.delete(index_low, list_deleteIndex)
ins_low = np.array(ins_low)
ins_high = np.array(ins_high)
for i in range(len(ins_low)):
i_ins_low = ins_low[i]
index = index_low[i]
for j in range(len(ins_high)):
j_ins_high = ins_high[j]
if i_ins_low in j_ins_high:
df_low.loc[index,'relName'] = j_ins_high
ins_high = np.delete(ins_high, j)
break
return df_low;
def FUNCTION_HandleSpecialProject_label_tika(df_low,df_high):
#Paths have to be unified across versions: the layout changed between adjacent releases, so otherwise no instance names would match.
#For the versions "tika-0.1", "tika-0.2" and "tika-0.3", the path prefix is added based on tika-0.4.
ins_low = deque(df_low['relName'].values);
ins_high = deque(df_high['relName'].values);
ins_low_copy = copy.copy(ins_low);
#define the array of row indices
index_low = np.arange(0,len(ins_low));
int_num = 0
list_deleteIndex = []
while ins_low_copy:
x = ins_low_copy.popleft()
if x in ins_high:
ins_low.remove(x)
ins_high.remove(x)
list_deleteIndex.append(int_num)
int_num+=1;
index_low = np.delete(index_low, list_deleteIndex)
ins_low = np.array(ins_low)
ins_high = np.array(ins_high)
for i in range(len(ins_low)):
i_ins_low = ins_low[i]
index = index_low[i]
for j in range(len(ins_high)):
j_ins_high = ins_high[j]
if i_ins_low in j_ins_high:
df_low.loc[index,'relName'] = j_ins_high
ins_high = np.delete(ins_high, j)
break
return df_low;
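# Sketch of the path alignment used by the two tika handlers above (hypothetical paths):
# relNames that already match between the two versions are removed first; every remaining
# old path such as "src\\org\\apache\\tika\\Parser.java" is then mapped onto the first
# newer path that contains it as a substring, e.g. "tika-core\\src\\org\\apache\\tika\\Parser.java".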
# For the specific datasets used, the file name needs to be normalized, otherwise the corresponding module may not be found in the downloaded project code.
def FUNCTION_HandleSpecialProject(df,projectName,str_versionNumber):
#Projects whose module paths need the "src\\" prefix
list_projectName_needPrefix1 = ["commons-jcs","commons-beanutils","commons-codec","commons-compress","commons-digester",
"commons-io","commons-net","systemml","wss4j","nutch",]# projects that need the src prefix, determined from the intersection results
#Projects whose module paths need the "deltaspike\\" prefix
list_projectName_needPrefix2 = ["deltaspike"];
#Project versions whose module paths need the "src\\main\\" prefix
list_projectSpecificVersion_needPrefix1 = ["commons-math-1.0","commons-math-1.1","commons-math-1.2"];
#Project versions whose module paths need the "src\\" prefix
list_projectSpecificVersion_needPrefix2 = [
"commons-configuration-1.0","commons-configuration-1.1","commons-configuration-1.2","commons-configuration-1.3","commons-configuration-1.4","commons-configuration-1.5","commons-configuration-1.6","commons-configuration-1.7",
"commons-collections-1.0","commons-collections-2.0","commons-collections-2.1","commons-collections-3.0","commons-collections-3.1","commons-collections-3.2","commons-collections-3.3",
"commons-lang-1.0","commons-lang-2.0","commons-lang-2.1","commons-lang-2.2","commons-lang-2.3","commons-lang-2.4",
"commons-math-2.0","commons-math-2.1","commons-math-2.2","commons-math-3.0","commons-math-3.1","commons-math-3.2","commons-math-3.3","commons-math-3.4","commons-math-3.5","commons-math-3.6",
"commons-validator-1.0","commons-validator-1.1.0","commons-validator-1.2.0","commons-validator-1.3.0",
];
#Project versions whose module paths need the "giraph-core\\src\\" prefix
list_projectSpecificVersion_needPrefix3 = ["giraph-0.1.0"];
#Project versions whose module paths need the "archiva-modules\\" prefix (except the 4 instances starting with "archiva-cli")
list_projectSpecificVersion_needPrefix4 = ["archiva-1.0"];
#Versions whose paths have to be rewritten to a common layout
list_projectName_changePath1 = ["commons-beanutils-1.9.0",
"commons-bcel-6.0","commons-bcel-6.1","commons-bcel-6.2",# unlike earlier bcel versions, these three need the src replacement
"commons-codec-1.6","commons-codec-1.7","commons-codec-1.8","commons-codec-1.9","commons-codec-1.10","commons-codec-1.11",# earlier codec versions get the src prefix instead
"commons-io-2.0","commons-io-2.1","commons-io-2.2","commons-io-2.3","commons-io-2.4","commons-io-2.5",
"commons-net-2.0","commons-net-2.1","commons-net-2.2","commons-net-3.0","commons-net-3.1","commons-net-3.2","commons-net-3.3","commons-net-3.4","commons-net-3.5","commons-net-3.6",
];
list_projectName_changePath2 = ["santuario-java-2.0.0","santuario-java-2.1.0"];
list_projectName_changePath3 = ["commons-collections-4.0","commons-collections-4.1"];
list_projectName_changePath4 = ["commons-configuration-2.0","commons-configuration-2.1","commons-configuration-2.2"];
list_projectName_changePath5 = ["commons-digester-3.0","commons-digester-3.1","commons-digester-3.2"];
list_projectName_changePath6 = ["commons-lang-3.0","commons-lang-3.1","commons-lang-3.2","commons-lang-3.3","commons-lang-3.4","commons-lang-3.5","commons-lang-3.6","commons-lang-3.7"];
list_projectName_changePath7 = ["commons-math-3.0","commons-math-3.1","commons-math-3.2","commons-math-3.3","commons-math-3.4","commons-math-3.5","commons-math-3.6"];
#Prefix paths starting with "common","cube","dictionary","job","metadata","storage" with "core-"
list_projectName_changePath8 = ["kylin-0.6.1","kylin-0.7.1","kylin-1.0.0","kylin-1.1.0","kylin-1.2.0","kylin-1.3.0"];
#Change "main\\java\\" to "src\\share\\"
list_projectName_changePath9 = ["commons-validator-1.4.0","commons-validator-1.5.0","commons-validator-1.6.0"]
#Change "\\vfs2\\" to "\\vfs\\"
list_projectName_changePath10 = ["commons-vfs-2.0","commons-vfs-2.1"]
#Change "\\vfs2\\" to "\\vfs\\"
#and "commons-vfs2\\" to "core\\" and "commons-vfs2-sandbox\\" to "sandbox\\"
list_projectName_changePath11 = ["commons-vfs-2.2"]
#Change "commons-jcs-core\\src\\main\\java\\org\\apache\\commons\\" to "src\\java\\org\\apache\\"
#and "commons-jcs-sandbox\\yajcache\\src\\main\\java\\org\\apache\\commons\\" to "sandbox\\yajcache\\src\\org\\apache\\"
#and "src\\experimental\\org\\apache\\commons\\" to "src\\experimental\\org\\apache\\"
list_projectName_changePath12 = ["commons-jcs-2.0","commons-jcs-2.1","commons-jcs-2.2"]
#jspwiki-2.9.0: change "src\\org\\apache\\wiki\\" to "src\\com\\ecyrd\\jspwiki\\"
list_projectName_changePath13 = ["jspwiki-2.9.0"]
#jspwiki-2.10.0: change "main\\java\\org\\apache\\wiki\\" to "src\\com\\ecyrd\\jspwiki\\"
#jspwiki-2.10.0: change "main\\java\\org\\apache\\catalina\\" to "src\\org\\apache\\catalina\\"
list_projectName_changePath14 = ["jspwiki-2.10.0"]
#Change "\\knox\\" to "\\hadoop\\"
list_projectName_changePath15 = ["knox-1.0.0"]
#Change "framework\\cayenne-modeler\\" to "modeler\\cayenne-modeler\\"
#and "framework\\maven-cayenne-modeler-plugin\\" to "modeler\\maven-cayenne-modeler-plugin\\"
list_projectName_changePath16 = ["cayenne-3.0.0"]
#Change "src\\java\\fr\\jayasoft\\" to "src\\java\\org\\apache\\"
list_projectName_changePath17 = ["ant-ivy-1.4.1"]
#Change "src\\main\\" to "src\\"
#and "\\dbcp2\\" to "\\dbcp\\"
list_projectName_changePath18 = ["commons-dbcp-2.0","commons-dbcp-2.1","commons-dbcp-2.2","commons-dbcp-2.3","commons-dbcp-2.4","commons-dbcp-2.5",]
#Change "src\\main\\" to "src\\"
#and "\\jexl2\\" to "\\jexl\\"
list_projectName_changePath19 = ["commons-jexl-2.0","commons-jexl-2.1",]
#Change "main\\" to "src\\"
#and "\\jexl3\\" to "\\jexl\\"
list_projectName_changePath20 = ["commons-jexl-3.0","commons-jexl-3.1",]
#For every parquet-* module, change "<module>\\src\\main\\java\\org\\apache\\" to
#"<module>\\src\\main\\java\\", i.e. drop the trailing "org\\apache\\" segment
#(the exact per-module replacements are spelled out in the code below).
list_projectName_changePath21 = ["parquet-mr-1.7.0",]
#Because these versions also appear in the lists above, they are only rewritten after the changes above have been applied
list_projectName_changePath_after1 = [
"commons-collections-4.0","commons-collections-4.1",# earlier collections versions get the src prefix instead
"commons-configuration-1.8","commons-configuration-1.9","commons-configuration-1.10","commons-configuration-2.0","commons-configuration-2.1","commons-configuration-2.2",# earlier configuration versions get the src prefix instead
"commons-lang-2.5","commons-lang-2.6","commons-lang-3.0","commons-lang-3.1","commons-lang-3.2","commons-lang-3.3","commons-lang-3.4","commons-lang-3.5","commons-lang-3.6","commons-lang-3.7",
"commons-digester-2.1","commons-digester-3.0","commons-digester-3.1","commons-digester-3.2",]
#Paths have to be unified across versions: the layout changed between adjacent releases, so otherwise no instance names would match.
versionNumber = str_versionNumber[1:];
project_version = projectName + '-' + versionNumber;
if project_version in list_projectName_changePath1:
df['relName'] = df['relName'].apply(lambda row: row.replace('main\\','src\\'));
elif project_version in list_projectName_changePath2:
if project_version == "santuario-java-2.0.0":
df['relName'] = df['relName'].apply(lambda row: row.replace('src\\main\\java\\','src\\'));
if project_version == "santuario-java-2.1.0":
df['relName'] = df['relName'].apply(lambda row: row.replace('main\\java\\','src\\'));
elif project_version in list_projectName_changePath3:
df['relName'] = df['relName'].apply(lambda row: row.replace('\\collections4\\','\\collections\\'));
elif project_version in list_projectName_changePath4:
df['relName'] = df['relName'].apply(lambda row: row.replace('\\configuration2\\','\\configuration\\'));
elif project_version in list_projectName_changePath5:
df['relName'] = df['relName'].apply(lambda row: row.replace('\\digester3\\','\\digester\\'));
elif project_version in list_projectName_changePath6:
df['relName'] = df['relName'].apply(lambda row: row.replace('\\lang3\\','\\lang\\'));
elif project_version in list_projectName_changePath7:
df['relName'] = df['relName'].apply(lambda row: row.replace('\\math3\\','\\math\\'));
elif project_version in list_projectName_changePath8:
string_added = "core-";
string_started = "common\\";
string_started_2 = "cube\\";
string_started_3 = "dictionary\\";
string_started_4 = "job\\";
string_started_5 = "metadata\\";
string_started_6 = "storage\\";
length_string_started = len(string_started);
length_string_started_2 = len(string_started_2);
length_string_started_3 = len(string_started_3);
length_string_started_4 = len(string_started_4);
length_string_started_5 = len(string_started_5);
length_string_started_6 = len(string_started_6);
for i_instance in range(len(df)):
cellName = df.loc[i_instance,'relName'];
if cellName[0:length_string_started] == string_started \
or cellName[0:length_string_started_2] == string_started_2 \
or cellName[0:length_string_started_3] == string_started_3 \
or cellName[0:length_string_started_4] == string_started_4 \
or cellName[0:length_string_started_5] == string_started_5 \
or cellName[0:length_string_started_6] == string_started_6:
df.loc[i_instance,'relName'] = ''.join([string_added, cellName]);
if project_version == "kylin-0.6.1":
df['relName'] = df['relName'].apply(lambda row: row.replace('\\java\\com\\kylinolap\\','\\java\\org\\apache\\kylin\\'));
elif project_version in list_projectName_changePath9:
df['relName'] = df['relName'].apply(lambda row: row.replace('main\\java\\','src\\share\\'));
elif project_version in list_projectName_changePath10:
df['relName'] = df['relName'].apply(lambda row: row.replace('\\vfs2\\','\\vfs\\'));
elif project_version in list_projectName_changePath11:
df['relName'] = df['relName'].apply(lambda row: row.replace('\\vfs2\\','\\vfs\\'));
df['relName'] = df['relName'].apply(lambda row: row.replace('commons-vfs2\\','core\\'));
df['relName'] = df['relName'].apply(lambda row: row.replace('commons-vfs2-sandbox\\','sandbox\\'));
elif project_version in list_projectName_changePath12:
df['relName'] = df['relName'].apply(lambda row: row.replace('commons-jcs-core\\src\\main\\java\\org\\apache\\commons\\','src\\java\\org\\apache\\'));
df['relName'] = df['relName'].apply(lambda row: row.replace('commons-jcs-sandbox\\yajcache\\src\\main\\java\\org\\apache\\commons\\','sandbox\\yajcache\\src\\org\\apache\\'));
df['relName'] = df['relName'].apply(lambda row: row.replace('src\\experimental\\org\\apache\\commons\\','src\\experimental\\org\\apache\\'));
elif project_version in list_projectName_changePath13:
df['relName'] = df['relName'].apply(lambda row: row.replace('src\\org\\apache\\wiki\\','src\\com\\ecyrd\\jspwiki\\'));
elif project_version in list_projectName_changePath14:
df['relName'] = df['relName'].apply(lambda row: row.replace('main\\java\\org\\apache\\wiki\\','src\\com\\ecyrd\\jspwiki\\'));
df['relName'] = df['relName'].apply(lambda row: row.replace('main\\java\\org\\apache\\catalina\\','src\\org\\apache\\catalina\\'));
elif project_version in list_projectName_changePath15:
df['relName'] = df['relName'].apply(lambda row: row.replace('\\knox\\','\\hadoop\\'));
elif project_version in list_projectName_changePath16:
df['relName'] = df['relName'].apply(lambda row: row.replace('framework\\cayenne-modeler\\','modeler\\cayenne-modeler\\'));
df['relName'] = df['relName'].apply(lambda row: row.replace('framework\\maven-cayenne-modeler-plugin\\','modeler\\maven-cayenne-modeler-plugin\\'));
elif project_version in list_projectName_changePath17:
df['relName'] = df['relName'].apply(lambda row: row.replace('src\\java\\fr\\jayasoft\\','src\\java\\org\\apache\\'));
elif project_version in list_projectName_changePath18:
df['relName'] = df['relName'].apply(lambda row: row.replace('src\\main\\','src\\'));
df['relName'] = df['relName'].apply(lambda row: row.replace('\\dbcp2\\','\\dbcp\\'));
elif project_version in list_projectName_changePath19:
df['relName'] = df['relName'].apply(lambda row: row.replace('src\\main\\','src\\'));
df['relName'] = df['relName'].apply(lambda row: row.replace('\\jexl2\\','\\jexl\\'));
elif project_version in list_projectName_changePath20:
df['relName'] = df['relName'].apply(lambda row: row.replace('main\\','src\\'));
df['relName'] = df['relName'].apply(lambda row: row.replace('\\jexl3\\','\\jexl\\'));
elif project_version in list_projectName_changePath21:
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-common\\src\\main\\java\\org\\apache\\","parquet-common\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-avro\\src\\main\\java\\org\\apache\\","parquet-avro\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-benchmarks\\src\\main\\java\\org\\apache\\","parquet-benchmarks\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-encoding\\src\\main\\java\\org\\apache\\","parquet-encoding\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-cascading\\src\\main\\java\\org\\apache\\","parquet-cascading\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-column\\src\\main\\java\\org\\apache\\","parquet-column\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-generator\\src\\main\\java\\org\\apache\\","parquet-generator\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-hadoop\\src\\main\\java\\org\\apache\\","parquet-hadoop\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-hive\\parquet-hive-storage-handler\\src\\main\\java\\org\\apache\\","parquet-hive\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-thrift\\src\\main\\java\\org\\apache\\","parquet-thrift\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-pig\\src\\main\\java\\org\\apache\\","parquet-pig\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-protobuf\\src\\main\\java\\org\\apache\\","parquet-protobuf\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-scrooge\\src\\main\\java\\org\\apache\\","parquet-scrooge\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-tools\\src\\main\\java\\org\\apache\\","parquet-tools\\src\\main\\java\\"));
#Because these versions also appear in the lists above, they are only rewritten after the changes above have been applied
if project_version in list_projectName_changePath_after1:
df['relName'] = df['relName'].apply(lambda row: row.replace('main\\','src\\'));
#Add the required path prefixes
if projectName in list_projectName_needPrefix1:
string_added = "src\\";
string_started = "java\\";
string_started_2 = "main\\";
string_started_3 = "share\\";
string_started_4 = "plugin\\";
length_string_started = len(string_started);
length_string_started_2 = len(string_started_2);
length_string_started_3 = len(string_started_3);
length_string_started_4 = len(string_started_4);
for i_instance in range(len(df)):
cellName = df.loc[i_instance,'relName'];
if cellName[0:length_string_started] == string_started \
or cellName[0:length_string_started_2] == string_started_2 \
or cellName[0:length_string_started_3] == string_started_3 \
or cellName[0:length_string_started_4] == string_started_4:
df.loc[i_instance,'relName'] = ''.join([string_added, cellName]);
elif projectName in list_projectName_needPrefix2:
string_added = "deltaspike\\";
for i_instance in range(len(df)):
cellName = df.loc[i_instance,'relName'];
df.loc[i_instance,'relName'] = ''.join([string_added, cellName]);
if project_version in list_projectSpecificVersion_needPrefix1:
string_added = "src\\main\\";
for i_instance in range(len(df)):
cellName = df.loc[i_instance,'relName'];
df.loc[i_instance,'relName'] = ''.join([string_added, cellName]);
elif project_version in list_projectSpecificVersion_needPrefix2:
string_added = "src\\";
for i_instance in range(len(df)):
cellName = df.loc[i_instance,'relName'];
df.loc[i_instance,'relName'] = ''.join([string_added, cellName]);
elif project_version in list_projectSpecificVersion_needPrefix3:
string_added = "giraph-core\\src\\";
for i_instance in range(len(df)):
cellName = df.loc[i_instance,'relName'];
df.loc[i_instance,'relName'] = ''.join([string_added, cellName]);
elif project_version in list_projectSpecificVersion_needPrefix4:
string_added = "archiva-modules\\";
for i_instance in range(len(df)):
cellName = df.loc[i_instance,'relName'];
if cellName[0:11] != "archiva-cli":
df.loc[i_instance,'relName'] = ''.join([string_added, cellName]);
return df;
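# Example call (hypothetical arguments, for illustration only):
#   df_udb = FUNCTION_HandleSpecialProject(df_udb, "commons-io", "v2.5")
# rewrites every 'relName' of the commons-io 2.5 UDB table so that it lines up with the
# paths used in the corresponding defect-label file.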
#The path of specific projects in the original defect dataset needs to be changed, otherwise there will be no intersection with the module in UDB file.
def FUNCTION_HandleSpecialProject_label(df,projectName,str_versionNumber):
#Project versions whose paths need the "giraph-core\\" prefix
list_projectSpecificVersion_needPrefix3 = ["giraph-0.1.0"];
#Project versions whose paths need the "archiva-modules\\" prefix (except 4 specific instances)
list_projectSpecificVersion_needPrefix4 = ["archiva-1.0"];
#Change "src\\main\\" to "src\\"
list_projectName_changePath1 = [
"commons-bcel-6.0","commons-bcel-6.1","commons-bcel-6.2",
"commons-codec-1.6","commons-codec-1.7","commons-codec-1.8","commons-codec-1.9","commons-codec-1.10","commons-codec-1.11",
"commons-collections-4.0","commons-collections-4.1",
"commons-configuration-1.8","commons-configuration-1.9","commons-configuration-1.10","commons-configuration-2.0","commons-configuration-2.1","commons-configuration-2.2",
"commons-digester-2.1","commons-digester-3.0","commons-digester-3.1","commons-digester-3.2",
"commons-io-2.0","commons-io-2.1","commons-io-2.2","commons-io-2.3","commons-io-2.4","commons-io-2.5",
"commons-lang-2.5","commons-lang-2.6","commons-lang-3.0","commons-lang-3.1","commons-lang-3.2","commons-lang-3.3","commons-lang-3.4","commons-lang-3.5","commons-lang-3.6","commons-lang-3.7",
"commons-net-2.0","commons-net-2.1","commons-net-2.2","commons-net-3.0","commons-net-3.1","commons-net-3.2","commons-net-3.3","commons-net-3.4","commons-net-3.5","commons-net-3.6",
];
#Change "src\\main\\java" to "src\\"
list_projectName_changePath2 = ["santuario-java-2.0.0","santuario-java-2.1.0"]
#Prefix paths starting with "common","cube","dictionary","job","metadata","storage" with "core-"
list_projectName_changePath8 = ["kylin-0.6.1","kylin-0.7.1","kylin-1.0.0","kylin-1.1.0","kylin-1.2.0","kylin-1.3.0"];
#Change "src\\main\\java" to "src\\share"
list_projectName_changePath9 = ["commons-validator-1.4.0","commons-validator-1.5.0","commons-validator-1.6.0"];
#Change "\\vfs2\\" to "\\vfs\\"
list_projectName_changePath10 = ["commons-vfs-2.0","commons-vfs-2.1"]
#Change "\\vfs2\\" to "\\vfs\\"
#and "commons-vfs2\\" to "core\\" and "commons-vfs2-sandbox\\" to "sandbox\\"
list_projectName_changePath11 = ["commons-vfs-2.2"]
#Change "commons-jcs-core\\src\\main\\java\\org\\apache\\commons\\" to "src\\java\\org\\apache\\"
#and "commons-jcs-sandbox\\yajcache\\src\\main\\java\\org\\apache\\commons\\" to "sandbox\\yajcache\\src\\org\\apache\\"
#and "src\\experimental\\org\\apache\\commons\\" to "src\\experimental\\org\\apache\\"
list_projectName_changePath12 = ["commons-jcs-2.0","commons-jcs-2.1","commons-jcs-2.2"]
#jspwiki-2.9.0: change "src\\org\\apache\\wiki\\" to "src\\com\\ecyrd\\jspwiki\\"
list_projectName_changePath13 = ["jspwiki-2.9.0"]
#jspwiki-2.10.0: change "src\\main\\java\\org\\apache\\wiki\\" to "src\\com\\ecyrd\\jspwiki\\"
#jspwiki-2.10.0: change "src\\main\\java\\org\\apache\\catalina\\" to "src\\org\\apache\\catalina\\"
list_projectName_changePath14 = ["jspwiki-2.10.0"]
#Change "\\knox\\" to "\\hadoop\\"
list_projectName_changePath15 = ["knox-1.0.0"]
#Change "framework\\cayenne-modeler\\" to "modeler\\cayenne-modeler\\"
#and "framework\\maven-cayenne-modeler-plugin\\" to "modeler\\maven-cayenne-modeler-plugin\\"
list_projectName_changePath16 = ["cayenne-3.0.0"]
#Change "src\\java\\fr\\jayasoft\\" to "src\\java\\org\\apache\\"
list_projectName_changePath17 = ["ant-ivy-1.4.1"]
#Change "src\\main\\" to "src\\"
#and "\\dbcp2\\" to "\\dbcp\\"
list_projectName_changePath18 = ["commons-dbcp-2.0","commons-dbcp-2.1","commons-dbcp-2.2","commons-dbcp-2.3","commons-dbcp-2.4","commons-dbcp-2.5",]
#Change "src\\main\\" to "src\\"
#and "\\jexl2\\" to "\\jexl\\"
list_projectName_changePath19 = ["commons-jexl-2.0","commons-jexl-2.1",]
#Change "src\\main\\" to "src\\"
#and "\\jexl3\\" to "\\jexl\\"
list_projectName_changePath20 = ["commons-jexl-3.0","commons-jexl-3.1",]
#For every parquet-* module, change "<module>\\src\\main\\java\\org\\apache\\" to
#"<module>\\src\\main\\java\\", i.e. drop the trailing "org\\apache\\" segment
#(the exact per-module replacements are spelled out in the code below).
list_projectName_changePath21 = ["parquet-mr-1.7.0",]
#Paths have to be unified across versions: the layout changed between adjacent releases, so otherwise no instance names would match.
versionNumber = str_versionNumber[1:];
project_version = projectName + '-' + versionNumber;
if project_version in list_projectName_changePath1:
df['relName'] = df['relName'].apply(lambda row: row.replace('src\\main\\','src\\'));
elif project_version in list_projectName_changePath2:
df['relName'] = df['relName'].apply(lambda row: row.replace('src\\main\\java\\','src\\'));
elif project_version in list_projectName_changePath8:# prefix paths starting with "common","cube","dictionary","job","metadata","storage" with "core-"
string_added = "core-";
string_started = "common\\";
string_started_2 = "cube\\";
string_started_3 = "dictionary\\";
string_started_4 = "job\\";
string_started_5 = "metadata\\";
string_started_6 = "storage\\";
length_string_started = len(string_started);
length_string_started_2 = len(string_started_2);
length_string_started_3 = len(string_started_3);
length_string_started_4 = len(string_started_4);
length_string_started_5 = len(string_started_5);
length_string_started_6 = len(string_started_6);
for i_instance in range(len(df)):
cellName = df.loc[i_instance,'relName'];
if cellName[0:length_string_started] == string_started \
or cellName[0:length_string_started_2] == string_started_2 \
or cellName[0:length_string_started_3] == string_started_3 \
or cellName[0:length_string_started_4] == string_started_4 \
or cellName[0:length_string_started_5] == string_started_5 \
or cellName[0:length_string_started_6] == string_started_6:
df.loc[i_instance,'relName'] = ''.join([string_added, cellName]);
if project_version == "kylin-0.6.1":
df['relName'] = df['relName'].apply(lambda row: row.replace('\\java\\com\\kylinolap\\','\\java\\org\\apache\\kylin\\'));
elif project_version in list_projectName_changePath9:
df['relName'] = df['relName'].apply(lambda row: row.replace('src\\main\\java\\','src\\share\\'));
elif project_version in list_projectName_changePath10:
df['relName'] = df['relName'].apply(lambda row: row.replace('\\vfs2\\','\\vfs\\'));
elif project_version in list_projectName_changePath11:
df['relName'] = df['relName'].apply(lambda row: row.replace('\\vfs2\\','\\vfs\\'));
df['relName'] = df['relName'].apply(lambda row: row.replace('commons-vfs2\\','core\\'));
df['relName'] = df['relName'].apply(lambda row: row.replace('commons-vfs2-sandbox\\','sandbox\\'));
elif project_version in list_projectName_changePath12:
df['relName'] = df['relName'].apply(lambda row: row.replace('commons-jcs-core\\src\\main\\java\\org\\apache\\commons\\','src\\java\\org\\apache\\'));
df['relName'] = df['relName'].apply(lambda row: row.replace('commons-jcs-sandbox\\yajcache\\src\\main\\java\\org\\apache\\commons\\','sandbox\\yajcache\\src\\org\\apache\\'));
df['relName'] = df['relName'].apply(lambda row: row.replace('src\\experimental\\org\\apache\\commons\\','src\\experimental\\org\\apache\\'));
elif project_version in list_projectName_changePath13:
df['relName'] = df['relName'].apply(lambda row: row.replace('src\\org\\apache\\wiki\\','src\\com\\ecyrd\\jspwiki\\'));
elif project_version in list_projectName_changePath14:
df['relName'] = df['relName'].apply(lambda row: row.replace('src\\main\\java\\org\\apache\\wiki\\','src\\com\\ecyrd\\jspwiki\\'));
df['relName'] = df['relName'].apply(lambda row: row.replace('src\\main\\java\\org\\apache\\catalina\\','src\\org\\apache\\catalina\\'));
elif project_version in list_projectName_changePath15:
df['relName'] = df['relName'].apply(lambda row: row.replace('\\knox\\','\\hadoop\\'));
elif project_version in list_projectName_changePath16:
df['relName'] = df['relName'].apply(lambda row: row.replace('framework\\cayenne-modeler\\','modeler\\cayenne-modeler\\'));
df['relName'] = df['relName'].apply(lambda row: row.replace('framework\\maven-cayenne-modeler-plugin\\','modeler\\maven-cayenne-modeler-plugin\\'));
elif project_version in list_projectName_changePath17:
df['relName'] = df['relName'].apply(lambda row: row.replace('src\\java\\fr\\jayasoft\\','src\\java\\org\\apache\\'));
elif project_version in list_projectName_changePath18:
df['relName'] = df['relName'].apply(lambda row: row.replace('src\\main\\','src\\'));
df['relName'] = df['relName'].apply(lambda row: row.replace('\\dbcp2\\','\\dbcp\\'));
elif project_version in list_projectName_changePath19:
df['relName'] = df['relName'].apply(lambda row: row.replace('src\\main\\','src\\'));
df['relName'] = df['relName'].apply(lambda row: row.replace('\\jexl2\\','\\jexl\\'));
elif project_version in list_projectName_changePath20:
df['relName'] = df['relName'].apply(lambda row: row.replace('src\\main\\','src\\'));
df['relName'] = df['relName'].apply(lambda row: row.replace('\\jexl3\\','\\jexl\\'));
elif project_version in list_projectName_changePath21:
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-common\\src\\main\\java\\org\\apache\\","parquet-common\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-avro\\src\\main\\java\\org\\apache\\","parquet-avro\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-benchmarks\\src\\main\\java\\org\\apache\\","parquet-benchmarks\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-encoding\\src\\main\\java\\org\\apache\\","parquet-encoding\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-cascading\\src\\main\\java\\org\\apache\\","parquet-cascading\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-column\\src\\main\\java\\org\\apache\\","parquet-column\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-generator\\src\\main\\java\\org\\apache\\","parquet-generator\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-hadoop\\src\\main\\java\\org\\apache\\","parquet-hadoop\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-hive\\parquet-hive-storage-handler\\src\\main\\java\\org\\apache\\","parquet-hive\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-thrift\\src\\main\\java\\org\\apache\\","parquet-thrift\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-pig\\src\\main\\java\\org\\apache\\","parquet-pig\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-protobuf\\src\\main\\java\\org\\apache\\","parquet-protobuf\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-scrooge\\src\\main\\java\\org\\apache\\","parquet-scrooge\\src\\main\\java\\"));
df['relName'] = df['relName'].apply(lambda row: row.replace("parquet-tools\\src\\main\\java\\org\\apache\\","parquet-tools\\src\\main\\java\\"));
#Add the required path prefixes
if project_version in list_projectSpecificVersion_needPrefix3:
string_added = "giraph-core\\";
for i_instance in range(len(df)):
cellName = df.loc[i_instance,'relName'];
df.loc[i_instance,'relName'] = ''.join([string_added, cellName]);
elif project_version in list_projectSpecificVersion_needPrefix4:
string_added = "archiva-modules\\";
for i_instance in range(len(df)):
cellName = df.loc[i_instance,'relName'];
temp_1 = cellName[0:11]
if temp_1 != "archiva-cli":
df.loc[i_instance,'relName'] = ''.join([string_added, cellName]);
# #Project versions whose paths would need the "src\\main\\" prefix
# list_projectSpecificVersion_needPrefix1 = ["commons-math-1.0","commons-math-1.1","commons-math-1.2"];
# #Paths have to be unified across versions: the layout changed between adjacent releases, so otherwise no instance names would match.
# versionNumber = str_versionNumber[1:];
# project_version = projectName + '-' + versionNumber;
# if project_version in list_projectSpecificVersion_needPrefix1:
# df['relName'] = df['relName'].apply(lambda row: row.replace('src\\','src\\main\\'));
return df;
# If the defect label is a count label, it will be changed to 0, 1 binary label
def FUNCTION_changeToLabel(x):
if x > 0:
return 1;
else:
return 0;
def FUNCTION_separatorSubstitution(x):
return x.replace("/", "\\");
def FUNCTION_substring(x):
return x[1:];
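# Behaviour of the three helpers above, for illustration:
#   FUNCTION_changeToLabel(3) -> 1, FUNCTION_changeToLabel(0) -> 0
#   FUNCTION_separatorSubstitution("a/b/C.java") -> "a\\b\\C.java"
#   FUNCTION_substring("/org/C.java") -> "org/C.java"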
# Unify the column name of different defect data sets, which can be adjusted and expanded as needed
def FUNCTION_unifyColumnNames(df_file_original,dataset_style):
if dataset_style == "Metrics-Repo-2010":# the original dataset has two name columns; 'className' has no path separator, it is dot-separated
df_file_original.rename(columns={'name.1':'className'}, inplace=True);
df_file_original.drop(['name','version'], axis=1, inplace=True);# only Metrics-Repo-2010 ships its own version column; drop it here and add the unified 'v'-prefixed version column later
elif dataset_style == "JIRA-HA-2019":
df_file_original.rename(columns={'File':'relName','CountLineCode':'loc','HeuBugCount':'bug'}, inplace=True);
df_file_original['relName'] = df_file_original['relName'].apply(lambda row: FUNCTION_separatorSubstitution(row));
df_file_original = df_file_original.drop(['HeuBug','RealBug','RealBugCount'], axis=1);
elif dataset_style == "JIRA-RA-2019":
df_file_original.rename(columns={'File':'relName','CountLineCode':'loc','RealBugCount':'bug'}, inplace=True);
df_file_original['relName'] = df_file_original['relName'].apply(lambda row: FUNCTION_separatorSubstitution(row));
df_file_original = df_file_original.drop(['HeuBug','RealBug','HeuBugCount'], axis=1);
elif dataset_style == "ECLIPSE-2007":
df_file_original.rename(columns={'filename':'relName','TLOC':'loc','post':'bug'}, inplace=True);
df_file_original['relName'] = df_file_original['relName'].apply(lambda row: FUNCTION_substring(row));
df_file_original['relName'] = df_file_original['relName'].apply(lambda row: FUNCTION_separatorSubstitution(row));
df_file_original = df_file_original.drop(['plugin','pre'], axis=1);
cols = df_file_original.columns.tolist();
cols.remove('bug');
cols.append('bug');
df_file_original = df_file_original[cols];
elif dataset_style == "MA-SZZ-2020":
df_file_original.rename(columns={'name_id':'relName'}, inplace=True);
elif dataset_style in ["IND-JLMIV+R-2020","6M-SZZ-2020"]:
df_file_original['relName'] = df_file_original['relName'].apply(lambda row: FUNCTION_separatorSubstitution(row));
#===To calculate the bug density, the ranking indicators need to use the bug density===#
df_file_original['bugDensity'] = df_file_original['bug']/df_file_original['loc'];
df_file_original.fillna(0, inplace=True);
# change the number of bug to the label (0 or 1)
df_file_original['bug'] = df_file_original['bug'].apply(lambda x: FUNCTION_changeToLabel(x));
#===end===#
return df_file_original;
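# Example (hypothetical JIRA-HA-2019 row, for illustration only): a record with
#   File='org/apache/commons/Foo.java', CountLineCode=120, HeuBugCount=2
# comes out as relName='org\\apache\\commons\\Foo.java', loc=120, bugDensity=2/120 and
# bug=1 after renaming, separator substitution, density computation and binarisation.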
# Read and preprocess the original defect dataset
def FUNCTION_readLabelsDatasets(fileLabelsPath_currentVersion,dataset_style):
df_file_original = | pd.read_csv(fileLabelsPath_currentVersion) | pandas.read_csv |
'''
https://note.youdao.com/share/?id=50ade2586b4ccbfc5da4c5d6199db863&type=note#/
Title: Python Taobao product data scraping and mining analysis in practice
Project scope:
This case study targets the product category "sofa";
Filters: Tmall only, sorted by sales from high to low, price above 500 yuan;
Volume: 100 pages, 4400 products in total.
Analysis goals:
1. Text analysis of product titles, visualised as a word cloud
2. Sales statistics per title keyword
3. Distribution of product prices
4. Distribution of product sales
5. Average sales per price range
6. Effect of product price on sales
7. Effect of product price on revenue
8. Number of products per province or city
9. Average sales per province
Note: this project only covers the analyses listed above.
Project steps:
1. Data collection: scrape Taobao product data with Python
2. Clean and preprocess the data
3. Text analysis: jieba word segmentation, wordcloud visualisation
4. Bar chart visualisation (barh)
5. Histogram visualisation (hist)
6. Scatter plot visualisation (scatter)
7. Regression analysis visualisation (regplot)
Tools & modules:
Editor: Spyder from Anaconda
Modules: requests, retrying, jieba, missingno, wordcloud, imread, matplotlib, seaborn, etc.
Original code and documents: https://pan.baidu.com/s/1nwEx949  password: <PASSWORD>
'''
'''
1. Scraping the data:
Note: Taobao product pages are JSON; they are parsed here with regular expressions.
Because Taobao actively blocks crawlers, multithreading and a modified headers value
still cannot guarantee that every page is fetched on each pass, so the script loops,
re-crawling the pages that failed, until every page has been fetched successfully.
The code is as follows:
'''
import re
import time
import requests
import pandas as pd
from retrying import retry
from concurrent.futures import ThreadPoolExecutor
start = time.perf_counter() # timing - start
#plist holds the "num" URL offsets (0, 44, 88, ...) for pages 1 to 100
plist = []
for i in range(1,101):
j = 44*(i-1)
plist.append(j)
listno = plist
datatmsp = pd.DataFrame(columns=[])
while True:
@retry(stop_max_attempt_number = 8) # maximum number of retry attempts
def network_programming(num):
url='https://s.taobao.com/search?q=%E6%B2%99%E5%8F%91&imgfile= \
&js=1&stats_click=search_radio_all%3A1&initiative_id=staobaoz_ \
20180207&ie=utf8&sort=sale-desc&style=list&fs=1&filter_tianmao \
=tmall&filter=reserve_price%5B500%2C%5D&bcoffset=0& \
p4ppushleft=%2C44&s=' + str(num)
web = requests.get(url, headers=headers)
web.encoding = 'utf-8'
return web
# multithreaded fetching
def multithreading():
number = listno # each round only fetches the pages that have not yet been crawled successfully
event = []
with ThreadPoolExecutor(max_workers=10) as executor:
for result in executor.map(network_programming,
number, chunksize=10):
event.append(result)
return event
# disguise the crawler: use a browser User-Agent in the request headers
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) \
AppleWebKit/537.36(KHTML, like Gecko) \
Chrome/55.0.2883.87 Safari/537.36'}
listpg = []
event = multithreading()
for i in event:
json = re.findall('"auctions":(.*?),"recommendAuctions"', i.text)
if len(json):
table = pd.read_json(json[0])
datatmsp = pd.concat([datatmsp,table],axis=0,ignore_index=True)
pg = re.findall('"pageNum":(.*?),"p4pbottom_up"',i.text)[0]
listpg.append(pg) # record each page number that was fetched successfully
lists = []
for a in listpg:
b = 44*(int(a)-1)
lists.append(b) # convert the successfully fetched page numbers back to the "num" values used in the URL
listn = listno
listno = [] # collect the pages that failed this round so they can be re-crawled in the next loop
for p in listn:
if p not in lists:
listno.append(p)
if len(listno) == 0: # stop the loop once no pages are left unfetched
break
datatmsp.to_excel('datatmsp.xls', index=False) # export the data to Excel
end = time.perf_counter() # timing - end
print ("Crawling finished, elapsed:", end - start, 's')
'''
2. Data cleaning and processing (this step could also be done in Excel and the result read back in)
'''
datatmsp = | pd.read_excel('datatmsp.xls') | pandas.read_excel |
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from sim_utils.model import Model
class Replicator:
def __init__(self, scenarios, replications):
"""Constructor class for Replicator
"""
self.replications = replications
self.scenarios = scenarios
# Set up DataFrames for all trials results
self.summary_output = pd.DataFrame()
self.summary_output_by_day = pd.DataFrame()
self.summary_queue_times = pd.DataFrame()
self.summary_resources = pd.DataFrame()
self.summary_tracker = pd.DataFrame()
self.summary_max_queues = pd.DataFrame()
self.output_pivot = pd.DataFrame()
self.resources_pivot = pd.DataFrame()
self.summary_time_stamps = pd.DataFrame()
self.summary_time_stamps_by_priority_pct_50 = pd.DataFrame()
self.summary_time_stamps_by_priority_pct_95 = pd.DataFrame()
self.summary_complete_in_24hrs = | pd.DataFrame() | pandas.DataFrame |
"""Functions for manipulating metadata constants."""
from collections import defaultdict
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from pudl.metadata.constants import PERIODS
def format_errors(*errors: str, title: str = None, pydantic: bool = False) -> str:
"""Format multiple errors into a single error.
Args:
errors: Error messages.
title: Title for error messages.
Examples:
>>> e = format_errors('worse', title='bad')
>>> print(e)
bad
* worse
>>> e = format_errors('worse', title='bad', pydantic=True)
>>> print(e)
bad
* worse
>>> e = format_errors('bad', 'worse')
>>> print(e)
* bad
* worse
>>> e = format_errors('bad', 'worse', pydantic=True)
>>> print(e)
* bad
* worse
"""
title = f"{title}\n" if title else ""
messages = [f"* {e}" for e in errors if e]
if pydantic:
indent = 0 if title else 1
messages[indent:] = [f" {m}" for m in messages[indent:]]
return title + "\n".join(messages)
# --- Foreign keys --- #
def _parse_field_names(fields: List[Union[str, dict]]) -> List[str]:
"""Parse field names.
Args:
fields: Either field names or field descriptors with a `name` key.
Returns:
Field names.
"""
return [field if isinstance(field, str) else field["name"] for field in fields]
def _parse_foreign_key_rule(rule: dict, name: str, key: List[str]) -> List[dict]:
"""Parse foreign key rule from resource descriptor.
Args:
meta: Resource descriptor.
name: Resource name.
key: Resource primary key.
Returns:
Parsed foreign key rules:
* `fields` (List[str]): Local fields.
* `reference['resource']` (str): Reference resource name.
* `reference['fields']` (List[str]): Reference primary key fields.
* `exclude` (List[str]): Names of resources to exclude, including `name`.
"""
rules = []
for fields in rule["fields"]:
exclude = rule.get("exclude", [])
rules.append(
{
"fields": fields,
"reference": {"resource": name, "fields": key},
"exclude": [name] + exclude,
}
)
return rules
def _build_foreign_key_tree(
resources: Dict[str, dict]
) -> Dict[str, Dict[Tuple[str, ...], dict]]:
"""Build foreign key tree.
Args:
resources: Resource descriptors by name.
Returns:
Foreign key tree where the first key is a resource name (str),
the second key is resource field names (Tuple[str, ...]),
and the value describes the reference resource (dict):
* `reference['resource']` (str): Reference name.
* `reference['fields']` (List[str]): Reference field names.
"""
# Parse foreign key rules
# [{fields: [], reference: {name: '', fields: []}, exclude: []}, ...]
rules = []
for name, meta in resources.items():
if "foreign_key_rules" in meta["schema"]:
rule = meta["schema"]["foreign_key_rules"]
rules.extend(
_parse_foreign_key_rule(
rule, name=name, key=meta["schema"]["primary_key"]
)
)
# Build foreign key tree
# [local_name][local_fields] => (reference_name, reference_fields)
tree = defaultdict(dict)
for name, meta in resources.items():
fields = _parse_field_names(meta["schema"]["fields"])
for rule in rules:
local_fields = rule["fields"]
if name not in rule["exclude"] and set(local_fields) <= set(fields):
tree[name][tuple(local_fields)] = rule["reference"]
return dict(tree)
def _traverse_foreign_key_tree(
tree: Dict[str, Dict[Tuple[str, ...], dict]], name: str, fields: Tuple[str, ...]
) -> List[Dict[str, Any]]:
"""Traverse foreign key tree.
Args:
tree: Foreign key tree (see :func:`_build_foreign_key_tree`).
name: Local resource name.
fields: Local resource fields.
Returns:
Sequence of foreign keys starting from `name` and `fields`:
* `fields` (List[str]): Local fields.
* `reference['resource']` (str): Reference resource name.
* `reference['fields']` (List[str]): Reference primary key fields.
"""
keys = []
if name not in tree or fields not in tree[name]:
return keys
ref = tree[name][fields]
keys.append({"fields": list(fields), "reference": ref})
if ref["resource"] not in tree:
return keys
for next_fields in tree[ref["resource"]]:
if set(next_fields) <= set(ref["fields"]):
for key in _traverse_foreign_key_tree(tree, ref["resource"], next_fields):
mapped_fields = [
fields[ref["fields"].index(field)] for field in key["fields"]
]
keys.append({"fields": mapped_fields, "reference": key["reference"]})
return keys
def build_foreign_keys(
resources: Dict[str, dict],
prune: bool = True,
) -> Dict[str, List[dict]]:
"""Build foreign keys for each resource.
A resource's `foreign_key_rules` (if present) determines which other resources will
be assigned a foreign key (`foreign_keys`) to the reference's primary key:
* `fields` (List[List[str]]): Sets of field names for which to create a foreign key.
These are assumed to match the order of the reference's primary key fields.
* `exclude` (Optional[List[str]]): Names of resources to exclude.
Args:
resources: Resource descriptors by name.
prune: Whether to prune redundant foreign keys.
Returns:
Foreign keys for each resource (if any), by resource name.
* `fields` (List[str]): Field names.
* `reference['resource']` (str): Reference resource name.
* `reference['fields']` (List[str]): Reference resource field names.
Examples:
>>> resources = {
... 'x': {
... 'schema': {
... 'fields': ['z'],
... 'primary_key': ['z'],
... 'foreign_key_rules': {'fields': [['z']]}
... }
... },
... 'y': {
... 'schema': {
... 'fields': ['z', 'yy'],
... 'primary_key': ['z', 'yy'],
... 'foreign_key_rules': {'fields': [['z', 'zz']]}
... }
... },
... 'z': {'schema': {'fields': ['z', 'zz']}}
... }
>>> keys = build_foreign_keys(resources)
>>> keys['z']
[{'fields': ['z', 'zz'], 'reference': {'resource': 'y', 'fields': ['z', 'yy']}}]
>>> keys['y']
[{'fields': ['z'], 'reference': {'resource': 'x', 'fields': ['z']}}]
>>> keys = build_foreign_keys(resources, prune=False)
>>> keys['z'][0]
{'fields': ['z'], 'reference': {'resource': 'x', 'fields': ['z']}}
"""
tree = _build_foreign_key_tree(resources)
keys = {}
for name in tree:
firsts = []
followed = []
for fields in tree[name]:
path = _traverse_foreign_key_tree(tree, name, fields)
firsts.append(path[0])
followed.extend(path[1:])
keys[name] = firsts
if prune:
# Keep key if not on path of other key
keys[name] = [key for key in keys[name] if key not in followed]
return keys
# --- Harvest --- #
def split_period(name: str) -> Tuple[str, Optional[str]]:
"""Split the time period from a column name.
Args:
name: Column name.
Returns:
Base name and time period, if any.
Examples:
>>> split_period('report_date')
('report', 'date')
>>> split_period('report_day')
('report_day', None)
>>> split_period('date')
('date', None)
"""
parts = name.rsplit("_", 1)
if len(parts) == 1 or parts[1] not in PERIODS:
return name, None
return parts[0], parts[1]
def expand_periodic_column_names(names: Iterable[str]) -> List[str]:
"""Add smaller periods to a list of column names.
Args:
names: Column names.
Returns:
Column names with additional names for smaller periods.
Examples:
>>> expand_periodic_column_names(['id', 'report_year'])
['id', 'report_year', 'report_quarter', 'report_month', 'report_date']
"""
periods = list(PERIODS)
results = list(names)
for name in names:
base, period = split_period(name)
if period in periods:
results += [f"{base}_{p}" for p in periods[periods.index(period) + 1 :]]
return results
# ---- Aggregation: Column ---- #
"""Aggregation functions.
All take a :class:`pandas.Series` as input (and any optional keyword arguments).
They may either return a single value (ideally of the same data type as the input),
null (:obj:`numpy.nan`),
or raise a :class:`AggregationError` if the input does not meet requirements.
"""
class AggregationError(ValueError):
"""Error raised by aggregation functions."""
pass
def most_frequent(x: pd.Series) -> Any:
"""Return most frequent value (or error if none exists)."""
mode = x.mode(dropna=True)
if mode.size == 1:
return mode[0]
if mode.empty:
return np.nan
raise AggregationError("No value is most frequent.")
def most_and_more_frequent(x: pd.Series, min_frequency: float = None) -> Any:
"""Return most frequent value if more frequent than minimum (or error if none exists).
The minimum frequency ignores null values, so for example,
`1` in `[1, 1, 1, nan]` has a frequency of 1.
"""
x = x.dropna()
mode = x.mode()
if mode.size == 1:
if min_frequency and min_frequency > (x == mode[0]).sum() / len(x):
raise AggregationError(
f"The most frequent value is less frequent than {min_frequency}."
)
return mode[0]
if mode.empty:
return np.nan
raise AggregationError("No value is most frequent.")
def unique(x: pd.Series) -> Any:
"""Return single unique value (or error if none exists)."""
x = x.dropna()
if x.empty:
return np.nan
uniques = x.unique()
if uniques.size == 1:
return uniques[0]
raise AggregationError("Not unique.")
def as_dict(x: pd.Series) -> Dict[Any, list]:
"""Return dictionary of values, listed by index."""
result = {}
x = x.dropna()
for key, xi in x.groupby(x.index):
result[key] = list(xi)
return result
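# Illustrative behaviour of the simple aggregations above (informal, not part of the doctests):
# for s = pd.Series([1, 1, 2]): most_frequent(s) -> 1, unique(s) raises AggregationError;
# as_dict(pd.Series([1, 2], index=["a", "a"])) -> {"a": [1, 2]}.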
def try_aggfunc( # noqa: C901
func: Callable,
raised: bool = True,
error: Union[str, Callable] = None,
) -> Callable:
"""Wrap aggregate function in a try-except for error handling.
Args:
func: Aggregate function.
raised: Whether :class:`AggregationError` errors are raised or returned.
error: Error value, whose type and format depends on `raise`.
Below, `x` is the original input and `e` is the original error.
* `raised=True`: A string with substitions
(e.g. 'Error at {x.name}: {e}')
that replaces the arguments of the original error.
By default, the original error is raised unchanged.
* `raised=False`: A function with signature `f(x, e)`
returning a value that replaces the arguments of the original error.
By default, the original error is returned unchanged.
Returns:
Aggregate function with custom error handling.
Examples:
>>> x = pd.Series([0, 0, 1, 1], index=['a', 'a', 'a', 'b'])
>>> most_frequent(x)
Traceback (most recent call last):
AggregationError: No value is most frequent.
>>> try_aggfunc(most_frequent, raised=False)(x)
AggregationError('No value is most frequent.')
>>> try_aggfunc(most_frequent, error='Bad dtype {x.dtype}')(x)
Traceback (most recent call last):
AggregationError: Bad dtype int64
>>> error = lambda x, e: as_dict(x)
>>> try_aggfunc(most_frequent, raised=False, error=error)(x)
AggregationError({'a': [0, 0, 1], 'b': [1]})
"""
# Conditional statements outside function for execution speed.
wrapped = func
if raised and error is not None:
def wrapped(x):
try:
return func(x)
except AggregationError as e:
e.args = (error.format(x=x, e=e),) # noqa: FS002
raise e
elif not raised and error is None:
def wrapped(x):
try:
return func(x)
except AggregationError as e:
return e
elif not raised and error is not None:
def wrapped(x):
try:
return func(x)
except AggregationError as e:
e.args = (error(x, e),)
return e
return wrapped
# ---- Aggregation: Table ---- #
def groupby_apply( # noqa: C901
df: pd.DataFrame,
by: Iterable,
aggfuncs: Dict[Any, Callable],
raised: bool = True,
error: Callable = None,
) -> Tuple[pd.DataFrame, Dict[Any, pd.Series]]:
"""Aggregate dataframe and capture errors (using apply).
Args:
df: Dataframe to aggregate.
by: Columns names to use to group rows (see :meth:`pandas.DataFrame.groupby`).
aggfuncs: Aggregation functions for columns not in `by`.
raised: Whether :class:`AggregationError` errors are raised or
replaced with :obj:`np.nan` and returned in an error report.
error: A function with signature `f(x, e) -> Tuple[Any, Any]`,
where `x` is the original input and `e` is the original error,
used when `raised=False`.
The first and second value of the returned tuple are used as the
index and values, respectively,
of the :class:`pandas.Series` returned for each column.
By default, the first value is `x.name`
(the values of columns `by` for that row group),
and the second is the original error.
Returns:
Aggregated dataframe with `by` columns set as the index and
an error report with (if `raised=False`)
a :class:`pandas.Series` for each column where errors occurred.
Examples:
>>> df = pd.DataFrame({'x': [0, 0, 1, 1], 'y': pd.Series([2, 2, 2, 3], dtype='Int64')})
>>> df.index = [0, 0, 0, 1]
>>> base = dict(df=df, by='x', aggfuncs={'y': unique})
>>> groupby_apply(**base)
Traceback (most recent call last):
AggregationError: Could not aggregate y at x = 1: Not unique.
>>> _, report = groupby_apply(**base, raised=False)
>>> report['y']
x
1 Not unique.
dtype: object
>>> error = lambda x, e: (x.name, as_dict(x))
>>> _, report = groupby_apply(**base, raised=False, error=error)
>>> report['y']
x
1 {0: [2], 1: [3]}
dtype: object
"""
groupby = df.groupby(by)
data_columns = [col for col in df.columns if col not in by]
series = {}
reports = {}
for col in data_columns:
report = []
aggfunc = aggfuncs[col]
if raised:
msg = f"Could not aggregate {col} at {by} = {{x.name}}: {{e}}"
wrapper = try_aggfunc(aggfunc, raised=True, error=msg)
else:
if error is None:
def error(x, e):
return x.name, str(e)
def wrapper(x):
try:
return aggfunc(x)
except AggregationError as e:
report.append(error(x, e))
return np.nan
ds = groupby[col].apply(wrapper)
if str(ds.dtype) != str(df[col].dtype):
# Undo type changes triggered by nulls
ds = ds.astype(df[col].dtype)
if report:
report = pd.Series(dict(report))
keys = ds.index.names
if report.index.nlevels == len(keys):
report.index.names = keys
reports[col] = report
series[col] = ds
return | pd.DataFrame(series) | pandas.DataFrame |
import pandas as pd
def merge_train():
stances, bodies = load('data/train/train_stances.csv', 'data/train/train_bodies.csv')
return merge(stances, bodies, 'data/train/train.csv')
def merge_test():
stances, bodies = load('data/test/competition_test_stances.csv', 'data/test/competition_test_bodies.csv')
return merge(stances, bodies, 'data/test/test.csv')
def load(stances, bodies):
return | pd.read_csv(stances) | pandas.read_csv |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
        # mat: 2-d matrix with shape (2, 3) passed as input; empty is a
        # factory that makes appropriately sized objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
        # an all-masked array becomes all-NaN, so frame == frame is False everywhere
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
        # collections.Sequence like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
            def __len__(self):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [pd.Series(i, index=['b', 'a', 'c'], name=str(i))
for i in range(3)]
result = pd.DataFrame(series)
expected = pd.DataFrame({'b': [0, 1, 2],
'a': [0, 1, 2],
'c': [0, 1, 2]},
columns=['b', 'a', 'c'],
index=['0', '1', '2'])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with pytest.raises(ValueError, match='arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match='ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_from_dict_columns_parameter(self):
# GH 18529
# Test new columns parameter for from_dict that was added to make
# from_items(..., orient='index', columns=[...]) easier to replicate
result = DataFrame.from_dict(OrderedDict([('A', [1, 2]),
('B', [4, 5])]),
orient='index', columns=['one', 'two'])
expected = DataFrame([[1, 2], [4, 5]], index=['A', 'B'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
orient='columns', columns=['one', 'two'])
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
columns=['one', 'two'])
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
tm.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name='x')
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
pytest.raises(ValueError, DataFrame, s, columns=[1, 2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
# series with name and w/o
s1 = Series(arr, name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({'x': s1, 'Unnamed 0': arr},
columns=['x', 'Unnamed 0'])
tm.assert_frame_equal(df, expected)
# this is a bit non-intuitive here; the series collapse down to arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
def test_constructor_Series_named_and_columns(self):
# GH 9232 validation
s0 = Series(range(5), name=0)
s1 = Series(range(5), name=1)
# matching name and column gives standard frame
tm.assert_frame_equal(pd.DataFrame(s0, columns=[0]),
s0.to_frame())
tm.assert_frame_equal(pd.DataFrame(s1, columns=[1]),
s1.to_frame())
# non-matching produces empty frame
assert pd.DataFrame(s0, columns=[1]).empty
assert pd.DataFrame(s1, columns=[0]).empty
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
# no name
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
other_index = Index(['a', 'b'])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
assert df1.columns[0] == 'x'
tm.assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
assert df2.columns[0] == 0
tm.assert_index_equal(df2.index, other_index)
tm.assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self):
index = list(self.frame.index[:5])
columns = list(self.frame.columns[:3])
result = DataFrame(self.frame._data, index=index,
columns=columns)
tm.assert_index_equal(result.index, Index(index))
tm.assert_index_equal(result.columns, Index(columns))
def test_constructor_from_items(self):
items = [(c, self.frame[c]) for c in self.frame.columns]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items)
tm.assert_frame_equal(recons, self.frame)
# pass some columns
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
tm.assert_frame_equal(recons, self.frame.loc[:, ['C', 'B', 'A']])
# orient='index'
row_items = [(idx, self.mixed_frame.xs(idx))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert recons['A'].dtype == np.float64
msg = "Must pass columns with orient='index'"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items(row_items, orient='index')
# orient='index', but thar be tuples
arr = construct_1d_object_array_from_listlike(
[('bar', 'baz')] * len(self.mixed_frame))
self.mixed_frame['foo'] = arr
row_items = [(idx, list(self.mixed_frame.xs(idx)))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert isinstance(recons['foo'][0], tuple)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
orient='index',
columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
columns=['one', 'two', 'three'])
tm.assert_frame_equal(rs, xp)
def test_constructor_from_items_scalars(self):
# GH 17312
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 4)])
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 2)], columns=['col1'],
orient='index')
def test_from_items_deprecation(self):
# GH 17320
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
columns=['col1', 'col2', 'col3'],
orient='index')
def test_constructor_mix_series_nonseries(self):
df = DataFrame({'A': self.frame['A'],
'B': list(self.frame['B'])}, columns=['A', 'B'])
tm.assert_frame_equal(df, self.frame.loc[:, ['A', 'B']])
msg = 'does not match index length'
with pytest.raises(ValueError, match=msg):
DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
        df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
#!/usr/bin/env python
#coding=utf-8
# Author: <EMAIL>
import numpy as np
import pandas as pd
import sklearn
from sklearn.linear_model import Perceptron
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
class Model():
    def __init__(self):
        # weights are initialised in fit() once the feature dimension is
        # known; the bias starts at 0 and the learning rate is fixed
        self.w = None
        self.b = 0
        self.l_rate = 0.1

    def sign(self, x, w, b):
        # linear decision function w.x + b
        y = np.dot(x, w) + b
        return y

    def fit(self, X_train, y_train):
        # perceptron learning rule: for every misclassified sample (x, y),
        # update w <- w + l_rate * y * x and b <- b + l_rate * y, and keep
        # sweeping the training set until no sample is misclassified
        self.w = np.ones(len(X_train[0]), dtype=np.float32)
        is_wrong = False
        while not is_wrong:
            wrong_count = 0
            for d in range(len(X_train)):
                x = X_train[d]
                y = y_train[d]
                if y * self.sign(x, self.w, self.b) <= 0:
                    self.w = self.w + self.l_rate * np.dot(y, x)
                    self.b = self.b + self.l_rate * y
                    wrong_count += 1
            if wrong_count == 0:
                is_wrong = True
        return 'Perceptron Model!'

    def score(self):
        pass
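
# A minimal usage sketch, illustrative only: it assumes the usual two-class
# setup for this exercise (first 100 iris samples, first two features, labels
# mapped to {-1, +1}); the helper name and the slicing below are assumptions
# and not part of the original script.
def _demo_fit():
    irdata = load_iris()
    X_train = irdata.data[:100, :2]                      # sepal length/width
    y_train = np.where(irdata.target[:100] == 0, -1, 1)  # two classes as -1/+1
    perceptron = Model()
    perceptron.fit(X_train, y_train)
    return perceptron.w, perceptron.b
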
def load_data():
#load data
irdata = load_iris()
    df = pd.DataFrame(irdata.data, columns=irdata.feature_names)
import json
import logging
from typing import Optional, Union

import boto3
import pandas as pd
logging.basicConfig(
format="%(asctime)s %(name)-12s %(levelname)-8s %(message)s", level=logging.INFO
)
def write_file_to_s3(bucket: str, key: str, data: Union[list[dict], str],
                     profile_name: Optional[str] = None):
"""
Writes a file to s3. Json objects will be serialised before writing.
:param bucket: The bucket to write to in s3.
:param key: The key path and filename where the data will be stored.
:param data: The data object to be written.
:param profile_name: Optional AWS profile name.
:return: None
"""
logging.info(f"Attempting to write data to s3://{bucket}/{key}")
if profile_name is None:
boto3_session = boto3.Session()
else:
boto3_session = boto3.Session(profile_name=profile_name)
s3_resource = boto3_session.resource("s3")
s3_object = s3_resource.Object(bucket, key)
    data_frame = pd.DataFrame(data)
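

# A hypothetical invocation, for illustration only: the bucket name, key and
# payload are placeholders, the function body above is truncated at the point
# where the payload is normalised into a DataFrame, and an actual run needs
# valid AWS credentials for the chosen (or default) profile.
if __name__ == "__main__":
    write_file_to_s3(
        bucket="example-bucket",
        key="exports/records.json",
        data=[{"id": 1, "value": "a"}, {"id": 2, "value": "b"}],
    )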
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
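        # Integer and float epoch values with unit='s' should convert to
        # Timestamps; iNaT / NaN entries should map to NaT.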
        epoch = 1370745748
        s = Series([epoch + t for t in range(20)])
        result = to_datetime(s, unit='s')
        expected = Series([Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t)
                           for t in range(20)])
        assert_series_equal(result, expected)
        s = Series([epoch + t for t in range(20)]).astype(float)
        result = to_datetime(s, unit='s')
        expected = Series([Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t)
                           for t in range(20)])
        assert_series_equal(result, expected)
        s = Series([epoch + t for t in range(20)] + [iNaT])
        result = to_datetime(s, unit='s')
        expected = Series([Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t)
                           for t in range(20)] + [NaT])
        assert_series_equal(result, expected)
        s = Series([epoch + t for t in range(20)] + [iNaT]).astype(float)
        result = to_datetime(s, unit='s')
        expected = Series([Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t)
                           for t in range(20)] + [NaT])
        assert_series_equal(result, expected)
        s = concat([Series([epoch + t for t in range(20)]).astype(float),
                    Series([np.nan])], ignore_index=True)
        result = to_datetime(s, unit='s')
        expected = Series([Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t)
                           for t in range(20)] + [NaT])
        assert_series_equal(result, expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
        rs = to_datetime('2001')
        xp = datetime(2001, 1, 1)
        self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
        td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1, 2, 3])
        expected = pd.to_datetime(td, format='%b %y')
        result = td.apply(pd.to_datetime, format='%b %y')
        assert_series_equal(result, expected)
        td = pd.Series(['May 04', 'Jun 02', ''], index=[1, 2, 3])
        self.assertRaises(ValueError, lambda: pd.to_datetime(td, format='%b %y'))
        self.assertRaises(ValueError, lambda: td.apply(pd.to_datetime, format='%b %y'))
        expected = pd.to_datetime(td, format='%b %y', coerce=True)
        result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
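        # Vectorized datetime field accessors should return -1 at NaT
        # positions instead of raising.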
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
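        # A non-nanosecond datetime64 scalar should round-trip through
        # DatetimeIndex and Timestamp construction.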
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
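        # A Series indexed by datetime.date objects should align with a
        # Timestamp-indexed Series (the date index is promoted to datetime64).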
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
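        # Exercise all four inclusive/exclusive endpoint combinations, for
        # windows both within a single day and spanning midnight.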
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
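        # normalize() should floor timestamps to midnight and the result
        # should report is_normalized.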
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
        rng_ns = pd.DatetimeIndex(
            np.array([1380585623454345752,
                      1380585612343234312]).astype("datetime64[ns]"))
        rng_ns_normalized = rng_ns.normalize()
        expected = pd.DatetimeIndex(
            np.array([1380585600000000000,
                      1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
        fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter',
                  'is_month_start', 'is_month_end', 'is_quarter_start',
                  'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
        self.assertEqual(result['index'].dtype, np.dtype('M8[ns]'))
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
        s = Series(range(100),
                   index=date_range('20130101', freq='s', periods=100),
                   dtype='float')
        s[10:30] = np.nan
        expected = Series([34.5, 79.5],
                          index=[Period('2013-01-01 00:00', 'T'),
                                 Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
        # Index is moved back a day with the timezone conversion from UTC to
        # Pacific
        expected_index = (pd.period_range(start=start, end=end, freq='D') - 1)
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
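    # Helper: a random-valued Series over a date_range, shared by the
    # timeseries tests in this module.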
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
        start = datetime.datetime.now()
        idx = DatetimeIndex(start=start, freq="1d", periods=10)
        df = DataFrame(lrange(10), index=idx)
        df["2013-01-14 23:44:34.437768-05:00":]  # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
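        # Constructing from a Timestamp with sub-microsecond components should
        # preserve nanosecond resolution in the resulting DatetimeIndex.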
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assertTrue(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
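        # Inserting a datetime keeps a DatetimeIndex; inserting a non-datetime
        # value falls back to an object Index.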
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assertNotIsInstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assertEqual(result.freqstr, 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_numpy_array_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assertTrue((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
self.assert_numpy_array_equal(result, expected)
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
self.assertTrue((result == expected).all())
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
self.assertTrue((result == expected).all())
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
randn(), r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
assert_array_equal(cols.values, joined.values)
def test_slice_keeps_name(self):
# GH4226
st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
dr = pd.date_range(st, et, freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assertIs(index, joined)
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
if _np_version_under1p7:
raise nose.SkipTest
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * pd.datetools.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assertRaisesRegexp(ValueError, 'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02',
'2014-02', '2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3])
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(idx3))
class TestDatetime64(tm.TestCase):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(rand(len(dti)), dti)
def test_datetimeindex_accessors(self):
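        # Exercise the datetime field accessors (year, month, ...,
        # is_month_start/end, is_quarter_start/end, is_year_start/end) on a
        # daily index covering all of 1998.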
dti = DatetimeIndex(
freq='D', start=datetime(1998, 1, 1), periods=365)
self.assertEqual(dti.year[0], 1998)
self.assertEqual(dti.month[0], 1)
self.assertEqual(dti.day[0], 1)
self.assertEqual(dti.hour[0], 0)
self.assertEqual(dti.minute[0], 0)
self.assertEqual(dti.second[0], 0)
self.assertEqual(dti.microsecond[0], 0)
self.assertEqual(dti.dayofweek[0], 3)
self.assertEqual(dti.dayofyear[0], 1)
self.assertEqual(dti.dayofyear[120], 121)
self.assertEqual(dti.weekofyear[0], 1)
self.assertEqual(dti.weekofyear[120], 18)
self.assertEqual(dti.quarter[0], 1)
self.assertEqual(dti.quarter[120], 2)
self.assertEqual(dti.is_month_start[0], True)
self.assertEqual(dti.is_month_start[1], False)
self.assertEqual(dti.is_month_start[31], True)
self.assertEqual(dti.is_quarter_start[0], True)
self.assertEqual(dti.is_quarter_start[90], True)
self.assertEqual(dti.is_year_start[0], True)
self.assertEqual(dti.is_year_start[364], False)
self.assertEqual(dti.is_month_end[0], False)
self.assertEqual(dti.is_month_end[30], True)
self.assertEqual(dti.is_month_end[31], False)
self.assertEqual(dti.is_month_end[364], True)
self.assertEqual(dti.is_quarter_end[0], False)
self.assertEqual(dti.is_quarter_end[30], False)
self.assertEqual(dti.is_quarter_end[89], True)
self.assertEqual(dti.is_quarter_end[364], True)
self.assertEqual(dti.is_year_end[0], False)
self.assertEqual(dti.is_year_end[364], True)
self.assertEqual(len(dti.year), 365)
self.assertEqual(len(dti.month), 365)
self.assertEqual(len(dti.day), 365)
self.assertEqual(len(dti.hour), 365)
self.assertEqual(len(dti.minute), 365)
self.assertEqual(len(dti.second), 365)
self.assertEqual(len(dti.microsecond), 365)
self.assertEqual(len(dti.dayofweek), 365)
self.assertEqual(len(dti.dayofyear), 365)
self.assertEqual(len(dti.weekofyear), 365)
self.assertEqual(len(dti.quarter), 365)
self.assertEqual(len(dti.is_month_start), 365)
self.assertEqual(len(dti.is_month_end), 365)
self.assertEqual(len(dti.is_quarter_start), 365)
self.assertEqual(len(dti.is_quarter_end), 365)
self.assertEqual(len(dti.is_year_start), 365)
self.assertEqual(len(dti.is_year_end), 365)
dti = DatetimeIndex(
freq='BQ-FEB', start=datetime(1998, 1, 1), periods=4)
self.assertEqual(sum(dti.is_quarter_start), 0)
self.assertEqual(sum(dti.is_quarter_end), 4)
self.assertEqual(sum(dti.is_year_start), 0)
self.assertEqual(sum(dti.is_year_end), 1)
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay, CBD requires np >= 1.7
if not _np_version_under1p7:
bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
self.assertRaises(ValueError, lambda: dti.is_month_start)
dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
self.assertEqual(dti.is_month_start[0], 1)
tests = [
(Timestamp('2013-06-01', offset='M').is_month_start, 1),
(Timestamp('2013-06-01', offset='BM').is_month_start, 0),
(Timestamp('2013-06-03', offset='M').is_month_start, 0),
(Timestamp('2013-06-03', offset='BM').is_month_start, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_month_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_quarter_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_year_end, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_month_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_quarter_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_year_start, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_month_end, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_quarter_end, 0),
(Timestamp('2013-03-31', offset='QS-FEB').is_year_end, 0),
(Timestamp('2013-02-01', offset='QS-FEB').is_month_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_quarter_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_year_start, 1),
(Timestamp('2013-06-30', offset='BQ').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQ').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_quarter_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQS-APR').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQS-APR').is_quarter_end, 1),
(Timestamp('2013-03-29', offset='BQS-APR').is_year_end, 1),
(Timestamp('2013-11-01', offset='AS-NOV').is_year_start, 1),
(Timestamp('2013-10-31', offset='AS-NOV').is_year_end, 1)]
for ts, value in tests:
self.assertEqual(ts, value)
def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
self.assert_numpy_array_equal(dti.nanosecond, np.arange(10))
def test_datetimeindex_diff(self):
dti1 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=100)
dti2 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=98)
self.assertEqual(len(dti1.diff(dti2)), 2)
def test_fancy_getitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(s[48], 48)
self.assertEqual(s['1/2/2009'], 48)
self.assertEqual(s['2009-1-2'], 48)
self.assertEqual(s[datetime(2009, 1, 2)], 48)
self.assertEqual(s[lib.Timestamp(datetime(2009, 1, 2))], 48)
self.assertRaises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
self.assertEqual(s[48], -1)
s['1/2/2009'] = -2
self.assertEqual(s[48], -2)
s['1/2/2009':'2009-06-05'] = -3
self.assertTrue((s[48:54] == -3).all())
def test_datetimeindex_constructor(self):
arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04']
self.assertRaises(Exception, DatetimeIndex, arr)
arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']
idx1 = DatetimeIndex(arr)
arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04']
idx2 = DatetimeIndex(arr)
arr = [lib.Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',
'2005-01-04']
idx3 = DatetimeIndex(arr)
arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005',
'2005-01-04'], dtype='O')
idx4 = DatetimeIndex(arr)
arr = to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'])
idx5 = DatetimeIndex(arr)
arr = to_datetime(
['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04'])
idx6 = DatetimeIndex(arr)
idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True)
idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False,
yearfirst=True)
self.assertTrue(idx7.equals(idx8))
for other in [idx2, idx3, idx4, idx5, idx6]:
self.assertTrue((idx1.values == other.values).all())
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
idx = DatetimeIndex(start=sdate, freq='1B', periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[0], sdate + 0 * dt.bday)
self.assertEqual(idx.freq, 'B')
idx = DatetimeIndex(end=edate, freq=('D', 5), periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[-1], edate)
self.assertEqual(idx.freq, '5D')
idx1 = DatetimeIndex(start=sdate, end=edate, freq='W-SUN')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.Week(weekday=6))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='QS')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.QuarterBegin(startingMonth=1))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='BQ')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.BQuarterEnd(startingMonth=12))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
def test_dayfirst(self):
# GH 5917
arr = ['10/02/2014', '11/02/2014', '12/02/2014']
expected = DatetimeIndex([datetime(2014, 2, 10),
datetime(2014, 2, 11),
datetime(2014, 2, 12)])
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True)
idx4 = to_datetime(np.array(arr), dayfirst=True)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
self.assertTrue(expected.equals(idx1))
self.assertTrue(expected.equals(idx2))
self.assertTrue(expected.equals(idx3))
self.assertTrue(expected.equals(idx4))
self.assertTrue(expected.equals(idx5))
self.assertTrue(expected.equals(idx6))
def test_dti_snap(self):
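        # snap() should move each date to the nearest boundary of the target
        # frequency (W-MON, then business days).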
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
res = dti.snap(freq='W-MON')
exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
exp = exp.repeat([3, 4])
self.assertTrue((res == exp).all())
res = dti.snap(freq='B')
exp = date_range('1/1/2002', '1/7/2002', freq='b')
exp = exp.repeat([1, 1, 1, 2, 2])
self.assertTrue((res == exp).all())
def test_dti_reset_index_round_trip(self):
dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D')
d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
self.assertEqual(d2.dtypes[0], np.dtype('M8[ns]'))
d3 = d2.set_index('index')
assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value'])
df = df.set_index('Date')
self.assertEqual(df.index[0], stamp)
self.assertEqual(df.reset_index()['Date'][0], stamp)
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range('2011/01/01', periods=6, freq='M', tz='US/Eastern')
idx2 = date_range('2013', periods=6, freq='A', tz='Asia/Tokyo')
df = df.set_index(idx1)
self.assertTrue(df.index.equals(idx1))
df = df.reindex(idx2)
self.assertTrue(df.index.equals(idx2))
def test_datetimeindex_union_join_empty(self):
dti = DatetimeIndex(start='1/1/2001', end='2/1/2001', freq='D')
empty = Index([])
result = dti.union(empty)
tm.assert_isinstance(result, DatetimeIndex)
        self.assertIs(result, dti)
result = dti.join(empty)
tm.assert_isinstance(result, DatetimeIndex)
def test_series_set_value(self):
# #1561
dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]
index = DatetimeIndex(dates)
s = Series().set_value(dates[0], 1.)
s2 = s.set_value(dates[1], np.nan)
exp = Series([1., np.nan], index=index)
assert_series_equal(s2, exp)
# s = Series(index[:1], index[:1])
# s2 = s.set_value(dates[1], index[1])
# self.assertEqual(s2.values.dtype, 'M8[ns]')
@slow
def test_slice_locs_indexerror(self):
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10)
for i in range(100000)]
s = Series(lrange(100000), times)
s.ix[datetime(1900, 1, 1):datetime(2100, 1, 1)]
class TestSeriesDatetime64(tm.TestCase):
def setUp(self):
self.series = Series(date_range('1/1/2000', periods=10))
def test_auto_conversion(self):
series = Series(list(date_range('1/1/2000', periods=10)))
self.assertEqual(series.dtype, 'M8[ns]')
def test_constructor_cant_cast_datetime64(self):
self.assertRaises(TypeError, Series,
date_range('1/1/2000', periods=10), dtype=float)
def test_series_comparison_scalars(self):
val = datetime(2000, 1, 4)
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
val = self.series[5]
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
def test_between(self):
left, right = self.series[[2, 7]]
result = self.series.between(left, right)
expected = (self.series >= left) & (self.series <= right)
assert_series_equal(result, expected)
#----------------------------------------------------------------------
# NaT support
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
val = series[3]
self.assertTrue(com.isnull(val))
series[2] = val
self.assertTrue(com.isnull(series[2]))
def test_set_none_nan(self):
self.series[3] = None
self.assertIs(self.series[3], NaT)
self.series[3:5] = None
self.assertIs(self.series[4], NaT)
self.series[5] = np.nan
self.assertIs(self.series[5], NaT)
self.series[5:7] = np.nan
self.assertIs(self.series[6], NaT)
def test_intercept_astype_object(self):
# this test no longer makes sense as series is by default already M8[ns]
expected = self.series.astype('object')
df = DataFrame({'a': self.series,
'b': np.random.randn(len(self.series))})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
df = DataFrame({'a': self.series,
'b': ['foo'] * len(self.series)})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
def test_union(self):
rng1 = date_range('1/1/1999', '1/1/2012', freq='MS')
s1 = Series(np.random.randn(len(rng1)), rng1)
rng2 = date_range('1/1/1980', '12/1/2001', freq='MS')
s2 = Series(np.random.randn(len(rng2)), rng2)
df = DataFrame({'s1': s1, 's2': s2})
self.assertEqual(df.index.values.dtype, np.dtype('M8[ns]'))
def test_intersection(self):
rng = date_range('6/1/2000', '6/15/2000', freq='D')
rng = rng.delete(5)
rng2 = date_range('5/15/2000', '6/20/2000', freq='D')
rng2 = DatetimeIndex(rng2.values)
result = rng.intersection(rng2)
self.assertTrue(result.equals(rng))
# empty same freq GH2129
rng = date_range('6/1/2000', '6/15/2000', freq='T')
result = rng[0:0].intersection(rng)
self.assertEqual(len(result), 0)
result = rng.intersection(rng[0:0])
self.assertEqual(len(result), 0)
def test_date_range_bms_bug(self):
# #1645
rng = date_range('1/1/2000', periods=10, freq='BMS')
ex_first = Timestamp('2000-01-03')
self.assertEqual(rng[0], ex_first)
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.ix['1/3/2000']
self.assertEqual(result.name, df.index[2])
result = df.T['1/3/2000']
self.assertEqual(result.name, df.index[2])
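# --- Illustrative sketch, not part of the original test suite ---
# Minimal example of the NaT coercion and Series.between behaviour covered
# by TestSeriesDatetime64 above; assumes only that pandas imports as pd.
def _example_nat_and_between():
    import pandas as pd

    s = pd.Series(pd.date_range('2000-01-01', periods=5))
    s[2] = None                      # None is coerced to NaT in an M8[ns] Series
    assert pd.isnull(s[2])
    # between() is inclusive on both endpoints
    return s.between(s[0], s[4])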
class TestTimestamp(tm.TestCase):
def test_class_ops(self):
_skip_if_no_pytz()
import pytz
def compare(x,y):
self.assertEqual(int(Timestamp(x).value/1e9), int(Timestamp(y).value/1e9))
compare(Timestamp.now(),datetime.now())
compare(Timestamp.now('UTC'),datetime.now(pytz.timezone('UTC')))
compare(Timestamp.utcnow(),datetime.utcnow())
compare(Timestamp.today(),datetime.today())
def test_basics_nanos(self):
val = np.int64(946684800000000000).view('M8[ns]')
stamp = Timestamp(val.view('i8') + 500)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 500)
def test_unit(self):
def check(val,unit=None,h=1,s=1,us=0):
stamp = Timestamp(val, unit=unit)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.day, 1)
self.assertEqual(stamp.hour, h)
if unit != 'D':
self.assertEqual(stamp.minute, 1)
self.assertEqual(stamp.second, s)
self.assertEqual(stamp.microsecond, us)
else:
self.assertEqual(stamp.minute, 0)
self.assertEqual(stamp.second, 0)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 0)
ts = Timestamp('20000101 01:01:01')
val = ts.value
days = (ts - Timestamp('1970-01-01')).days
check(val)
check(val/long(1000),unit='us')
check(val/long(1000000),unit='ms')
check(val/long(1000000000),unit='s')
check(days,unit='D',h=0)
# using truediv, so these are like floats
if compat.PY3:
check((val+500000)/long(1000000000),unit='s',us=500)
check((val+500000000)/long(1000000000),unit='s',us=500000)
check((val+500000)/long(1000000),unit='ms',us=500)
# get chopped in py2
else:
check((val+500000)/long(1000000000),unit='s')
check((val+500000000)/long(1000000000),unit='s')
check((val+500000)/long(1000000),unit='ms')
# ok
check((val+500000)/long(1000),unit='us',us=500)
check((val+500000000)/long(1000000),unit='ms',us=500000)
# floats
check(val/1000.0 + 5,unit='us',us=5)
check(val/1000.0 + 5000,unit='us',us=5000)
check(val/1000000.0 + 0.5,unit='ms',us=500)
check(val/1000000.0 + 0.005,unit='ms',us=5)
check(val/1000000000.0 + 0.5,unit='s',us=500000)
check(days + 0.5,unit='D',h=12)
# nan
result = Timestamp(np.nan)
self.assertIs(result, NaT)
result = Timestamp(None)
self.assertIs(result, NaT)
result = Timestamp(iNaT)
self.assertIs(result, NaT)
result = Timestamp(NaT)
self.assertIs(result, NaT)
def test_comparison(self):
# 5-18-2012 00:00:00.000
stamp = long(1337299200000000000)
val = Timestamp(stamp)
self.assertEqual(val, val)
self.assertFalse(val != val)
self.assertFalse(val < val)
self.assertTrue(val <= val)
self.assertFalse(val > val)
self.assertTrue(val >= val)
other = datetime(2012, 5, 18)
self.assertEqual(val, other)
self.assertFalse(val != other)
self.assertFalse(val < other)
self.assertTrue(val <= other)
self.assertFalse(val > other)
self.assertTrue(val >= other)
other = Timestamp(stamp + 100)
self.assertNotEqual(val, other)
self.assertNotEqual(val, other)
self.assertTrue(val < other)
self.assertTrue(val <= other)
self.assertTrue(other > val)
self.assertTrue(other >= val)
def test_cant_compare_tz_naive_w_aware(self):
_skip_if_no_pytz()
# #1404
a = Timestamp('3/12/2012')
b = Timestamp('3/12/2012', tz='utc')
self.assertRaises(Exception, a.__eq__, b)
self.assertRaises(Exception, a.__ne__, b)
self.assertRaises(Exception, a.__lt__, b)
self.assertRaises(Exception, a.__gt__, b)
self.assertRaises(Exception, b.__eq__, a)
self.assertRaises(Exception, b.__ne__, a)
self.assertRaises(Exception, b.__lt__, a)
self.assertRaises(Exception, b.__gt__, a)
if sys.version_info < (3, 3):
self.assertRaises(Exception, a.__eq__, b.to_pydatetime())
self.assertRaises(Exception, a.to_pydatetime().__eq__, b)
else:
self.assertFalse(a == b.to_pydatetime())
self.assertFalse(a.to_pydatetime() == b)
def test_delta_preserve_nanos(self):
val = Timestamp(long(1337299200000000123))
result = val + timedelta(1)
self.assertEqual(result.nanosecond, val.nanosecond)
def test_frequency_misc(self):
self.assertEqual(fmod.get_freq_group('T'),
fmod.FreqGroup.FR_MIN)
code, stride = fmod.get_freq_code(offsets.Hour())
self.assertEqual(code, fmod.FreqGroup.FR_HR)
code, stride = fmod.get_freq_code((5, 'T'))
self.assertEqual(code, fmod.FreqGroup.FR_MIN)
self.assertEqual(stride, 5)
offset = offsets.Hour()
result = fmod.to_offset(offset)
self.assertEqual(result, offset)
result = fmod.to_offset((5, 'T'))
expected = offsets.Minute(5)
self.assertEqual(result, expected)
self.assertRaises(ValueError, fmod.get_freq_code, (5, 'baz'))
self.assertRaises(ValueError, fmod.to_offset, '100foo')
self.assertRaises(ValueError, fmod.to_offset, ('', ''))
result = fmod.get_standard_freq(offsets.Hour())
self.assertEqual(result, 'H')
def test_hash_equivalent(self):
d = {datetime(2011, 1, 1): 5}
stamp = Timestamp(datetime(2011, 1, 1))
self.assertEqual(d[stamp], 5)
def test_timestamp_compare_scalars(self):
# case where ndim == 0
lhs = np.datetime64(datetime(2013, 12, 6))
rhs = Timestamp('now')
nat = Timestamp('nat')
ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
if pd._np_version_under1p7:
# you have to convert to timestamp for this to work with numpy
# scalars
expected = left_f(Timestamp(lhs), rhs)
# otherwise a TypeError is thrown
if left not in ('eq', 'ne'):
with tm.assertRaises(TypeError):
left_f(lhs, rhs)
else:
expected = left_f(lhs, rhs)
result = right_f(rhs, lhs)
self.assertEqual(result, expected)
expected = left_f(rhs, nat)
result = right_f(nat, rhs)
self.assertEqual(result, expected)
def test_timestamp_compare_series(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH4982
s = Series(date_range('20010101', periods=10), name='dates')
s_nat = s.copy(deep=True)
s[0] = pd.Timestamp('nat')
s[3] = pd.Timestamp('nat')
ops = {'lt': 'gt', 'le': 'ge', 'eq': 'eq', 'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
expected = left_f(s, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), s)
tm.assert_series_equal(result, expected)
# nats
expected = left_f(s, Timestamp('nat'))
result = right_f(Timestamp('nat'), s)
tm.assert_series_equal(result, expected)
# compare to timestamp with series containing nats
expected = left_f(s_nat, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), s_nat)
tm.assert_series_equal(result, expected)
# compare to nat with series containing nats
expected = left_f(s_nat, Timestamp('nat'))
result = right_f(Timestamp('nat'), s_nat)
tm.assert_series_equal(result, expected)
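# --- Illustrative sketch, not part of the original test suite ---
# Two Timestamp behaviours checked by TestTimestamp above, shown standalone:
# epoch-based construction with an explicit unit, and hash-equivalence with
# datetime.datetime, so either type works as the same dict key.
def _example_timestamp_basics():
    from datetime import datetime
    import pandas as pd

    stamp = pd.Timestamp(946684800, unit='s')   # 2000-01-01 00:00:00
    assert (stamp.year, stamp.month, stamp.day) == (2000, 1, 1)
    d = {datetime(2011, 1, 1): 5}
    assert d[pd.Timestamp('2011-01-01')] == 5
    return stamp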
class TestSlicing(tm.TestCase):
def test_slice_year(self):
dti = DatetimeIndex(freq='B', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
result = s['2005']
expected = s[s.index.year == 2005]
assert_series_equal(result, expected)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
result = df.ix['2005']
expected = df[df.index.year == 2005]
assert_frame_equal(result, expected)
rng = date_range('1/1/2000', '1/1/2010')
result = rng.get_loc('2009')
expected = slice(3288, 3653)
self.assertEqual(result, expected)
def test_slice_quarter(self):
dti = DatetimeIndex(freq='D', start=datetime(2000, 6, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(len(s['2001Q1']), 90)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
self.assertEqual(len(df.ix['1Q01']), 90)
def test_slice_month(self):
dti = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(len(s['2005-11']), 30)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
self.assertEqual(len(df.ix['2005-11']), 30)
assert_series_equal(s['2005-11'], s['11-2005'])
def test_partial_slice(self):
rng = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-05':'2006-02']
expected = s['20050501':'20060228']
assert_series_equal(result, expected)
result = s['2005-05':]
expected = s['20050501':]
assert_series_equal(result, expected)
result = s[:'2006-02']
expected = s[:'20060228']
assert_series_equal(result, expected)
result = s['2005-1-1']
self.assertEqual(result, s.irow(0))
self.assertRaises(Exception, s.__getitem__, '2004-12-31')
def test_partial_slice_daily(self):
rng = DatetimeIndex(freq='H', start=datetime(2005, 1, 31), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-31']
assert_series_equal(result, s.ix[:24])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00')
def test_partial_slice_hourly(self):
rng = DatetimeIndex(freq='T', start=datetime(2005, 1, 1, 20, 0, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1']
assert_series_equal(result, s.ix[:60 * 4])
result = s['2005-1-1 20']
assert_series_equal(result, s.ix[:60])
self.assertEqual(s['2005-1-1 20:00'], s.ix[0])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00:15')
def test_partial_slice_minutely(self):
rng = DatetimeIndex(freq='S', start=datetime(2005, 1, 1, 23, 59, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1 23:59']
assert_series_equal(result, s.ix[:60])
result = s['2005-1-1']
assert_series_equal(result, s.ix[:60])
self.assertEqual(s[Timestamp('2005-1-1 23:59:00')], s.ix[0])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00:00:00')
def test_partial_slicing_with_multiindex(self):
# GH 4758
# partial string indexing with a multi-index buggy
df = DataFrame({'ACCOUNT':["ACCT1", "ACCT1", "ACCT1", "ACCT2"],
'TICKER':["ABC", "MNP", "XYZ", "XYZ"],
'val':[1,2,3,4]},
index=date_range("2013-06-19 09:30:00", periods=4, freq='5T'))
df_multi = df.set_index(['ACCOUNT', 'TICKER'], append=True)
expected = DataFrame([[1]],index=Index(['ABC'],name='TICKER'),columns=['val'])
result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1')]
assert_frame_equal(result, expected)
expected = df_multi.loc[(pd.Timestamp('2013-06-19 09:30:00', tz=None), 'ACCT1', 'ABC')]
result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1', 'ABC')]
assert_series_equal(result, expected)
# this is a KeyError as we don't do partial string selection on multi-levels
def f():
df_multi.loc[('2013-06-19', 'ACCT1', 'ABC')]
self.assertRaises(KeyError, f)
# GH 4294
# partial slice on a series mi
s = pd.DataFrame(randn(1000, 1000), index=pd.date_range('2000-1-1', periods=1000)).stack()
s2 = s[:-1].copy()
expected = s2['2000-1-4']
result = s2[pd.Timestamp('2000-1-4')]
assert_series_equal(result, expected)
result = s[pd.Timestamp('2000-1-4')]
expected = s['2000-1-4']
assert_series_equal(result, expected)
df2 = pd.DataFrame(s)
expected = df2.ix['2000-1-4']
result = df2.ix[pd.Timestamp('2000-1-4')]
assert_frame_equal(result, expected)
def test_date_range_normalize(self):
snap = datetime.today()
n = 50
rng = date_range(snap, periods=n, normalize=False, freq='2D')
offset = timedelta(2)
values = np.array([snap + i * offset for i in range(n)],
dtype='M8[ns]')
self.assert_numpy_array_equal(rng, values)
rng = date_range(
'1/1/2000 08:15', periods=n, normalize=False, freq='B')
the_time = time(8, 15)
for val in rng:
self.assertEqual(val.time(), the_time)
def test_timedelta(self):
# this is valid too
index = date_range('1/1/2000', periods=50, freq='B')
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
self.assertTrue(tm.equalContents(index, back))
self.assertEqual(shifted.freq, index.freq)
self.assertEqual(shifted.freq, back.freq)
result = index - timedelta(1)
expected = index + timedelta(-1)
self.assertTrue(result.equals(expected))
# GH4134, buggy with timedeltas
rng = date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
self.assertTrue(result1.equals(result4))
self.assertTrue(result2.equals(result3))
def test_shift(self):
ts = Series(np.random.randn(5),
index=date_range('1/1/2000', periods=5, freq='H'))
result = ts.shift(1, freq='5T')
exp_index = ts.index.shift(1, freq='5T')
self.assertTrue(result.index.equals(exp_index))
# GH #1063, multiple of same base
result = ts.shift(1, freq='4H')
exp_index = ts.index + datetools.Hour(4)
self.assertTrue(result.index.equals(exp_index))
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.shift, 1)
def test_setops_preserve_freq(self):
rng = date_range('1/1/2000', '1/1/2002')
result = rng[:50].union(rng[50:100])
self.assertEqual(result.freq, rng.freq)
result = rng[:50].union(rng[30:100])
self.assertEqual(result.freq, rng.freq)
result = rng[:50].union(rng[60:100])
self.assertIsNone(result.freq)
result = rng[:50].intersection(rng[25:75])
self.assertEqual(result.freqstr, 'D')
nofreq = DatetimeIndex(list(rng[25:75]))
result = rng[:50].union(nofreq)
self.assertEqual(result.freq, rng.freq)
result = rng[:50].intersection(nofreq)
self.assertEqual(result.freq, rng.freq)
def test_min_max(self):
rng = date_range('1/1/2000', '12/31/2000')
rng2 = rng.take(np.random.permutation(len(rng)))
the_min = rng2.min()
the_max = rng2.max()
tm.assert_isinstance(the_min, Timestamp)
tm.assert_isinstance(the_max, Timestamp)
self.assertEqual(the_min, rng[0])
self.assertEqual(the_max, rng[-1])
self.assertEqual(rng.min(), rng[0])
self.assertEqual(rng.max(), rng[-1])
def test_min_max_series(self):
rng = date_range('1/1/2000', periods=10, freq='4h')
lvls = ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C', 'C']
df = DataFrame({'TS': rng, 'V': np.random.randn(len(rng)),
'L': lvls})
result = df.TS.max()
exp = Timestamp(df.TS.iget(-1))
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, exp)
result = df.TS.min()
exp = Timestamp(df.TS.iget(0))
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, exp)
def test_from_M8_structured(self):
dates = [(datetime(2012, 9, 9, 0, 0),
datetime(2012, 9, 8, 15, 10))]
arr = np.array(dates,
dtype=[('Date', 'M8[us]'), ('Forecasting', 'M8[us]')])
df = DataFrame(arr)
self.assertEqual(df['Date'][0], dates[0][0])
self.assertEqual(df['Forecasting'][0], dates[0][1])
s = Series(arr['Date'])
        tm.assert_isinstance(s[0], Timestamp)
self.assertEqual(s[0], dates[0][0])
s = Series.from_array(arr['Date'], Index([0]))
self.assertEqual(s[0], dates[0][0])
def test_get_level_values_box(self):
from pandas import MultiIndex
dates = date_range('1/1/2000', periods=4)
levels = [dates, [0, 1]]
labels = [[0, 0, 1, 1, 2, 2, 3, 3],
[0, 1, 0, 1, 0, 1, 0, 1]]
index = MultiIndex(levels=levels, labels=labels)
self.assertTrue(isinstance(index.get_level_values(0)[0], Timestamp))
def test_frame_apply_dont_convert_datetime64(self):
from pandas.tseries.offsets import BDay
df = DataFrame({'x1': [datetime(1996, 1, 1)]})
df = df.applymap(lambda x: x + BDay())
df = df.applymap(lambda x: x + BDay())
self.assertTrue(df.x1.dtype == 'M8[ns]')
def test_date_range_fy5252(self):
dr = date_range(start="2013-01-01",
periods=2,
freq=offsets.FY5253(startingMonth=1,
weekday=3,
variation="nearest"))
self.assertEqual(dr[0], Timestamp('2013-01-31'))
self.assertEqual(dr[1], Timestamp('2014-01-30'))
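# --- Illustrative sketch, not part of the original test suite ---
# Partial string indexing as exercised by TestSlicing above. The tests use
# plain __getitem__ (and .ix) from the pandas of that era; .loc is the
# forward-compatible spelling of the same selections.
def _example_partial_string_indexing():
    import numpy as np
    import pandas as pd

    rng = pd.date_range('2005-01-01', periods=500, freq='D')
    s = pd.Series(np.arange(len(rng)), index=rng)
    by_year = s.loc['2005']                     # every row in 2005
    by_month = s.loc['2005-11']                 # every row in November 2005
    by_range = s.loc['2005-05':'2006-02']       # inclusive partial-string slice
    return by_year, by_month, by_range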
class TimeConversionFormats(tm.TestCase):
def test_to_datetime_format(self):
values = ['1/1/2000', '1/2/2000', '1/3/2000']
results1 = [ Timestamp('20000101'), Timestamp('20000201'),
Timestamp('20000301') ]
results2 = [ Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103') ]
for vals, expecteds in [ (values, (Index(results1), Index(results2))),
(Series(values),( | Series(results1) | pandas.Series |
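# --- Illustrative sketch, not part of the original sources ---
# TimeConversionFormats above checks that an explicit format= pins the parse
# order of ambiguous day/month strings; a standalone version of that idea:
def _example_to_datetime_format():
    import pandas as pd

    as_day_first = pd.to_datetime(['01/02/2000'], format='%d/%m/%Y')    # 1 Feb 2000
    as_month_first = pd.to_datetime(['01/02/2000'], format='%m/%d/%Y')  # 2 Jan 2000
    return as_day_first, as_month_first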
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller
from sklearn import preprocessing
scaler = preprocessing.MinMaxScaler()
df = pd.read_csv('./data/Coinbase_BTCUSD_1h_2.csv')
df = df.dropna().reset_index().sort_values('Date')
scaled_df = df[['Open', 'Close', 'High', 'Low']].values
scaled_df = scaler.fit_transform(scaled_df.astype('float64'))
scaled_df = | pd.DataFrame(scaled_df, columns=['Open', 'Close', 'High', 'Low']) | pandas.DataFrame |
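# --- Illustrative sketch, not part of the original script ---
# adfuller is imported above but unused in the visible excerpt; a typical
# stationarity check on the scaled Close column might look like this
# (the 'Close' column name and the 5% threshold are assumptions).
def _example_adf_check(frame):
    from statsmodels.tsa.stattools import adfuller

    stat, pvalue = adfuller(frame['Close'].values)[:2]
    return pvalue < 0.05  # True suggests stationarity at the 5% level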
"""
The goal of this module is to take a BED file containing regions of the genome we wish to exclude.
By default (invert_selection=True) the module then builds a k-mer mutability model from the regions
of the genome that remain after excluding those in the BED file; with invert_selection=False the
model is built from the listed regions themselves.
"""
from collections import defaultdict, Counter
import time
from cyvcf2 import VCF
from pyfaidx import Fasta, FetchError
import eskedit as ek
import pandas as pd
import multiprocessing as mp
from eskedit import GRegion, get_autosome_lengths_grch38, get_grch38_chroms, RegionContainer, Variant, DataContainer
import sys
import array
"""
This currently works with 3-, 5-, and 7-mer contexts. The expected input is a tab-separated
file with the following fields, in this order:
    CHROM    POS    REF    ALT
    1. CHROM - chromosome should be as reported by GRCh38
2. POS - position on chromosome aligned to hg38
3. REF - reference allele
4. ALT - alternate allele from VCF
* Additional fields after this will be ignored
"""
def get_bed_regions(bed_path, invert_selection=True, header=False, clean_bed=False, strand_col=None,
bed_names_col=None):
"""
Returns an iterable of GRegions specified by the filepath in bed format.
:param bed_path: Path to bed file
:param invert_selection: True (default) will return GRegions not in the file
:param header: True if file has a header line. False (default)
:param clean_bed: False (default) means bed file may have overlapping regions which will be merged. True means each line is added independently of position
:param strand_col: Zero-based column index containing strand information
:param bed_names_col: Zero-based column index containing name information
:return: Iterable of GRegions
"""
additional_fields = defaultdict(int)
if strand_col is not None:
try:
strand_col = int(strand_col)
additional_fields['strand'] = strand_col
except ValueError:
strand_col = None
if bed_names_col is not None:
try:
bed_names_col = int(bed_names_col)
additional_fields['name'] = bed_names_col
except ValueError:
bed_names_col = None
regions = RegionContainer()
with open(bed_path, 'r') as bedfile:
kwargs = defaultdict(str)
if header:
bedfile.readline()
for line in bedfile.readlines():
fields = line.split('\t')
# add keyword arguments to pass to GRegion constructor
for k, v in additional_fields.items():
kwargs[k] = fields[v]
if clean_bed:
regions.add_distinct_region(GRegion(*[fields[0], fields[1], fields[2]], **kwargs))
else:
regions.add_region(GRegion(*[fields[0], fields[1], fields[2]], **kwargs))
if invert_selection:
return regions.get_inverse()
else:
return regions.get_regions()
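# --- Illustrative sketch, not part of the original module ---
# Typical call of get_bed_regions; the path and column indices below are
# placeholders for illustration, not values taken from the source.
def _example_get_bed_regions():
    regions = get_bed_regions(
        'exclude_regions.bed',   # hypothetical BED path
        invert_selection=True,   # model everything *outside* the BED regions
        header=False,
        clean_bed=False,
        strand_col=5,            # hypothetical zero-based strand column
        bed_names_col=3,         # hypothetical zero-based name column
    )
    return list(regions)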
def model_region_singletons(data_container, vcf_path, fasta_path, kmer_size, region):
start = time.time()
fasta = Fasta(fasta_path)
vcf = VCF(vcf_path)
start_idx_offset = int(kmer_size / 2 + 1)
kmer_mid_idx = int(start_idx_offset - 1)
try:
if region.strand is not None:
if ek.is_dash(region.strand):
sequence = fasta.get_seq(region.chrom, region.start-kmer_mid_idx, region.stop+kmer_mid_idx).complement.seq.upper()
else:
sequence = fasta.get_seq(region.chrom, region.start-kmer_mid_idx, region.stop+kmer_mid_idx).seq.upper()
else:
sequence = fasta.get_seq(region.chrom, region.start-kmer_mid_idx, region.stop+kmer_mid_idx).seq.upper()
except (KeyError, FetchError):
print('Region %s not found in fasta, continuing...' % str(region), file=sys.stderr, flush=True)
return
region_ref_counts = ek.kmer_search(sequence, kmer_size) # nprocs=1 due to short region
r_string = str(region.chrom) + ':' + str(region.start) + '-' + str(region.stop)
transitions = defaultdict(lambda: array.array('L', [0, 0, 0, 0]))
# Define indices for nucleotides
nuc_idx = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
idx_nuc = list('ACGT')
for variant in vcf(r_string):
if ek.is_singleton_snv(variant):
new_var = Variant(variant=variant, fields=['vep'])
# take 7mer around variant. pyfaidx excludes start index and includes end index
adj_seq = fasta[str(new_var.CHROM)][(new_var.POS - start_idx_offset):(new_var.POS + kmer_mid_idx)].seq
if str(adj_seq[kmer_mid_idx]).upper() != str(variant.REF).upper():
print('WARNING: Reference mismatch\tFasta REF: %s\tVCF REF: %s' % (adj_seq[kmer_mid_idx], variant.REF),
file=sys.stderr, flush=True)
if ek.complete_sequence(adj_seq):
transitions[adj_seq.upper()][nuc_idx[new_var.ALT[0]]] += 1
temp = data_container.get()
temp.add_kmer_counts(region_ref_counts)
temp.add_transition(transitions)
data_container.set(temp)
print('Finished region %s in %s' % (str(region), str(time.time() - start)), flush=True)
return
def model_region_nonsingletons(data_container, vcf_path, fasta_path, kmer_size, region, AC_cutoff):
if AC_cutoff is not None:
try:
AC_cutoff = int(AC_cutoff)
except ValueError:
AC_cutoff = None
print('AC cutoff must be a positive integer. Ignoring user value and using SNVs with any AC.', file=sys.stderr, flush=True)
try:
kmer_size = int(kmer_size)
if kmer_size < 1: raise ValueError
except ValueError:
print('kmer_size must be a positive integer. Please check arguments.', file=sys.stderr, flush=True)
exit(1)
start = time.time()
fasta = Fasta(fasta_path)
vcf = VCF(vcf_path)
start_idx_offset = int(kmer_size / 2 + 1)
kmer_mid_idx = int(start_idx_offset - 1)
try:
if region.strand is not None:
if ek.is_dash(region.strand):
sequence = fasta.get_seq(region.chrom, region.start-kmer_mid_idx, region.stop+kmer_mid_idx).complement.seq.upper()
else:
sequence = fasta.get_seq(region.chrom, region.start-kmer_mid_idx, region.stop+kmer_mid_idx).seq.upper()
else:
sequence = fasta.get_seq(region.chrom, region.start-kmer_mid_idx, region.stop+kmer_mid_idx).seq.upper()
except (KeyError, FetchError):
print('Region %s not found in fasta, continuing...' % str(region), file=sys.stderr, flush=True)
return
region_ref_counts = ek.kmer_search(sequence, kmer_size) # nprocs=1 due to short region
r_string = str(region.chrom) + ':' + str(region.start) + '-' + str(region.stop)
ac_transitions = defaultdict(lambda: array.array('L', [0, 0, 0, 0]))
an_transitions = defaultdict(lambda: array.array('L', [0, 0, 0, 0]))
# Define indices for nucleotides
nuc_idx = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
idx_nuc = list('ACGT')
for variant in vcf(r_string):
if ek.is_quality_snv(variant, AC_cutoff=AC_cutoff):
new_var = Variant(variant=variant)
adj_seq = fasta[str(new_var.CHROM)][(new_var.POS - start_idx_offset):(new_var.POS + kmer_mid_idx)].seq
if str(adj_seq[kmer_mid_idx]).upper() != str(variant.REF).upper():
print('WARNING: Reference mismatch\tFasta REF: %s\tVCF REF: %s' % (adj_seq[kmer_mid_idx], variant.REF),
file=sys.stderr, flush=True)
if ek.complete_sequence(adj_seq):
ac_transitions[adj_seq.upper()][nuc_idx[new_var.ALT[0]]] += new_var.AC
an_transitions[adj_seq.upper()][nuc_idx[new_var.ALT[0]]] += new_var.AN
# if ek.is_singleton_snv(variant):
# new_var = Variant(variant=variant, fields=['vep'])
# # take 7mer around variant. pyfaidx excludes start index and includes end index
# adj_seq = fasta[str(new_var.CHROM)][(new_var.POS - start_idx_offset):(new_var.POS + kmer_mid_idx)].seq
# if str(adj_seq[kmer_mid_idx]).upper() != str(variant.REF).upper():
# print('WARNING: Reference mismatch\tFasta REF: %s\tVCF REF: %s' % (adj_seq[kmer_mid_idx], variant.REF), file=sys.stderr, flush=True)
# if ek.complete_sequence(adj_seq):
# transitions[adj_seq.upper()][nuc_idx[new_var.ALT[0]]] += 1
temp = data_container.get()
temp.add_kmer_counts(region_ref_counts)
temp.add_transition(ac_transitions)
temp.add_transition2(an_transitions)
data_container.set(temp)
print('Finished region %s in %s' % (str(region), str(time.time() - start)), flush=True)
return
def OLDtrain_kmer_model(bed_path, vcf_path, fasta_path, kmer_size, nprocs=1, invert_selection=True,
clean_bed=False, singletons=False, nonsingletons=False,
header=False, strand_col=None, bed_names_col=None, AC_cutoff=None):
"""
Builds the counts tables required for the k-mer model. Returned as 2 dictionaries.
@param AC_cutoff: Specify to filter out variants above a given AC (AC > cutoff will be filtered)
** works only if keyword 'nonsingletons=True' **
@param nonsingletons: Set true to train model on all SNVs
@param singletons: Set true to train model based on singleton variants
@param strand_col: zero-based column index of strand information from bed file
@param bed_names_col: zero-based column index of name information from bed file
@param bed_path: path to bed file
@param vcf_path: path to vcf file
@param fasta_path: path to reference fasta
@param kmer_size: NOTE unpredictable behavior may occur if even numbers are used here.
@param nprocs: number of processors to use
@param invert_selection: True (default) process regions NOT specified by bed file
    @param clean_bed: False (default) merges overlapping regions in the bed file. True processes regions exactly as listed
@param header: False (default) if the bed file does not have a header
    @return: dict keyed by 'singletons' and/or 'all_variants' containing the aggregated k-mer count data
"""
try:
nprocs = int(nprocs)
if strand_col is not None:
strand_col = int(strand_col)
if bed_names_col is not None:
bed_names_col = int(bed_names_col)
except ValueError:
print('ERROR: nprocs and column indices must be integers', file=sys.stderr, flush=True)
exit(1)
manager = mp.Manager()
# set up so master data count stays in shared memory
dc = manager.Value(DataContainer, DataContainer())
regions = get_bed_regions(bed_path, invert_selection=invert_selection, header=header, clean_bed=clean_bed,
strand_col=strand_col, bed_names_col=bed_names_col)
# Bundle arguments to pass to 'model_region' function
pool = mp.Pool(nprocs)
# Distribute workload
results = defaultdict(tuple)
if singletons:
arguments = [(dc, vcf_path, fasta_path, kmer_size, region) for region in regions]
pool.starmap_async(model_region_singletons, arguments)
pool.close()
pool.join()
results['singletons'] = dc.value.get()
if nonsingletons:
arguments = [(dc, vcf_path, fasta_path, kmer_size, region, AC_cutoff) for region in regions]
pool.starmap_async(model_region_nonsingletons, arguments)
pool.close()
pool.join()
results['all_variants'] = dc.value.get()
return results # master_ref_counts, transitions_list
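# --- Illustrative sketch, not part of the original module ---
# Example call of OLDtrain_kmer_model; every path below is a placeholder.
# With singletons=True the returned dict carries a 'singletons' entry, and
# with nonsingletons=True an 'all_variants' entry (see the body above).
def _example_train_kmer_model():
    results = OLDtrain_kmer_model(
        'exclude_regions.bed',   # hypothetical BED path
        'variants.vcf.bgz',      # hypothetical VCF path
        'GRCh38.fa',             # hypothetical reference FASTA path
        kmer_size=7,
        nprocs=4,
        invert_selection=True,
        singletons=True,
    )
    return results['singletons']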
def generate_frequency_table(reference_counts, transition_counts, filepath=False, save_file=None):
if filepath:
counts = pd.read_csv(reference_counts, index_col=0).sort_index()
transitions = pd.read_csv(transition_counts, index_col=0).sort_index()
else:
counts = pd.DataFrame.from_dict(reference_counts, orient='index').sort_index()
transitions = | pd.DataFrame.from_dict(transition_counts, orient='index') | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
"""
Created on 2018-4-17
@author: cheng.li
"""
import random
import unittest
import numpy as np
import pandas as pd
from PyFin.api import CSQuantiles
from PyFin.api import CSRank
from PyFin.api import advanceDateByCalendar
from PyFin.api import bizDatesList
from PyFin.api import makeSchedule
from scipy.stats import rankdata
from sqlalchemy import select, and_, or_
from alphamind.data.dbmodel.models import IndexComponent
from alphamind.data.dbmodel.models import IndexMarket
from alphamind.data.dbmodel.models import Industry
from alphamind.data.dbmodel.models import Market
from alphamind.data.dbmodel.models import RiskCovShort
from alphamind.data.dbmodel.models import RiskExposure
from alphamind.data.dbmodel.models import Universe as UniverseTable
from alphamind.data.engines.sqlengine import SqlEngine
from alphamind.data.engines.universe import Universe
from alphamind.tests.test_suite import DATA_ENGINE_URI
from alphamind.tests.test_suite import SKIP_ENGINE_TESTS
from alphamind.utilities import alpha_logger
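# --- Illustrative sketch, not part of the original test module ---
# The dx-return checks below reduce per-code daily pct changes to a total
# log return via sum(log(1 + chgPct)); a standalone version of that reduction
# (the frame is assumed to carry 'code' and 'chgPct' columns, as in the
# queries used by the tests):
def _example_dx_return(frame):
    import numpy as np
    return frame.groupby('code').apply(lambda x: np.log(1. + x).sum())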
@unittest.skipIf(SKIP_ENGINE_TESTS, "Omit sql engine tests")
class TestSqlEngine(unittest.TestCase):
def setUp(self):
self.engine = SqlEngine(DATA_ENGINE_URI)
dates_list = bizDatesList('china.sse', '2010-10-01', '2018-04-27')
self.ref_date = random.choice(dates_list).strftime('%Y-%m-%d')
alpha_logger.info("Test date: {0}".format(self.ref_date))
def test_sql_engine_fetch_codes(self):
ref_date = self.ref_date
universe = Universe('zz500') + Universe('zz1000')
codes = self.engine.fetch_codes(ref_date, universe)
query = select([UniverseTable.code]).where(
and_(
UniverseTable.trade_date == ref_date,
or_(
UniverseTable.zz500 == 1,
UniverseTable.zz1000 == 1
)
)
)
df = pd.read_sql(query, con=self.engine.engine).sort_values('code')
self.assertListEqual(codes, list(df.code.values))
def test_sql_engine_fetch_codes_range(self):
ref_dates = makeSchedule(advanceDateByCalendar('china.sse', self.ref_date, '-6m'),
self.ref_date,
'60b', 'china.sse')
universe = Universe('zz500') + Universe('zz1000')
codes = self.engine.fetch_codes_range(universe, dates=ref_dates)
query = select([UniverseTable.trade_date, UniverseTable.code]).where(
and_(
UniverseTable.trade_date.in_(ref_dates),
or_(
UniverseTable.zz500 == 1,
UniverseTable.zz1000 == 1
)
)
)
df = pd.read_sql(query, con=self.engine.engine).sort_values('code')
for ref_date in ref_dates:
calculated_codes = list(sorted(codes[codes.trade_date == ref_date].code.values))
expected_codes = list(sorted(df[df.trade_date == ref_date].code.values))
self.assertListEqual(calculated_codes, expected_codes)
    def test_sql_engine_fetch_codes_with_exclude_universe(self):
ref_date = self.ref_date
universe = Universe('zz500') - Universe('cyb')
codes = self.engine.fetch_codes(ref_date, universe)
query = select([UniverseTable.code]).where(
and_(
UniverseTable.trade_date == ref_date,
UniverseTable.zz500 == 1,
UniverseTable.cyb == 0
)
)
df = pd.read_sql(query, con=self.engine.engine).sort_values('code')
self.assertListEqual(codes, list(df.code.values))
def test_sql_engine_fetch_dx_return(self):
horizon = 4
offset = 1
ref_date = self.ref_date
universe = Universe('zz500') + Universe('zz1000')
codes = self.engine.fetch_codes(ref_date, universe)
dx_return = self.engine.fetch_dx_return(ref_date, codes, horizon=horizon, offset=offset)
start_date = advanceDateByCalendar('china.sse', ref_date, '2b')
end_date = advanceDateByCalendar('china.sse', ref_date, '6b')
query = select([Market.code, Market.chgPct]).where(
and_(
Market.trade_date.between(start_date, end_date),
Market.code.in_(dx_return.code.unique().tolist())
)
)
df = pd.read_sql(query, con=self.engine.engine)
res = df.groupby('code').apply(lambda x: np.log(1. + x).sum())
np.testing.assert_array_almost_equal(dx_return.dx.values, res.chgPct.values)
horizon = 4
offset = 0
ref_date = self.ref_date
universe = Universe('zz500') + Universe('zz1000')
codes = self.engine.fetch_codes(ref_date, universe)
dx_return = self.engine.fetch_dx_return(ref_date, codes, horizon=horizon, offset=offset)
start_date = advanceDateByCalendar('china.sse', ref_date, '1b')
end_date = advanceDateByCalendar('china.sse', ref_date, '5b')
query = select([Market.code, Market.chgPct]).where(
and_(
Market.trade_date.between(start_date, end_date),
Market.code.in_(dx_return.code.unique().tolist())
)
)
df = pd.read_sql(query, con=self.engine.engine)
res = df.groupby('code').apply(lambda x: np.log(1. + x).sum())
np.testing.assert_array_almost_equal(dx_return.dx.values, res.chgPct.values)
def test_sql_engine_fetch_dx_return_with_benchmark(self):
horizon = 4
offset = 1
benchmark = 300
ref_date = self.ref_date
universe = Universe('zz500') + Universe('zz1000')
codes = self.engine.fetch_codes(ref_date, universe)
dx_return = self.engine.fetch_dx_return(ref_date, codes, horizon=horizon, offset=offset,
benchmark=benchmark)
start_date = advanceDateByCalendar('china.sse', ref_date, '2b')
end_date = advanceDateByCalendar('china.sse', ref_date, '6b')
query = select([Market.code, Market.chgPct]).where(
and_(
Market.trade_date.between(start_date, end_date),
Market.code.in_(dx_return.code.unique().tolist())
)
)
df = pd.read_sql(query, con=self.engine.engine)
res = df.groupby('code').apply(lambda x: np.log(1. + x).sum())
query = select([IndexMarket.indexCode.label('code'), IndexMarket.chgPct]).where(
and_(
IndexMarket.trade_date.between(start_date, end_date),
IndexMarket.indexCode == benchmark
)
)
df = pd.read_sql(query, con=self.engine.engine)
b_res = df.groupby('code').apply(lambda x: np.log(1. + x).sum())
np.testing.assert_array_almost_equal(dx_return.dx.values,
res.chgPct.values - b_res.chgPct.values)
horizon = 4
offset = 0
ref_date = self.ref_date
universe = Universe('zz500') + Universe('zz1000')
codes = self.engine.fetch_codes(ref_date, universe)
dx_return = self.engine.fetch_dx_return(ref_date, codes, horizon=horizon, offset=offset,
benchmark=benchmark)
start_date = advanceDateByCalendar('china.sse', ref_date, '1b')
end_date = advanceDateByCalendar('china.sse', ref_date, '5b')
query = select([Market.code, Market.chgPct]).where(
and_(
Market.trade_date.between(start_date, end_date),
Market.code.in_(dx_return.code.unique().tolist())
)
)
df = pd.read_sql(query, con=self.engine.engine)
res = df.groupby('code').apply(lambda x: np.log(1. + x).sum())
query = select([IndexMarket.indexCode.label('code'), IndexMarket.chgPct]).where(
and_(
IndexMarket.trade_date.between(start_date, end_date),
IndexMarket.indexCode == benchmark
)
)
df = pd.read_sql(query, con=self.engine.engine)
b_res = df.groupby('code').apply(lambda x: np.log(1. + x).sum())
np.testing.assert_array_almost_equal(dx_return.dx.values,
res.chgPct.values - b_res.chgPct.values)
def test_sql_engine_fetch_dx_return_range(self):
ref_dates = makeSchedule(advanceDateByCalendar('china.sse', self.ref_date, '-6m'),
self.ref_date,
'60b', 'china.sse')
universe = Universe('zz500') + Universe('zz1000')
dx_return = self.engine.fetch_dx_return_range(universe,
dates=ref_dates,
horizon=4,
offset=1)
codes = self.engine.fetch_codes_range(universe, dates=ref_dates)
groups = codes.groupby('trade_date')
for ref_date, g in groups:
start_date = advanceDateByCalendar('china.sse', ref_date, '2b')
end_date = advanceDateByCalendar('china.sse', ref_date, '6b')
query = select([Market.code, Market.chgPct]).where(
and_(
Market.trade_date.between(start_date, end_date),
Market.code.in_(g.code.unique().tolist())
)
)
df = pd.read_sql(query, con=self.engine.engine)
res = df.groupby('code').apply(lambda x: np.log(1. + x).sum())
calculated_return = dx_return[dx_return.trade_date == ref_date]
np.testing.assert_array_almost_equal(calculated_return.dx.values, res.chgPct.values)
def test_sql_engine_fetch_dx_return_range_with_benchmark(self):
ref_dates = makeSchedule(advanceDateByCalendar('china.sse', self.ref_date, '-6m'),
self.ref_date,
'60b', 'china.sse')
universe = Universe('zz500') + Universe('zz1000')
benchmark = 300
dx_return = self.engine.fetch_dx_return_range(universe,
dates=ref_dates,
horizon=4,
offset=1,
benchmark=benchmark)
codes = self.engine.fetch_codes_range(universe, dates=ref_dates)
groups = codes.groupby('trade_date')
for ref_date, g in groups:
start_date = advanceDateByCalendar('china.sse', ref_date, '2b')
end_date = advanceDateByCalendar('china.sse', ref_date, '6b')
query = select([Market.code, Market.chgPct]).where(
and_(
Market.trade_date.between(start_date, end_date),
Market.code.in_(g.code.unique().tolist())
)
)
df = pd.read_sql(query, con=self.engine.engine)
res = df.groupby('code').apply(lambda x: np.log(1. + x).sum())
query = select([IndexMarket.indexCode.label('code'), IndexMarket.chgPct]).where(
and_(
IndexMarket.trade_date.between(start_date, end_date),
IndexMarket.indexCode == benchmark
)
)
df = pd.read_sql(query, con=self.engine.engine)
b_res = df.groupby('code').apply(lambda x: np.log(1. + x).sum())
calculated_return = dx_return[dx_return.trade_date == ref_date]
np.testing.assert_array_almost_equal(calculated_return.dx.values,
res.chgPct.values - b_res.chgPct.values)
def test_sql_engine_fetch_dx_return_with_universe_adjustment(self):
ref_dates = makeSchedule(advanceDateByCalendar('china.sse', '2017-01-26', '-6m'),
'2017-01-26',
'60b', 'china.sse')
universe = Universe('zz500')
dx_return = self.engine.fetch_dx_return_range(universe,
dates=ref_dates,
horizon=4,
offset=1)
codes = self.engine.fetch_codes_range(universe, dates=ref_dates)
groups = codes.groupby('trade_date')
for ref_date, g in groups:
start_date = advanceDateByCalendar('china.sse', ref_date, '2b')
end_date = advanceDateByCalendar('china.sse', ref_date, '6b')
query = select([Market.code, Market.chgPct]).where(
and_(
Market.trade_date.between(start_date, end_date),
Market.code.in_(g.code.unique().tolist())
)
)
df = | pd.read_sql(query, con=self.engine.engine) | pandas.read_sql |
from gibbon.maps import MapSensor, TileMap, TerrainMap
from gibbon.fem import ShapeBasedFiniteGrid, LineBasedFiniteGrid
from gibbon.utility import Convert
from gibbon.web_api import Amap, Bmap
from shapely.geometry import Polygon
import pandas as pd
class Project:
def __init__(
self,
polyline,
origin,
radius=2000,
density=1
):
self.sensor = MapSensor(origin, radius)
self.tile_map = TileMap(self.sensor)
self.terrain_map = TerrainMap(self.sensor)
self.fem_red_line = LineBasedFiniteGrid(polyline, density)
self.fem_site = ShapeBasedFiniteGrid(polyline, density)
bounds = self.sensor.bounds
line = [bounds[0], [bounds[1][0], bounds[0][1]], bounds[1], [bounds[0][0], bounds[1][1]]]
self.fem_map = ShapeBasedFiniteGrid(line, density)
self.amap = Amap()
self.bmap = Bmap()
def pois_by_keyword(self, keyword):
llbounds = self.sensor.llbounds[0] + self.sensor.llbounds[1]
return self.bmap.pois_by_keyword_bounds(keyword, llbounds)
def setup(self):
pass
if __name__ == '__main__':
boundary = [
[135645.11278065387, 32315.40416692337],
[135645.11278029159, 201671.17918046517],
[126952.82788838632, 211814.94409043854],
[85289.83720657602, 216309.82957304642],
[43411.964759724215, 217810.69178508036],
[-162833.7758713793, 217810.69178540818],
[-187833.77586947195, 192810.69178564614],
[-187833.77586565679, 142810.69178516977],
[-191333.77586374991, 112810.69178528897],
[-191333.77585802786, 13810.932417852804],
[-187710.77355013601, -17243.373066889122],
[-184568.05179033987, -74563.56585736759],
[-178656.31940851919, -122455.30853326805],
[-169447.6962624616, -182124.46695764549],
[-168331.07474528067, -212307.29579167254],
[-150410.09518061439, -328429.95081900246],
[-142375.37355051748, -357545.12295277603],
[-134252.77894793218, -410177.13715671189],
[-113968.53712664358, -423936.7188313175],
[-62660.312091929838, -412856.00462846644],
[-91011.301433665678, -165898.26749964245],
[135656.22394360788, -165898.26749976166],
[135656.22394360788, -115189.30818407424],
[126656.22394360788, -106189.30821271427],
[126686.89403142221, -97189.308212356642],
[57662.24364461191, -97189.308212356642],
[57461.112780706026, 32315.40416692337],
[135645.11278065387, 32315.40416692337]
]
project = Project(boundary, origin=[113.520280, 22.130790])
path = r'C:\Users\wenhs\Desktop'
project.fem_red_line.dump_mesh(path + r'\fem_red_line.json')
project.fem_site.dump_mesh(path + r'\fem_site.json')
project.fem_map.dump_mesh(path + r'\fem_map.json')
dfs = list()
kinds = ['公交车站', '住宅']
for k in kinds:
pois = project.pois_by_keyword(k)
location, names = pois['location'], pois['name']
lnglats = location.apply(lambda x: [x['lng'], x['lat']])
coords = lnglats.apply(
lambda x: Convert.lnglat_to_mercator(x, project.sensor.origin)
)
rst = | pd.DataFrame() | pandas.DataFrame |